mirror of
https://github.com/oxen-io/oxen-storage-server.git
synced 2023-12-13 21:00:26 +01:00
Compare commits
474 commits
Author | SHA1 | Date | |
---|---|---|---|
|
0dada891a6 | ||
|
ab1a84f6c5 | ||
|
0ff8fb8922 | ||
|
b0015833f1 | ||
|
13effb23e8 | ||
|
8e72692dc6 | ||
|
fc32085574 | ||
|
a0bccc0a38 | ||
|
a61c332c86 | ||
|
295017a08e | ||
|
d4379fc53f | ||
|
1ac5ef63dd | ||
|
0554aa9e84 | ||
|
e279651bb5 | ||
|
351850d2c6 | ||
|
71f3d311da | ||
|
b3272c8326 | ||
|
4d865196a1 | ||
|
2c48803c8c | ||
|
562b29f357 | ||
|
95f632bbe9 | ||
|
a099ff651a | ||
|
9bb1c548fd | ||
|
5d71a4c293 | ||
|
cc62974186 | ||
|
0980e8807d | ||
|
baaedc929b | ||
|
f475f35034 | ||
|
5492481860 | ||
|
ae8ba81549 | ||
|
b663a46e07 | ||
|
ed9e467058 | ||
|
ab7b3c177b | ||
|
535582e138 | ||
|
f7a04485b3 | ||
|
7c7843d75f | ||
|
e91154f9a0 | ||
|
905143a1ff | ||
|
860cffe17e | ||
|
13e3a1adcb | ||
|
0eb0f8babe | ||
|
c97971c419 | ||
|
a60a07f6dd | ||
|
f8900e572f | ||
|
f402017baa | ||
|
a682570aed | ||
|
f94f7df9dd | ||
|
b346c7f55d | ||
|
ad9fd5d3c2 | ||
|
06490d966f | ||
|
cbfd8f1ba0 | ||
|
3b7e1b43ee | ||
|
6a498d7362 | ||
|
9905f6bea2 | ||
|
e06f380d3e | ||
|
86ad074cfc | ||
|
1a1b143f97 | ||
|
93bab08f44 | ||
|
cc57cbf77c | ||
|
a0976b04a7 | ||
|
3f6a1cf396 | ||
|
a2a515d28c | ||
|
53fee5689b | ||
|
20d3d043ef | ||
|
2fea350b99 | ||
|
da050cb60b | ||
|
56b2928a78 | ||
|
d44fa2260c | ||
|
ab8bdb2a6c | ||
|
4a84d77dfc | ||
|
57457741e6 | ||
|
7321143931 | ||
|
3d8db3a274 | ||
|
670d643dd0 | ||
|
ad3cf01bee | ||
|
e017637351 | ||
|
2cac91b92f | ||
|
7d065a6510 | ||
|
36d367b383 | ||
|
edfd0ac684 | ||
|
b2eb1bce93 | ||
|
420950fb88 | ||
|
d0f81c7b35 | ||
|
13ef30b5a6 | ||
|
0fc91aa855 | ||
|
afa245d215 | ||
|
6e840fb37b | ||
|
5dc63b71ad | ||
|
541ee9331d | ||
|
33a287a5b0 | ||
|
f43c2b3b00 | ||
|
389ac24d27 | ||
|
a4d93b9509 | ||
|
49047465cc | ||
|
3e5f1a7a11 | ||
|
91e71da253 | ||
|
00afc10b13 | ||
|
9d3d02cf9a | ||
|
0264de1daa | ||
|
bc7424d3a8 | ||
|
176e66b0dd | ||
|
807c437183 | ||
|
be7bbdc1c2 | ||
|
2776ebe50a | ||
|
81cf19dc0d | ||
|
b0029005d4 | ||
|
0b1f063b8b | ||
|
5276d4ed70 | ||
|
9c62bd2d3a | ||
|
fd615323ef | ||
|
db393218e9 | ||
|
8364ef029f | ||
|
87db715b83 | ||
|
768fd3b536 | ||
|
ac94bfb965 | ||
|
fc3836241f | ||
|
7623c3a7c7 | ||
|
f613c06ee0 | ||
|
09405cff32 | ||
|
91337019f1 | ||
|
84277ef035 | ||
|
238e799d63 | ||
|
e084d93ea0 | ||
|
854f8b5ebe | ||
|
d65e07b31e | ||
|
be549ed51f | ||
|
b07092d32e | ||
|
306010094e | ||
|
511ae36ac1 | ||
|
dd35ecd5cf | ||
|
828df8abe5 | ||
|
4138cb7303 | ||
|
5e446e5ef8 | ||
|
0382cf3262 | ||
|
afcda8b446 | ||
|
262b7bbb2b | ||
|
2271ecbc36 | ||
|
902c2c869f | ||
|
5a83fa86cd | ||
|
e5733e7c51 | ||
|
7e31650dbc | ||
|
e4a5e57024 | ||
|
21991ac762 | ||
|
4bd76fa205 | ||
|
b9b58f584e | ||
|
0042d2317d | ||
|
52d2aa8d82 | ||
|
1d1a4806c9 | ||
|
c992071152 | ||
|
c147d0633f | ||
|
c371dfe5fd | ||
|
a2f5c0647c | ||
|
6e3765042c | ||
|
22fc89df12 | ||
|
fd464ab01a | ||
|
0707b4e4dc | ||
|
ec826f1fa4 | ||
|
7e100893e1 | ||
|
d82c459819 | ||
|
81a706ce2a | ||
|
7002e2efdb | ||
|
fca615b4d7 | ||
|
991ef8e863 | ||
|
b70a6f14af | ||
|
0450714f19 | ||
|
80b0ba0869 | ||
|
452beb463f | ||
|
9701818b09 | ||
|
40715ccd5c | ||
|
e2c97c3309 | ||
|
df4e1d5423 | ||
|
b143585082 | ||
|
28723dd0ab | ||
|
506c692bc3 | ||
|
6bf8d4cf95 | ||
|
56ecd59dc5 | ||
|
915729325e | ||
|
b9c7b90c25 | ||
|
ca39bdb903 | ||
|
a4d6662b91 | ||
|
e5c01c9473 | ||
|
a40c33f579 | ||
|
4509ac7ae3 | ||
|
cef4d96021 | ||
|
f3cd3afcee | ||
|
1964905c79 | ||
|
e2ea7db8d4 | ||
|
3dbe7da360 | ||
|
23c48ab385 | ||
|
fdd83f954f | ||
|
fb1df09486 | ||
|
f31359993c | ||
|
a920b9c6ec | ||
|
684aa199f1 | ||
|
60d283ed0e | ||
|
b31c14a493 | ||
|
2411559428 | ||
|
40e3489f62 | ||
|
2134b22dfa | ||
|
10be4ba94e | ||
|
6a14399240 | ||
|
5714c63321 | ||
|
7439dce4ec | ||
|
bf6d6d6973 | ||
|
b43c0ab337 | ||
|
7f148b4d6e | ||
|
de01ba092b | ||
|
323c459df8 | ||
|
8d459ff7d2 | ||
|
1090dabd09 | ||
|
5048751ca7 | ||
|
9b5a8739fd | ||
|
4299ef14ee | ||
|
b529b962a6 | ||
|
f9d6932554 | ||
|
eab06e985e | ||
|
8e05625d5a | ||
|
988979df0e | ||
|
fc0eefc783 | ||
|
4ea0748509 | ||
|
3be368d994 | ||
|
c24849e653 | ||
|
b7db30e857 | ||
|
ef8492668c | ||
|
0d5b61ad4f | ||
|
d58f195eef | ||
|
6a3ec7dee9 | ||
|
c9a3aadf45 | ||
|
e7dc6e352a | ||
|
1727053980 | ||
|
bcce006013 | ||
|
3f1e69e79b | ||
|
1453d9e816 | ||
|
314894d4b1 | ||
|
b7b0d75d4f | ||
|
ef2e454d05 | ||
|
4a514a107e | ||
|
e39317239c | ||
|
02074c082e | ||
|
a28eeabda0 | ||
|
b284679745 | ||
|
8772aeaaeb | ||
|
9ac5305e06 | ||
|
b93ff6fa4a | ||
|
facd068675 | ||
|
71bddf96bd | ||
|
3430fa41b7 | ||
|
f7a15f6e97 | ||
|
339ad4a3c8 | ||
|
0e8b364c9c | ||
|
42f7862d46 | ||
|
95e12c7682 | ||
|
7f0a6c311d | ||
|
f4848a5846 | ||
|
c28ed7f2b8 | ||
|
783612bc10 | ||
|
81f0638c16 | ||
|
4e7a27f709 | ||
|
0615fb3b17 | ||
|
b136241659 | ||
|
e78d01b08d | ||
|
f865e54d5c | ||
|
9a6a8dbe7d | ||
|
4cce31c3ac | ||
|
0070732710 | ||
|
c2b84143e8 | ||
|
e4c98a528a | ||
|
a2cb1803f3 | ||
|
599b750df7 | ||
|
76249f9e8c | ||
|
481bf72977 | ||
|
586dbd5a28 | ||
|
8993fa093f | ||
|
6b70b3fe71 | ||
|
b89872c478 | ||
|
64d1f1dee3 | ||
|
c44694bc23 | ||
|
f5adf7fece | ||
|
43f51f7880 | ||
|
4fa345db21 | ||
|
44d2edbc34 | ||
|
5f29cb055c | ||
|
1653ec07a1 | ||
|
3a8f053a4b | ||
|
6fda19d4f1 | ||
|
c01c129700 | ||
|
c87aa430a1 | ||
|
7899d5d855 | ||
|
75acdcd0e0 | ||
|
7fab7e3767 | ||
|
c881a83fb3 | ||
|
92367efe5b | ||
|
1cc5e93c69 | ||
|
90d459f6ff | ||
|
3ad55fd0b1 | ||
|
ff5affa0eb | ||
|
7e05c4dfcc | ||
|
69d3e8c380 | ||
|
edd11bb1f9 | ||
|
88f4f60fa1 | ||
|
cafd06dd64 | ||
|
f045500c27 | ||
|
9445e66406 | ||
|
9c63efceff | ||
|
c079b288b8 | ||
|
3ad1ea6c7c | ||
|
c9f50cd604 | ||
|
1ce1e7c5f0 | ||
|
43b54da70f | ||
|
2aefb74fc2 | ||
|
a3cb7bc980 | ||
|
bbcb1e00c3 | ||
|
e644959f83 | ||
|
00f2f2854d | ||
|
e59cfbeac6 | ||
|
3196e07037 | ||
|
0db1d73ba0 | ||
|
1918c7f111 | ||
|
2776f434e8 | ||
|
51cf2ba979 | ||
|
c488e86141 | ||
|
39f4e55751 | ||
|
b3b1b4eceb | ||
|
37646c35c0 | ||
|
c434ebb053 | ||
|
c2b73ce278 | ||
|
81136b7664 | ||
|
a3e24eb92b | ||
|
1afbcb8179 | ||
|
53536c50ba | ||
|
9206e640a9 | ||
|
23a47cf34d | ||
|
dd9495596f | ||
|
d3a4f35c27 | ||
|
a3fce7978f | ||
|
4e62418c05 | ||
|
5c4a0321b1 | ||
|
a0ab978079 | ||
|
10b612861e | ||
|
281c0d8cb9 | ||
|
8ecb53e369 | ||
|
587ce89780 | ||
|
ea00309c13 | ||
|
eb786d255b | ||
|
59ed42061a | ||
|
6b6476298e | ||
|
e43c1128f2 | ||
|
506241eb5f | ||
|
ecafa29092 | ||
|
0809055837 | ||
|
bb08064c74 | ||
|
eb806b8188 | ||
|
6fe0c65528 | ||
|
344735ab09 | ||
|
3bb3920e6b | ||
|
97ef320eb3 | ||
|
65c8b0fd69 | ||
|
d79323a9f8 | ||
|
eb289c280e | ||
|
263b0ab219 | ||
|
cca44edcb8 | ||
|
39c489e6dc | ||
|
97c8bb12ef | ||
|
10b9d5accb | ||
|
c0e7deef2f | ||
|
81f89019fe | ||
|
6e95b9c976 | ||
|
fa9c58bfd2 | ||
|
2a62b4d5d3 | ||
|
670f98accd | ||
|
fe102073b5 | ||
|
d826d2fbe4 | ||
|
d0b2323637 | ||
|
e7d10cb18a | ||
|
0c92ee2933 | ||
|
f4b28a7e89 | ||
|
8b14c1fc1b | ||
|
e8dcdb4d2b | ||
|
3023e10fdf | ||
|
41be330e00 | ||
|
71d96c8f7a | ||
|
e6b8d8efcf | ||
|
8ae2ab867c | ||
|
ba2ddd4ddd | ||
|
81ad901115 | ||
|
2361b293c9 | ||
|
60017e7e4d | ||
|
2cd3998144 | ||
|
ad10713c3a | ||
|
ffefbc3acd | ||
|
5b57fcc7fa | ||
|
05bb4cf7ea | ||
|
ba7be1d0f5 | ||
|
401afe3942 | ||
|
2c6b4b9fb8 | ||
|
60cd3d3059 | ||
|
874e93f89d | ||
|
7d9f6d1a8d | ||
|
f3a4db9a79 | ||
|
98f6140c0f | ||
|
786042ecbb | ||
|
31d35ce869 | ||
|
de7bf79854 | ||
|
88de161365 | ||
|
16ec9ac0f2 | ||
|
afac0e7488 | ||
|
b5e8f07894 | ||
|
921eb151e6 | ||
|
45978a85db | ||
|
b01027d478 | ||
|
545f412a20 | ||
|
f6e0b095bc | ||
|
2f96647566 | ||
|
c7c76b27a8 | ||
|
fe0d423fb3 | ||
|
18296be278 | ||
|
1f10f726c8 | ||
|
eb7636ccec | ||
|
0dabccfa80 | ||
|
89d881a1dc | ||
|
fa3a09949e | ||
|
26e9413149 | ||
|
f43c2c77a9 | ||
|
575a09fc0b | ||
|
018be6c1b0 | ||
|
8e3683df2f | ||
|
ab609ef938 | ||
|
a02fea6542 | ||
|
9faa9e4b2d | ||
|
54c05ca159 | ||
|
31a6c6dafa | ||
|
181b138f6d | ||
|
68fef509bd | ||
|
0aea2563fe | ||
|
09742a64e3 | ||
|
5ac1206d31 | ||
|
5be297ad55 | ||
|
b2dfb20b1f | ||
|
d38538c7c9 | ||
|
d5876d9647 | ||
|
8d34f76002 | ||
|
18a3906c47 | ||
|
7632224165 | ||
|
66f39d0590 | ||
|
c372f7d841 | ||
|
414245193d | ||
|
fa5f451fa3 | ||
|
b58ef69647 | ||
|
aae913bdd8 | ||
|
c299ff7d44 | ||
|
af34d1979d | ||
|
96d5843826 | ||
|
96bb02aa75 | ||
|
60b18518b0 | ||
|
71e41ca034 | ||
|
4904450f83 | ||
|
41b434d6b2 | ||
|
305d73d62f | ||
|
ca21cb288e | ||
|
665e943f5b | ||
|
466babe353 | ||
|
d9cb5cc473 | ||
|
f36bf751dd | ||
|
67957806d9 | ||
|
92de289086 | ||
|
d913217fad | ||
|
50557608f3 | ||
|
c4efcf36b5 | ||
|
04254433fb | ||
|
9e040e3cd3 | ||
|
07f005d928 | ||
|
5b32dbecb7 | ||
|
b67e9c134b | ||
|
4a97312af6 |
|
@ -1,11 +1,47 @@
|
|||
|
||||
# We'll use defaults from the LLVM style, but with 4 columns indentation.
|
||||
BasedOnStyle: LLVM
|
||||
BasedOnStyle: Google
|
||||
AlignAfterOpenBracket: AlwaysBreak
|
||||
AlignConsecutiveAssignments: 'false'
|
||||
AlignConsecutiveDeclarations: 'false'
|
||||
AlignEscapedNewlines: Left
|
||||
AlignOperands: AlignAfterOperator
|
||||
AlignTrailingComments: 'true'
|
||||
AllowAllArgumentsOnNextLine: 'true'
|
||||
AllowShortBlocksOnASingleLine: 'false'
|
||||
AllowShortCaseLabelsOnASingleLine: 'true'
|
||||
AllowShortFunctionsOnASingleLine: Inline
|
||||
AllowShortIfStatementsOnASingleLine: 'false'
|
||||
AllowShortLoopsOnASingleLine: 'false'
|
||||
AlwaysBreakAfterReturnType: None
|
||||
AlwaysBreakTemplateDeclarations: Yes
|
||||
BreakBeforeBinaryOperators: None
|
||||
BreakBeforeBraces: Attach
|
||||
BreakBeforeTernaryOperators: 'true'
|
||||
BreakConstructorInitializers: AfterColon
|
||||
Cpp11BracedListStyle: 'true'
|
||||
KeepEmptyLinesAtTheStartOfBlocks: 'true'
|
||||
NamespaceIndentation: Inner
|
||||
CompactNamespaces: 'true'
|
||||
PenaltyBreakString: '3'
|
||||
SpaceBeforeParens: ControlStatements
|
||||
SpacesInAngles: 'false'
|
||||
SpacesInContainerLiterals: 'false'
|
||||
SpacesInParentheses: 'false'
|
||||
SpacesInSquareBrackets: 'false'
|
||||
Standard: c++17
|
||||
UseTab: Never
|
||||
SortIncludes: false
|
||||
ColumnLimit: 100
|
||||
IndentWidth: 4
|
||||
Language: Cpp
|
||||
# Force pointers to the type for C++.
|
||||
AccessModifierOffset: -2
|
||||
ConstructorInitializerIndentWidth: 8
|
||||
ContinuationIndentWidth: 8
|
||||
|
||||
|
||||
# treat pointers and reference declarations as if part of the type
|
||||
DerivePointerAlignment: false
|
||||
PointerAlignment: Left
|
||||
AlwaysBreakAfterReturnType: None
|
||||
AlwaysBreakAfterDefinitionReturnType: None
|
||||
AlwaysBreakTemplateDeclarations: true
|
||||
|
||||
# when wrapping function calls/declarations, force each parameter to have its own line
|
||||
BinPackParameters: 'false'
|
||||
BinPackArguments: 'false'
|
||||
|
|
283
.drone.jsonnet
283
.drone.jsonnet
|
@ -1,133 +1,196 @@
|
|||
local default_deps_base = [
|
||||
'autoconf',
|
||||
'libboost-program-options-dev',
|
||||
'libcurl4-openssl-dev',
|
||||
'libjemalloc-dev',
|
||||
'libsodium-dev',
|
||||
'libsqlite3-dev',
|
||||
'libssl-dev',
|
||||
'libsystemd-dev',
|
||||
'make',
|
||||
'pkg-config',
|
||||
];
|
||||
local default_deps_nocxx = ['libsodium-dev'] + default_deps_base; // libsodium-dev needs to be >= 1.0.18
|
||||
local default_deps = ['g++'] + default_deps_nocxx; // g++ sometimes needs replacement
|
||||
local docker_base = 'registry.oxen.rocks/lokinet-ci-';
|
||||
|
||||
local default_deps_base='libsystemd-dev libboost-program-options-dev libboost-system-dev libboost-test-dev ' +
|
||||
'libsqlite3-dev libsodium-dev libssl-dev pkg-config';
|
||||
local default_deps='g++ ' + default_deps_base; // g++ sometimes needs replacement
|
||||
|
||||
local submodules_commands = ['git fetch --tags', 'git submodule update --init --recursive --depth=1'];
|
||||
local submodules_commands = ['git fetch --tags', 'git submodule update --init --recursive --depth=1 --jobs=4'];
|
||||
local submodules = {
|
||||
name: 'submodules',
|
||||
image: 'drone/git',
|
||||
commands: submodules_commands
|
||||
name: 'submodules',
|
||||
image: 'drone/git',
|
||||
commands: submodules_commands,
|
||||
};
|
||||
|
||||
local apt_get_quiet = 'apt-get -o=Dpkg::Use-Pty=0 -q';
|
||||
|
||||
local cmake_options(opts) = std.join(' ', [' -D' + o + '=' + (if opts[o] then 'ON' else 'OFF') for o in std.objectFields(opts)]) + ' ';
|
||||
|
||||
// Regular build on a debian-like system:
|
||||
local debian_pipeline(name, image,
|
||||
arch='amd64',
|
||||
deps=default_deps,
|
||||
build_type='Release',
|
||||
lto=false,
|
||||
build_tests=true,
|
||||
run_tests=false, # Runs full test suite
|
||||
cmake_extra='',
|
||||
extra_cmds=[],
|
||||
extra_steps=[],
|
||||
jobs=6,
|
||||
allow_fail=false) = {
|
||||
kind: 'pipeline',
|
||||
type: 'docker',
|
||||
name: name,
|
||||
platform: { arch: arch },
|
||||
steps: [
|
||||
submodules,
|
||||
{
|
||||
name: 'build',
|
||||
image: image,
|
||||
[if allow_fail then "failure"]: "ignore",
|
||||
environment: { SSH_KEY: { from_secret: "SSH_KEY" } },
|
||||
commands: [
|
||||
'echo "Building on ${DRONE_STAGE_MACHINE}"',
|
||||
'echo "man-db man-db/auto-update boolean false" | debconf-set-selections',
|
||||
apt_get_quiet + ' update',
|
||||
apt_get_quiet + ' install -y eatmydata',
|
||||
'eatmydata ' + apt_get_quiet + ' dist-upgrade -y',
|
||||
'eatmydata ' + apt_get_quiet + ' install -y --no-install-recommends cmake git ca-certificates ninja-build ccache '
|
||||
+ deps,
|
||||
'mkdir build',
|
||||
'cd build',
|
||||
'cmake .. -G Ninja -DCMAKE_CXX_FLAGS=-fdiagnostics-color=always -DCMAKE_BUILD_TYPE='+build_type+' ' +
|
||||
'-DLOCAL_MIRROR=https://oxen.rocks/deps -DUSE_LTO=' + (if lto then 'ON ' else 'OFF ') +
|
||||
(if build_tests || run_tests then '-DBUILD_TESTS=ON ' else '') +
|
||||
cmake_extra,
|
||||
'ninja -j' + jobs + ' -v',
|
||||
] +
|
||||
(if run_tests then ['./unit_test/Test'] else []) +
|
||||
extra_cmds,
|
||||
}
|
||||
] + extra_steps,
|
||||
local debian_pipeline(name,
|
||||
image,
|
||||
arch='amd64',
|
||||
deps=default_deps,
|
||||
build_type='Release',
|
||||
lto=false,
|
||||
werror=true,
|
||||
build_tests=true,
|
||||
run_tests=true, // Runs full test suite
|
||||
test_oxen_storage=true, // Makes sure oxen-storage --version runs
|
||||
cmake_extra='',
|
||||
extra_cmds=[],
|
||||
extra_steps=[],
|
||||
jobs=6,
|
||||
oxen_repo=false,
|
||||
allow_fail=false) = {
|
||||
kind: 'pipeline',
|
||||
type: 'docker',
|
||||
name: name,
|
||||
platform: { arch: arch },
|
||||
steps: [
|
||||
submodules,
|
||||
{
|
||||
name: 'build',
|
||||
image: image,
|
||||
pull: 'always',
|
||||
[if allow_fail then 'failure']: 'ignore',
|
||||
environment: { SSH_KEY: { from_secret: 'SSH_KEY' } },
|
||||
commands: [
|
||||
'echo "Building on ${DRONE_STAGE_MACHINE}"',
|
||||
'echo "man-db man-db/auto-update boolean false" | debconf-set-selections',
|
||||
apt_get_quiet + ' update',
|
||||
apt_get_quiet + ' install -y eatmydata',
|
||||
] + (
|
||||
if oxen_repo then [
|
||||
'eatmydata ' + apt_get_quiet + ' install --no-install-recommends -y lsb-release',
|
||||
'cp contrib/deb.oxen.io.gpg /etc/apt/trusted.gpg.d',
|
||||
'echo deb http://deb.oxen.io $$(lsb_release -sc) main >/etc/apt/sources.list.d/oxen.list',
|
||||
'eatmydata ' + apt_get_quiet + ' update',
|
||||
] else []
|
||||
) + [
|
||||
'eatmydata ' + apt_get_quiet + ' dist-upgrade -y',
|
||||
'eatmydata ' + apt_get_quiet + ' install -y --no-install-recommends cmake git ca-certificates ninja-build ccache '
|
||||
+ std.join(' ', deps),
|
||||
'mkdir build',
|
||||
'cd build',
|
||||
'cmake .. -G Ninja -DCMAKE_CXX_FLAGS=-fdiagnostics-color=always -DCMAKE_BUILD_TYPE=' + build_type
|
||||
+ ' -DLOCAL_MIRROR=https://oxen.rocks/deps -DEXTRA_WARNINGS=ON '
|
||||
+ cmake_options({ USE_LTO: lto, WARNINGS_AS_ERRORS: werror, BUILD_TESTS: build_tests || run_tests })
|
||||
+ cmake_extra,
|
||||
'ninja -j' + jobs + ' -v',
|
||||
] +
|
||||
(if test_oxen_storage then ['./oxen-storage --version'] else []) +
|
||||
(if run_tests then ['./unit_test/Test'] else []) +
|
||||
extra_cmds,
|
||||
},
|
||||
] + extra_steps,
|
||||
};
|
||||
|
||||
local clang(version, lto=false) = debian_pipeline(
|
||||
'Debian sid/clang-' + version + ' (amd64)',
|
||||
docker_base + 'debian-sid-clang',
|
||||
deps=['clang-' + version] + default_deps_nocxx,
|
||||
cmake_extra='-DCMAKE_C_COMPILER=clang-' + version + ' -DCMAKE_CXX_COMPILER=clang++-' + version + ' ',
|
||||
lto=lto
|
||||
);
|
||||
|
||||
// Macos build
|
||||
local mac_builder(name,
|
||||
build_type='Release',
|
||||
lto=false,
|
||||
build_tests=true,
|
||||
run_tests=false,
|
||||
cmake_extra='',
|
||||
extra_cmds=[],
|
||||
extra_steps=[],
|
||||
jobs=6,
|
||||
allow_fail=false) = {
|
||||
kind: 'pipeline',
|
||||
type: 'exec',
|
||||
name: name,
|
||||
platform: { os: 'darwin', arch: 'amd64' },
|
||||
steps: [
|
||||
{ name: 'submodules', commands: submodules_commands },
|
||||
{
|
||||
name: 'build',
|
||||
environment: { SSH_KEY: { from_secret: "SSH_KEY" } },
|
||||
commands: [
|
||||
// If you don't do this then the C compiler doesn't have an include path containing
|
||||
// basic system headers. WTF apple:
|
||||
'export SDKROOT="$(xcrun --sdk macosx --show-sdk-path)"',
|
||||
'mkdir build',
|
||||
'cd build',
|
||||
'cmake .. -G Ninja -DCMAKE_CXX_FLAGS=-fcolor-diagnostics -DCMAKE_BUILD_TYPE='+build_type+' ' +
|
||||
'-DLOCAL_MIRROR=https://oxen.rocks/deps -DUSE_LTO=' + (if lto then 'ON ' else 'OFF ') +
|
||||
(if build_tests || run_tests then '-DBUILD_TESTS=ON ' else '') +
|
||||
cmake_extra,
|
||||
'ninja -j' + jobs + ' -v'
|
||||
] +
|
||||
(if run_tests then ['./unit_test/Test'] else []) +
|
||||
extra_cmds,
|
||||
}
|
||||
] + extra_steps
|
||||
build_type='Release',
|
||||
lto=false,
|
||||
werror=true,
|
||||
build_tests=true,
|
||||
run_tests=true,
|
||||
test_oxen_storage=true, // Makes sure oxen-storage --version runs
|
||||
cmake_extra='',
|
||||
extra_cmds=[],
|
||||
extra_steps=[],
|
||||
jobs=6,
|
||||
allow_fail=false) = {
|
||||
kind: 'pipeline',
|
||||
type: 'exec',
|
||||
name: name,
|
||||
platform: { os: 'darwin', arch: 'amd64' },
|
||||
steps: [
|
||||
{ name: 'submodules', commands: submodules_commands },
|
||||
{
|
||||
name: 'build',
|
||||
environment: { SSH_KEY: { from_secret: 'SSH_KEY' } },
|
||||
commands: [
|
||||
// If you don't do this then the C compiler doesn't have an include path containing
|
||||
// basic system headers. WTF apple:
|
||||
'export SDKROOT="$(xcrun --sdk macosx --show-sdk-path)"',
|
||||
'mkdir build',
|
||||
'cd build',
|
||||
'cmake .. -G Ninja -DCMAKE_CXX_FLAGS=-fcolor-diagnostics -DCMAKE_BUILD_TYPE=' + build_type
|
||||
+ ' -DLOCAL_MIRROR=https://oxen.rocks/deps -DEXTRA_WARNINGS=ON '
|
||||
+ cmake_options({ USE_LTO: lto, WARNINGS_AS_ERRORS: werror, BUILD_TESTS: build_tests || run_tests })
|
||||
+ cmake_extra,
|
||||
'ninja -j' + jobs + ' -v',
|
||||
] +
|
||||
(if test_oxen_storage then ['./oxen-storage --version'] else []) +
|
||||
(if run_tests then ['./unit_test/Test'] else []) +
|
||||
extra_cmds,
|
||||
},
|
||||
] + extra_steps,
|
||||
};
|
||||
|
||||
local static_check_and_upload = [
|
||||
'../contrib/drone-check-static-libs.sh',
|
||||
'ninja strip',
|
||||
'ninja create_tarxz',
|
||||
'../contrib/drone-static-upload.sh'
|
||||
'../contrib/drone-check-static-libs.sh',
|
||||
'ninja strip',
|
||||
'ninja create_tarxz',
|
||||
'../contrib/drone-static-upload.sh',
|
||||
];
|
||||
|
||||
local static_build_deps='autoconf automake make file libtool pkg-config patch openssh-client';
|
||||
|
||||
|
||||
[
|
||||
// Various debian builds
|
||||
debian_pipeline("Debian (w/ tests) (amd64)", "debian:sid", lto=true, run_tests=true),
|
||||
debian_pipeline("Debian Debug (amd64)", "debian:sid", build_type='Debug'),
|
||||
debian_pipeline("Debian clang-11 (amd64)", "debian:sid", deps='clang-11 '+default_deps_base,
|
||||
cmake_extra='-DCMAKE_C_COMPILER=clang-11 -DCMAKE_CXX_COMPILER=clang++-11 ', lto=true),
|
||||
debian_pipeline("Debian buster (i386)", "i386/debian:buster"),
|
||||
debian_pipeline("Ubuntu focal (amd64)", "ubuntu:focal"),
|
||||
{
|
||||
name: 'lint check',
|
||||
kind: 'pipeline',
|
||||
type: 'docker',
|
||||
steps: [{
|
||||
name: 'build',
|
||||
image: docker_base + 'lint',
|
||||
pull: 'always',
|
||||
commands: [
|
||||
'echo "Building on ${DRONE_STAGE_MACHINE}"',
|
||||
apt_get_quiet + ' update',
|
||||
apt_get_quiet + ' install -y eatmydata',
|
||||
'eatmydata ' + apt_get_quiet + ' install --no-install-recommends -y git clang-format-14 jsonnet',
|
||||
'./contrib/drone-format-verify.sh',
|
||||
],
|
||||
}],
|
||||
},
|
||||
|
||||
// ARM builds (ARM64 and armhf)
|
||||
debian_pipeline("Debian (ARM64)", "debian:sid", arch="arm64", build_tests=false),
|
||||
debian_pipeline("Debian buster (armhf)", "arm32v7/debian:buster", arch="arm64", build_tests=false),
|
||||
// Various debian builds
|
||||
debian_pipeline('Debian (amd64)', docker_base + 'debian-sid', lto=true),
|
||||
debian_pipeline('Debian Debug (amd64)', docker_base + 'debian-sid', build_type='Debug'),
|
||||
clang(14, lto=true),
|
||||
debian_pipeline('Debian stable (i386)', docker_base + 'debian-stable/i386'),
|
||||
debian_pipeline('Ubuntu LTS (amd64)', docker_base + 'ubuntu-lts'),
|
||||
debian_pipeline('Ubuntu latest (amd64)', docker_base + 'ubuntu-rolling'),
|
||||
debian_pipeline('Debian buster (amd64)',
|
||||
docker_base + 'debian-buster',
|
||||
deps=default_deps_base + ['g++', 'file'],
|
||||
cmake_extra='-DDOWNLOAD_SODIUM=ON'),
|
||||
|
||||
// Static build (on bionic) which gets uploaded to oxen.rocks:
|
||||
debian_pipeline("Static (bionic amd64)", "ubuntu:bionic", deps='g++-8 '+static_build_deps,
|
||||
cmake_extra='-DBUILD_STATIC_DEPS=ON -DCMAKE_C_COMPILER=gcc-8 -DCMAKE_CXX_COMPILER=g++-8',
|
||||
build_tests=false, lto=true, extra_cmds=static_check_and_upload),
|
||||
// ARM builds (ARM64 and armhf)
|
||||
debian_pipeline('Debian sid (ARM64)', docker_base + 'debian-sid', arch='arm64'),
|
||||
debian_pipeline('Debian stable (armhf)', docker_base + 'debian-stable/arm32v7', arch='arm64'),
|
||||
|
||||
// Macos builds:
|
||||
mac_builder('macOS (Static)', cmake_extra='-DBUILD_STATIC_DEPS=ON',
|
||||
build_tests=false, lto=true, extra_cmds=static_check_and_upload),
|
||||
mac_builder('macOS (Release)', run_tests=true),
|
||||
mac_builder('macOS (Debug)', build_type='Debug', cmake_extra='-DBUILD_DEBUG_UTILS=ON'),
|
||||
// Static build (on bionic) which gets uploaded to oxen.rocks:
|
||||
debian_pipeline('Static (bionic amd64)',
|
||||
docker_base + 'ubuntu-bionic',
|
||||
deps=['autoconf', 'automake', 'file', 'g++-8', 'libtool', 'make', 'openssh-client', 'patch', 'pkg-config'],
|
||||
cmake_extra='-DBUILD_STATIC_DEPS=ON -DCMAKE_C_COMPILER=gcc-8 -DCMAKE_CXX_COMPILER=g++-8',
|
||||
lto=true,
|
||||
oxen_repo=true, // for updated cmake
|
||||
extra_cmds=static_check_and_upload),
|
||||
|
||||
// Macos builds:
|
||||
mac_builder('macOS (Static)',
|
||||
cmake_extra='-DBUILD_STATIC_DEPS=ON',
|
||||
lto=true,
|
||||
extra_cmds=static_check_and_upload),
|
||||
mac_builder('macOS (Release)'),
|
||||
mac_builder('macOS (Debug)', build_type='Debug'),
|
||||
]
|
||||
|
|
32
.gitmodules
vendored
32
.gitmodules
vendored
|
@ -1,9 +1,27 @@
|
|||
[submodule "vendors/spdlog"]
|
||||
path = vendors/spdlog
|
||||
url = https://github.com/gabime/spdlog.git
|
||||
[submodule "vendors/loki-mq"]
|
||||
path = vendors/loki-mq
|
||||
url = https://github.com/loki-project/loki-mq.git
|
||||
[submodule "vendors/oxen-mq"]
|
||||
path = external/oxen-mq
|
||||
url = https://github.com/oxen-io/oxen-mq.git
|
||||
[submodule "vendors/nlohmann_json"]
|
||||
path = vendors/nlohmann_json
|
||||
path = external/nlohmann_json
|
||||
url = https://github.com/nlohmann/json.git
|
||||
[submodule "vendors/uWebSockets"]
|
||||
path = external/uWebSockets
|
||||
url = https://github.com/uNetworking/uWebSockets.git
|
||||
[submodule "vendors/cpr"]
|
||||
path = external/cpr
|
||||
url = https://github.com/whoshuu/cpr.git
|
||||
[submodule "unit_test/Catch2"]
|
||||
path = unit_test/Catch2
|
||||
url = https://github.com/catchorg/Catch2
|
||||
[submodule "vendors/SQLiteCpp"]
|
||||
path = external/SQLiteCpp
|
||||
url = https://github.com/SRombauts/SQLiteCpp.git
|
||||
[submodule "vendors/oxenc"]
|
||||
path = external/oxenc
|
||||
url = https://github.com/oxen-io/oxen-encoding.git
|
||||
[submodule "vendors/CLI11"]
|
||||
path = external/CLI11
|
||||
url = https://github.com/CLIUtils/CLI11
|
||||
[submodule "external/oxen-logging"]
|
||||
path = external/oxen-logging
|
||||
url = https://github.com/oxen-io/oxen-logging.git
|
||||
|
|
|
@ -10,16 +10,12 @@ if(CCACHE_PROGRAM)
|
|||
endforeach()
|
||||
endif()
|
||||
|
||||
cmake_minimum_required(VERSION 3.10)
|
||||
cmake_minimum_required(VERSION 3.13)
|
||||
|
||||
project(storage_server
|
||||
VERSION 2.0.8
|
||||
project(oxenss
|
||||
VERSION 2.5.0
|
||||
LANGUAGES CXX C)
|
||||
|
||||
option(INTEGRATION_TEST "build for integration test" OFF)
|
||||
option(DISABLE_SNODE_SIGNATURE "Generate and verify signatures for inter-snode communication"
|
||||
OFF)
|
||||
|
||||
set(CMAKE_CXX_STANDARD 17)
|
||||
set(CMAKE_CXX_STANDARD_REQUIRED TRUE)
|
||||
set(CMAKE_CXX_EXTENSIONS FALSE)
|
||||
|
@ -41,18 +37,54 @@ else()
|
|||
set(IPO_ENABLED OFF)
|
||||
endif()
|
||||
|
||||
if (INTEGRATION_TEST)
|
||||
add_definitions(-DDISABLE_POW)
|
||||
add_definitions(-DINTEGRATION_TEST)
|
||||
endif()
|
||||
|
||||
|
||||
if (DISABLE_SNODE_SIGNATURE)
|
||||
add_definitions(-DDISABLE_SNODE_SIGNATURE)
|
||||
endif()
|
||||
|
||||
option(BUILD_TESTS "build storage server unit tests" OFF)
|
||||
|
||||
find_package(Git)
|
||||
option(MANUAL_SUBMODULES "Don't check for out-of-date submodules" OFF)
|
||||
if(NOT GIT_FOUND)
|
||||
message(WARNING "Git not found, unable to check that submodules are up-to-date")
|
||||
else()
|
||||
function (check_submodule relative_path)
|
||||
execute_process(COMMAND git rev-parse "HEAD" WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/${relative_path} OUTPUT_VARIABLE localHead)
|
||||
execute_process(COMMAND git rev-parse "HEAD:${relative_path}" WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} OUTPUT_VARIABLE checkedHead)
|
||||
string(COMPARE EQUAL "${localHead}" "${checkedHead}" upToDate)
|
||||
if (upToDate)
|
||||
message(STATUS "Submodule '${relative_path}' is up-to-date")
|
||||
elseif(MANUAL_SUBMODULES)
|
||||
message(WARNING "Submodule '${relative_path}' is not up-to-date")
|
||||
else()
|
||||
message(FATAL_ERROR "Submodule '${relative_path}' is not up-to-date. Please update with\ngit submodule update --init --recursive\nor run cmake with -DMANUAL_SUBMODULES=1")
|
||||
endif()
|
||||
|
||||
# Extra arguments check nested submodules
|
||||
foreach(submod ${ARGN})
|
||||
execute_process(COMMAND git rev-parse "HEAD" WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/${relative_path}/${submod} OUTPUT_VARIABLE localHead)
|
||||
execute_process(COMMAND git rev-parse "HEAD:${submod}" WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/${relative_path} OUTPUT_VARIABLE checkedHead)
|
||||
string(COMPARE EQUAL "${localHead}" "${checkedHead}" upToDate)
|
||||
if (NOT upToDate)
|
||||
if(MANUAL_SUBMODULES)
|
||||
message(WARNING "Nested submodule '${relative_path}/${submod}' is not up-to-date")
|
||||
else()
|
||||
message(FATAL_ERROR "Nested submodule '${relative_path}/${submod}' is not up-to-date. Please update with\ngit submodule update --init --recursive\nor run cmake with -DMANUAL_SUBMODULES=1")
|
||||
endif()
|
||||
endif()
|
||||
endforeach()
|
||||
endfunction ()
|
||||
|
||||
message(STATUS "Checking submodules")
|
||||
check_submodule(external/oxenc)
|
||||
check_submodule(external/oxen-mq cppzmq)
|
||||
check_submodule(external/oxen-logging fmt spdlog)
|
||||
check_submodule(external/nlohmann_json)
|
||||
check_submodule(external/uWebSockets uSockets)
|
||||
check_submodule(external/cpr)
|
||||
check_submodule(external/CLI11)
|
||||
if(BUILD_TESTS)
|
||||
check_submodule(unit_test/Catch2)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
|
||||
list (APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/cmake")
|
||||
|
||||
option(BUILD_STATIC_DEPS "Download, build and statically link against core dependencies" OFF)
|
||||
|
@ -60,34 +92,26 @@ if(BUILD_STATIC_DEPS)
|
|||
include(StaticBuild)
|
||||
else()
|
||||
find_package(PkgConfig REQUIRED)
|
||||
pkg_check_modules(SODIUM REQUIRED IMPORTED_TARGET libsodium>=1.0.17)
|
||||
add_library(sodium INTERFACE)
|
||||
target_link_libraries(sodium INTERFACE PkgConfig::SODIUM)
|
||||
|
||||
# Need this target export so that loki-mq properly picks up sodium
|
||||
export(TARGETS sodium NAMESPACE sodium:: FILE sodium-exports.cmake)
|
||||
|
||||
find_package(Boost REQUIRED system program_options)
|
||||
|
||||
find_package(OpenSSL REQUIRED)
|
||||
endif()
|
||||
|
||||
include(cmake/check_atomic.cmake)
|
||||
link_libatomic()
|
||||
|
||||
include(cmake/check_for_std_filesystem.cmake)
|
||||
|
||||
if(NOT BUILD_STATIC_DEPS)
|
||||
endif()
|
||||
add_subdirectory(external)
|
||||
|
||||
add_subdirectory(oxenss)
|
||||
|
||||
add_subdirectory(common)
|
||||
add_subdirectory(utils)
|
||||
add_subdirectory(crypto)
|
||||
add_subdirectory(pow)
|
||||
add_subdirectory(storage)
|
||||
add_subdirectory(httpserver)
|
||||
set(BUILD_SHARED_LIBS OFF CACHE BOOL "disable shared libraries") # Tells loki-mq to do a static build
|
||||
add_subdirectory(vendors/loki-mq)
|
||||
|
||||
if (BUILD_TESTS)
|
||||
add_subdirectory(unit_test)
|
||||
endif ()
|
||||
|
||||
add_executable(onion-request EXCLUDE_FROM_ALL contrib/onion-request.cpp)
|
||||
set_target_properties(onion-request PROPERTIES RUNTIME_OUTPUT_DIRECTORY contrib)
|
||||
target_link_libraries(onion-request common crypto cpr::cpr oxenmq::oxenmq)
|
||||
target_include_directories(onion-request PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
|
||||
|
||||
include(cmake/archive.cmake)
|
||||
|
|
75
Dockerfile
75
Dockerfile
|
@ -1,75 +0,0 @@
|
|||
FROM ubuntu:bionic
|
||||
|
||||
RUN apt update && apt install -y build-essential curl git cmake libssl-dev libsodium-dev wget pkg-config autoconf libtool g++-8
|
||||
WORKDIR /usr/src/app
|
||||
|
||||
## Boost
|
||||
ARG BOOST_VERSION=1_70_0
|
||||
ARG BOOST_VERSION_DOT=1.70.0
|
||||
ARG BOOST_HASH=430ae8354789de4fd19ee52f3b1f739e1fba576f0aded0897c3c2bc00fb38778
|
||||
RUN set -ex \
|
||||
&& curl -s -L -o boost_${BOOST_VERSION}.tar.bz2 https://dl.bintray.com/boostorg/release/${BOOST_VERSION_DOT}/source/boost_${BOOST_VERSION}.tar.bz2 \
|
||||
&& echo "${BOOST_HASH} boost_${BOOST_VERSION}.tar.bz2" | sha256sum -c \
|
||||
&& tar -xvf boost_${BOOST_VERSION}.tar.bz2 \
|
||||
&& cd boost_${BOOST_VERSION} \
|
||||
&& ./bootstrap.sh \
|
||||
&& ./b2 --build-type=minimal link=static runtime-link=static --with-chrono --with-date_time --with-filesystem --with-program_options --with-regex --with-serialization --with-system --with-thread --with-locale threading=multi threadapi=pthread cflags="-fPIC" cxxstd=14 cxxflags="-fPIC" stage
|
||||
ENV BOOST_ROOT /usr/local/boost_${BOOST_VERSION}
|
||||
|
||||
# OpenSSL
|
||||
ARG OPENSSL_VERSION=1.1.1c
|
||||
ARG OPENSSL_HASH=f6fb3079ad15076154eda9413fed42877d668e7069d9b87396d0804fdb3f4c90
|
||||
RUN set -ex \
|
||||
&& curl -s -O https://www.openssl.org/source/openssl-${OPENSSL_VERSION}.tar.gz \
|
||||
&& echo "${OPENSSL_HASH} openssl-${OPENSSL_VERSION}.tar.gz" | sha256sum -c \
|
||||
&& tar -xzf openssl-${OPENSSL_VERSION}.tar.gz \
|
||||
&& cd openssl-${OPENSSL_VERSION} \
|
||||
&& ./Configure linux-x86_64 no-shared --static -fPIC \
|
||||
&& make build_generated \
|
||||
&& make libcrypto.a \
|
||||
&& make install
|
||||
ENV OPENSSL_ROOT_DIR=/usr/local/openssl-${OPENSSL_VERSION}
|
||||
|
||||
# Sodium
|
||||
ARG SODIUM_VERSION=1.0.18
|
||||
ARG SODIUM_HASH=4f5e89fa84ce1d178a6765b8b46f2b6f91216677
|
||||
RUN set -ex \
|
||||
&& git clone https://github.com/jedisct1/libsodium.git -b ${SODIUM_VERSION} --depth=1 \
|
||||
&& cd libsodium \
|
||||
&& test `git rev-parse HEAD` = ${SODIUM_HASH} || exit 1 \
|
||||
&& ./autogen.sh \
|
||||
&& CFLAGS="-fPIC" CXXFLAGS="-fPIC" ./configure \
|
||||
&& make \
|
||||
&& make check \
|
||||
&& make install
|
||||
|
||||
RUN apt-get install -y apt-transport-https ca-certificates gnupg software-properties-common wget
|
||||
RUN wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null | apt-key add -
|
||||
RUN apt-add-repository 'deb https://apt.kitware.com/ubuntu/ xenial main'
|
||||
|
||||
RUN apt-get update
|
||||
|
||||
RUN apt-get install -y kitware-archive-keyring
|
||||
RUN apt-key --keyring /etc/apt/trusted.gpg del C1F34CDD40CD72DA
|
||||
|
||||
RUN apt-get install -y cmake
|
||||
|
||||
ADD https://api.github.com/repos/loki-project/loki-storage-server/git/refs/heads/master version.json
|
||||
|
||||
RUN rm -rf loki-storage-server
|
||||
|
||||
RUN git clone https://github.com/loki-project/loki-storage-server.git --depth=1
|
||||
|
||||
RUN cd loki-storage-server && git submodule update --init --recursive
|
||||
|
||||
ENV BOOST_ROOT /usr/src/app/boost_${BOOST_VERSION}
|
||||
|
||||
ENV CC=gcc-8 CXX=g++-8
|
||||
|
||||
RUN cd loki-storage-server \
|
||||
&& mkdir -p build \
|
||||
&& cd build \
|
||||
&& cmake .. -DBOOST_ROOT=$BOOST_ROOT -Dsodium_USE_STATIC_LIBS=ON \
|
||||
&& cmake --build . -- -j8
|
||||
|
||||
RUN loki-storage-server/build/httpserver/loki-storage --version
|
69
Makefile
69
Makefile
|
@ -1,69 +0,0 @@
|
|||
SUB_DIR:=$(shell echo `uname | sed -e 's|[:/\\ \(\)]|_|g'`/`git branch | grep '\* ' | cut -f2- -d' '| sed -e 's|[:/\\ \(\)]|_|g'`)
|
||||
|
||||
ifeq ($(DEBUG),)
|
||||
BUILD_TYPE := Release
|
||||
else
|
||||
BUILD_TYPE := Debug
|
||||
endif
|
||||
|
||||
ifeq ($(USE_SINGLE_BUILD_DIR),)
|
||||
BUILD_DIR := build/$(SUB_DIR)/$(BUILD_TYPE)
|
||||
TOP_DIR := ../../../..
|
||||
else
|
||||
BUILD_DIR := build
|
||||
TOP_DIR := ..
|
||||
endif
|
||||
|
||||
ifeq ($(GEN),)
|
||||
CMAKE := cmake
|
||||
else
|
||||
CMAKE := cmake -G$(GEN)
|
||||
endif
|
||||
|
||||
BUILD_TESTS ?= ON
|
||||
|
||||
BUILD_STATIC ?= ON
|
||||
|
||||
MKDIR := mkdir -p $(BUILD_DIR) && cd $(BUILD_DIR)
|
||||
|
||||
all:
|
||||
$(MKDIR) && \
|
||||
$(CMAKE) \
|
||||
-DBoost_USE_STATIC_LIBS=$(BUILD_STATIC) \
|
||||
-DOPENSSL_USE_STATIC_LIBS=$(BUILD_STATIC) \
|
||||
-DCMAKE_BUILD_TYPE=$(BUILD_TYPE) \
|
||||
-DBUILD_TESTS=$(BUILD_TESTS) \
|
||||
-DDISABLE_SNODE_SIGNATURE=OFF \
|
||||
$(TOP_DIR) \
|
||||
&& cmake --build .
|
||||
|
||||
integration-test:
|
||||
$(MKDIR) && \
|
||||
$(CMAKE) $(TOP_DIR) \
|
||||
-DBoost_USE_STATIC_LIBS=$(BUILD_STATIC) \
|
||||
-DOPENSSL_USE_STATIC_LIBS=$(BUILD_STATIC) \
|
||||
-DCMAKE_BUILD_TYPE=$(BUILD_TYPE) \
|
||||
-DBUILD_TESTS=$(BUILD_TESTS) \
|
||||
-DINTEGRATION_TEST=ON \
|
||||
&& cmake --build .
|
||||
|
||||
tests: all
|
||||
./$(BUILD_DIR)/unit_test/Test --log_level=all
|
||||
|
||||
clean:
|
||||
rm -rf build/$(SUB_DIR)
|
||||
|
||||
clean-all:
|
||||
rm -rf build
|
||||
|
||||
format:
|
||||
clang-format -style=file -i --verbose \
|
||||
httpserver/*.cpp httpserver/*.h \
|
||||
crypto/**/*.cpp crypto/**/*.hpp crypto/**/*.h \
|
||||
pow/**/*.cpp pow/**/*.hpp \
|
||||
storage/**/*.cpp storage/**/*.hpp \
|
||||
utils/**/*.cpp utils/**/*.hpp \
|
||||
unit_test/*.cpp \
|
||||
common/**/*.cpp common/**/*.h \
|
||||
|
||||
.PHONY: all clean format rebuild
|
85
README.md
85
README.md
|
@ -1,55 +1,54 @@
|
|||
# loki-storage-server
|
||||
Storage server for Loki Service Nodes
|
||||
# Oxen Storage Server
|
||||
|
||||
Storage server for Oxen Service Nodes
|
||||
|
||||
## Binary releases
|
||||
|
||||
Pre-built releases (with system service files) are available for Ubuntu/Debian on
|
||||
https://deb.oxen.io and are recommended for simple deployment and updates on those distributions.
|
||||
|
||||
## Building from source
|
||||
|
||||
The default build compiles for the current system and requires the following be installed (including
|
||||
headers/dev packages for the libraries):
|
||||
|
||||
Requirements:
|
||||
* Boost >= 1.66 (for boost.beast)
|
||||
* OpenSSL >= 1.1.1a (for X25519 curves)
|
||||
* sodium >= 1.0.17 (for ed25119 to curve25519 conversion)
|
||||
* cmake >= 3.10
|
||||
* OpenSSL >= 1.1.1
|
||||
* libsodium >= 1.0.17
|
||||
* pkg-config (any version)
|
||||
* libcurl
|
||||
* jemalloc (not strictly required but recommended for reduced long-term memory use)
|
||||
* autoconf (for building jemalloc)
|
||||
|
||||
You can, however, download and build static versions these dependencies
|
||||
as part of the build by adding the `-DBUILD_STATIC_DEPS=ON` option to cmake.
|
||||
Other dependencies will be used from the system if found, but if not found will be compiled and
|
||||
built statically from bundled versions:
|
||||
* spdlog >= 1.8
|
||||
* libzmq >= 4.3
|
||||
* oxen-mq >= 1.2.6
|
||||
* oxen-encoding >= 1.0.1
|
||||
* sqlite >= 3.35.5
|
||||
|
||||
You can, however, instruct the build to download and build static versions of all of these
|
||||
dependencies (other than autoconf) as part of the build by adding the `-DBUILD_STATIC_DEPS=ON`
|
||||
option to the `cmake` command below. (This will, however, result in a slower build and larger,
|
||||
slower binary, as is typical for static builds).
|
||||
|
||||
Can use `RelWithDebInfo` instead of `Release` if you want to include debug symbols to provide developers with valueable core dumps from crashes.
|
||||
Also make sure you don't have an older (than 4.3.0) libzmq header in /usr/local/include, if so please install a new version.
|
||||
```
|
||||
git submodule update --init --recursive
|
||||
mkdir build && cd build
|
||||
cmake -DDISABLE_SNODE_SIGNATURE=OFF -DCMAKE_BUILD_TYPE=Release ..
|
||||
cmake --build .
|
||||
./loki-storage 0.0.0.0 8080
|
||||
cmake -DCMAKE_BUILD_TYPE=Release ..
|
||||
make -j4
|
||||
```
|
||||
|
||||
The paths for Boost and OpenSSL can be specified by exporting the variables in the terminal before running `make`:
|
||||
```
|
||||
export OPENSSL_ROOT_DIR = ...
|
||||
export BOOST_ROOT= ...
|
||||
```
|
||||
The build will produce a `./build/httpserver/oxen-storage` binary. You can run it with `--help` to
|
||||
see supported run-time options.
|
||||
|
||||
Then using something like Postman (https://www.getpostman.com/) you can hit the API:
|
||||
# Running
|
||||
|
||||
# post data
|
||||
```
|
||||
HTTP POST http://127.0.0.1/store
|
||||
body: "hello world"
|
||||
headers:
|
||||
- X-Loki-recipient: "mypubkey"
|
||||
- X-Loki-ttl: "86400"
|
||||
- X-Loki-timestamp: "1540860811000"
|
||||
- X-Loki-pow-nonce: "xxxx..."
|
||||
```
|
||||
# get data
|
||||
```
|
||||
HTTP GET http://127.0.0.1/retrieve
|
||||
headers:
|
||||
- X-Loki-recipient: "mypubkey"
|
||||
- X-Loki-last-hash: "" (optional)
|
||||
```
|
||||
Oxen Storage Server is a required component of an Oxen Service Node and needs to talk to a running
|
||||
`oxend` in order to join the network. The program defaults are designed to work with a default
|
||||
oxend, but for advanced configurations (e.g. to run on different ports) you may need to use other
|
||||
options. Run the program with `--help` to see all available options.
|
||||
|
||||
# unit tests
|
||||
```
|
||||
mkdir build_test
|
||||
cd build_test
|
||||
cmake ../unit_test -DBOOST_ROOT="path to boost" -DOPENSSL_ROOT_DIR="path to openssl"
|
||||
cmake --build .
|
||||
./Test --log_level=all
|
||||
```
|
||||
See https://docs.oxen.io/ for additional details on setting up and running an Oxen Service Node.
|
||||
|
|
51
cmake/DownloadLibSodium.cmake
Normal file
51
cmake/DownloadLibSodium.cmake
Normal file
|
@ -0,0 +1,51 @@
|
|||
|
||||
set(LIBSODIUM_PREFIX ${CMAKE_BINARY_DIR}/libsodium)
|
||||
set(LIBSODIUM_URL https://github.com/jedisct1/libsodium/releases/download/1.0.18-RELEASE/libsodium-1.0.18.tar.gz https://download.libsodium.org/libsodium/releases/libsodium-1.0.18.tar.gz)
|
||||
set(LIBSODIUM_HASH SHA512=17e8638e46d8f6f7d024fe5559eccf2b8baf23e143fadd472a7d29d228b186d86686a5e6920385fe2020729119a5f12f989c3a782afbd05a8db4819bb18666ef)
|
||||
|
||||
if(SODIUM_TARBALL_URL)
|
||||
# make a build time override of the tarball url so we can fetch it if the original link goes away
|
||||
set(LIBSODIUM_URL ${SODIUM_TARBALL_URL})
|
||||
endif()
|
||||
|
||||
|
||||
file(MAKE_DIRECTORY ${LIBSODIUM_PREFIX}/include)
|
||||
|
||||
include(ExternalProject)
|
||||
include(ProcessorCount)
|
||||
ProcessorCount(PROCESSOR_COUNT)
|
||||
if(PROCESSOR_COUNT EQUAL 0)
|
||||
set(PROCESSOR_COUNT 1)
|
||||
endif()
|
||||
|
||||
set(sodium_cc ${CMAKE_C_COMPILER})
|
||||
if(CCACHE_PROGRAM)
|
||||
set(sodium_cc "${CCACHE_PROGRAM} ${sodium_cc}")
|
||||
endif()
|
||||
set(SODIUM_CONFIGURE ./configure --prefix=${LIBSODIUM_PREFIX} --enable-static --disable-shared --with-pic --quiet CC=${sodium_cc})
|
||||
if (CMAKE_C_COMPILER_ARG1)
|
||||
set(SODIUM_CONFIGURE ${SODIUM_CONFIGURE} CPPFLAGS=${CMAKE_C_COMPILER_ARG1})
|
||||
endif()
|
||||
|
||||
if (CROSS_TARGET)
|
||||
set(SODIUM_CONFIGURE ${SODIUM_CONFIGURE} --target=${CROSS_TARGET} --host=${CROSS_TARGET})
|
||||
endif()
|
||||
|
||||
|
||||
ExternalProject_Add(libsodium_external
|
||||
BUILD_IN_SOURCE ON
|
||||
PREFIX ${LIBSODIUM_PREFIX}
|
||||
URL ${LIBSODIUM_URL}
|
||||
URL_HASH ${LIBSODIUM_HASH}
|
||||
CONFIGURE_COMMAND ${SODIUM_CONFIGURE}
|
||||
BUILD_COMMAND make -j${PROCESSOR_COUNT}
|
||||
INSTALL_COMMAND ${MAKE}
|
||||
BUILD_BYPRODUCTS ${LIBSODIUM_PREFIX}/lib/libsodium.a ${LIBSODIUM_PREFIX}/include
|
||||
)
|
||||
|
||||
add_library(sodium_vendor STATIC IMPORTED GLOBAL)
|
||||
add_dependencies(sodium_vendor libsodium_external)
|
||||
set_target_properties(sodium_vendor PROPERTIES
|
||||
IMPORTED_LOCATION ${LIBSODIUM_PREFIX}/lib/libsodium.a
|
||||
INTERFACE_INCLUDE_DIRECTORIES ${LIBSODIUM_PREFIX}/include
|
||||
)
|
|
@ -1,288 +0,0 @@
|
|||
# Written in 2016 by Henrik Steffen Gaßmann <henrik@gassmann.onl>
|
||||
#
|
||||
# To the extent possible under law, the author(s) have dedicated all
|
||||
# copyright and related and neighboring rights to this software to the
|
||||
# public domain worldwide. This software is distributed without any warranty.
|
||||
#
|
||||
# You should have received a copy of the CC0 Public Domain Dedication
|
||||
# along with this software. If not, see
|
||||
#
|
||||
# http://creativecommons.org/publicdomain/zero/1.0/
|
||||
#
|
||||
########################################################################
|
||||
# Tries to find the local libsodium installation.
|
||||
#
|
||||
# On Windows the sodium_DIR environment variable is used as a default
|
||||
# hint which can be overridden by setting the corresponding cmake variable.
|
||||
#
|
||||
# Once done the following variables will be defined:
|
||||
#
|
||||
# sodium_FOUND
|
||||
# sodium_INCLUDE_DIR
|
||||
# sodium_LIBRARY_DEBUG
|
||||
# sodium_LIBRARY_RELEASE
|
||||
#
|
||||
#
|
||||
# Furthermore an imported "sodium" target is created.
|
||||
#
|
||||
|
||||
if (CMAKE_C_COMPILER_ID STREQUAL "GNU"
|
||||
OR CMAKE_C_COMPILER_ID STREQUAL "Clang")
|
||||
set(_GCC_COMPATIBLE 1)
|
||||
endif()
|
||||
|
||||
# static library option
|
||||
if (NOT DEFINED sodium_USE_STATIC_LIBS)
|
||||
option(sodium_USE_STATIC_LIBS "enable to statically link against sodium" OFF)
|
||||
endif()
|
||||
if(NOT (sodium_USE_STATIC_LIBS EQUAL sodium_USE_STATIC_LIBS_LAST))
|
||||
unset(sodium_LIBRARY CACHE)
|
||||
unset(sodium_LIBRARY_DEBUG CACHE)
|
||||
unset(sodium_LIBRARY_RELEASE CACHE)
|
||||
unset(sodium_DLL_DEBUG CACHE)
|
||||
unset(sodium_DLL_RELEASE CACHE)
|
||||
set(sodium_USE_STATIC_LIBS_LAST ${sodium_USE_STATIC_LIBS} CACHE INTERNAL "internal change tracking variable")
|
||||
endif()
|
||||
|
||||
|
||||
########################################################################
|
||||
# UNIX
|
||||
if (UNIX)
|
||||
# import pkg-config
|
||||
find_package(PkgConfig QUIET)
|
||||
if (PKG_CONFIG_FOUND)
|
||||
pkg_check_modules(sodium_PKG QUIET libsodium)
|
||||
endif()
|
||||
|
||||
if(sodium_USE_STATIC_LIBS)
|
||||
foreach(_libname ${sodium_PKG_STATIC_LIBRARIES})
|
||||
if (NOT _libname MATCHES "^lib.*\\.a$") # ignore strings already ending with .a
|
||||
list(INSERT sodium_PKG_STATIC_LIBRARIES 0 "lib${_libname}.a")
|
||||
endif()
|
||||
endforeach()
|
||||
list(REMOVE_DUPLICATES sodium_PKG_STATIC_LIBRARIES)
|
||||
|
||||
# if pkgconfig for libsodium doesn't provide
|
||||
# static lib info, then override PKG_STATIC here..
|
||||
if (sodium_PKG_STATIC_LIBRARIES STREQUAL "")
|
||||
set(sodium_PKG_STATIC_LIBRARIES libsodium.a)
|
||||
endif()
|
||||
|
||||
set(XPREFIX sodium_PKG_STATIC)
|
||||
else()
|
||||
if (sodium_PKG_LIBRARIES STREQUAL "")
|
||||
set(sodium_PKG_LIBRARIES sodium)
|
||||
endif()
|
||||
|
||||
set(XPREFIX sodium_PKG)
|
||||
endif()
|
||||
|
||||
find_path(sodium_INCLUDE_DIR sodium.h
|
||||
HINTS ${${XPREFIX}_INCLUDE_DIRS}
|
||||
)
|
||||
find_library(sodium_LIBRARY_DEBUG NAMES ${${XPREFIX}_LIBRARIES}
|
||||
HINTS ${${XPREFIX}_LIBRARY_DIRS}
|
||||
)
|
||||
find_library(sodium_LIBRARY_RELEASE NAMES ${${XPREFIX}_LIBRARIES}
|
||||
HINTS ${${XPREFIX}_LIBRARY_DIRS}
|
||||
)
|
||||
|
||||
|
||||
########################################################################
|
||||
# Windows
|
||||
elseif (WIN32)
|
||||
set(sodium_DIR "$ENV{sodium_DIR}" CACHE FILEPATH "sodium install directory")
|
||||
mark_as_advanced(sodium_DIR)
|
||||
|
||||
find_path(sodium_INCLUDE_DIR sodium.h
|
||||
HINTS ${sodium_DIR}
|
||||
PATH_SUFFIXES include
|
||||
)
|
||||
|
||||
if (MSVC)
|
||||
# detect target architecture
|
||||
file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/arch.c" [=[
|
||||
#if defined _M_IX86
|
||||
#error ARCH_VALUE x86_32
|
||||
#elif defined _M_X64
|
||||
#error ARCH_VALUE x86_64
|
||||
#endif
|
||||
#error ARCH_VALUE unknown
|
||||
]=])
|
||||
try_compile(_UNUSED_VAR "${CMAKE_CURRENT_BINARY_DIR}" "${CMAKE_CURRENT_BINARY_DIR}/arch.c"
|
||||
OUTPUT_VARIABLE _COMPILATION_LOG
|
||||
)
|
||||
string(REGEX REPLACE ".*ARCH_VALUE ([a-zA-Z0-9_]+).*" "\\1" _TARGET_ARCH "${_COMPILATION_LOG}")
|
||||
|
||||
# construct library path
|
||||
if (_TARGET_ARCH STREQUAL "x86_32")
|
||||
string(APPEND _PLATFORM_PATH "Win32")
|
||||
elseif(_TARGET_ARCH STREQUAL "x86_64")
|
||||
string(APPEND _PLATFORM_PATH "x64")
|
||||
else()
|
||||
message(FATAL_ERROR "the ${_TARGET_ARCH} architecture is not supported by Findsodium.cmake.")
|
||||
endif()
|
||||
string(APPEND _PLATFORM_PATH "/$$CONFIG$$")
|
||||
|
||||
if (MSVC_VERSION LESS 1900)
|
||||
math(EXPR _VS_VERSION "${MSVC_VERSION} / 10 - 60")
|
||||
else()
|
||||
math(EXPR _VS_VERSION "${MSVC_VERSION} / 10 - 50")
|
||||
endif()
|
||||
string(APPEND _PLATFORM_PATH "/v${_VS_VERSION}")
|
||||
|
||||
if (sodium_USE_STATIC_LIBS)
|
||||
string(APPEND _PLATFORM_PATH "/static")
|
||||
else()
|
||||
string(APPEND _PLATFORM_PATH "/dynamic")
|
||||
endif()
|
||||
|
||||
string(REPLACE "$$CONFIG$$" "Debug" _DEBUG_PATH_SUFFIX "${_PLATFORM_PATH}")
|
||||
string(REPLACE "$$CONFIG$$" "Release" _RELEASE_PATH_SUFFIX "${_PLATFORM_PATH}")
|
||||
|
||||
find_library(sodium_LIBRARY_DEBUG libsodium.lib
|
||||
HINTS ${sodium_DIR}
|
||||
PATH_SUFFIXES ${_DEBUG_PATH_SUFFIX}
|
||||
)
|
||||
find_library(sodium_LIBRARY_RELEASE libsodium.lib
|
||||
HINTS ${sodium_DIR}
|
||||
PATH_SUFFIXES ${_RELEASE_PATH_SUFFIX}
|
||||
)
|
||||
if (NOT sodium_USE_STATIC_LIBS)
|
||||
set(CMAKE_FIND_LIBRARY_SUFFIXES_BCK ${CMAKE_FIND_LIBRARY_SUFFIXES})
|
||||
set(CMAKE_FIND_LIBRARY_SUFFIXES ".dll")
|
||||
find_library(sodium_DLL_DEBUG libsodium
|
||||
HINTS ${sodium_DIR}
|
||||
PATH_SUFFIXES ${_DEBUG_PATH_SUFFIX}
|
||||
)
|
||||
find_library(sodium_DLL_RELEASE libsodium
|
||||
HINTS ${sodium_DIR}
|
||||
PATH_SUFFIXES ${_RELEASE_PATH_SUFFIX}
|
||||
)
|
||||
set(CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES_BCK})
|
||||
endif()
|
||||
|
||||
elseif(_GCC_COMPATIBLE)
|
||||
if (sodium_USE_STATIC_LIBS)
|
||||
find_library(sodium_LIBRARY_DEBUG libsodium.a
|
||||
HINTS ${sodium_DIR}
|
||||
PATH_SUFFIXES lib
|
||||
)
|
||||
find_library(sodium_LIBRARY_RELEASE libsodium.a
|
||||
HINTS ${sodium_DIR}
|
||||
PATH_SUFFIXES lib
|
||||
)
|
||||
else()
|
||||
find_library(sodium_LIBRARY_DEBUG libsodium.dll.a
|
||||
HINTS ${sodium_DIR}
|
||||
PATH_SUFFIXES lib
|
||||
)
|
||||
find_library(sodium_LIBRARY_RELEASE libsodium.dll.a
|
||||
HINTS ${sodium_DIR}
|
||||
PATH_SUFFIXES lib
|
||||
)
|
||||
|
||||
file(GLOB _DLL
|
||||
LIST_DIRECTORIES false
|
||||
RELATIVE "${sodium_DIR}/bin"
|
||||
"${sodium_DIR}/bin/libsodium*.dll"
|
||||
)
|
||||
find_library(sodium_DLL_DEBUG ${_DLL} libsodium
|
||||
HINTS ${sodium_DIR}
|
||||
PATH_SUFFIXES bin
|
||||
)
|
||||
find_library(sodium_DLL_RELEASE ${_DLL} libsodium
|
||||
HINTS ${sodium_DIR}
|
||||
PATH_SUFFIXES bin
|
||||
)
|
||||
endif()
|
||||
else()
|
||||
message(FATAL_ERROR "this platform is not supported by FindSodium.cmake")
|
||||
endif()
|
||||
|
||||
|
||||
########################################################################
|
||||
# unsupported
|
||||
else()
|
||||
message(FATAL_ERROR "this platform is not supported by FindSodium.cmake")
|
||||
endif()
|
||||
|
||||
|
||||
########################################################################
|
||||
# common stuff
|
||||
|
||||
# extract sodium version
|
||||
if (sodium_INCLUDE_DIR)
|
||||
set(_VERSION_HEADER "${sodium_INCLUDE_DIR}/sodium/version.h")
|
||||
if (EXISTS ${_VERSION_HEADER})
|
||||
file(READ "${_VERSION_HEADER}" _VERSION_HEADER_CONTENT)
|
||||
string(REGEX REPLACE ".*#[ \t]*define[ \t]*SODIUM_VERSION_STRING[ \t]*\"([^\n]*)\".*" "\\1"
|
||||
sodium_VERSION "${_VERSION_HEADER_CONTENT}")
|
||||
set(sodium_VERSION "${sodium_VERSION}")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# communicate results
|
||||
include(FindPackageHandleStandardArgs)
|
||||
find_package_handle_standard_args(sodium
|
||||
REQUIRED_VARS
|
||||
sodium_LIBRARY_RELEASE
|
||||
sodium_LIBRARY_DEBUG
|
||||
sodium_INCLUDE_DIR
|
||||
VERSION_VAR
|
||||
sodium_VERSION
|
||||
)
|
||||
|
||||
# mark file paths as advanced
|
||||
mark_as_advanced(sodium_INCLUDE_DIR)
|
||||
mark_as_advanced(sodium_LIBRARY_DEBUG)
|
||||
mark_as_advanced(sodium_LIBRARY_RELEASE)
|
||||
if (WIN32)
|
||||
mark_as_advanced(sodium_DLL_DEBUG)
|
||||
mark_as_advanced(sodium_DLL_RELEASE)
|
||||
endif()
|
||||
|
||||
# create imported target
|
||||
if(sodium_USE_STATIC_LIBS)
|
||||
set(_LIB_TYPE STATIC)
|
||||
else()
|
||||
set(_LIB_TYPE SHARED)
|
||||
endif()
|
||||
add_library(sodium ${_LIB_TYPE} IMPORTED)
|
||||
|
||||
set_target_properties(sodium PROPERTIES
|
||||
INTERFACE_INCLUDE_DIRECTORIES "${sodium_INCLUDE_DIR}"
|
||||
IMPORTED_LINK_INTERFACE_LANGUAGES "C"
|
||||
)
|
||||
|
||||
if (sodium_USE_STATIC_LIBS)
|
||||
set_target_properties(sodium PROPERTIES
|
||||
INTERFACE_COMPILE_DEFINITIONS "SODIUM_STATIC"
|
||||
IMPORTED_LOCATION "${sodium_LIBRARY_RELEASE}"
|
||||
IMPORTED_LOCATION_DEBUG "${sodium_LIBRARY_DEBUG}"
|
||||
)
|
||||
else()
|
||||
if (UNIX)
|
||||
set_target_properties(sodium PROPERTIES
|
||||
IMPORTED_LOCATION "${sodium_LIBRARY_RELEASE}"
|
||||
IMPORTED_LOCATION_DEBUG "${sodium_LIBRARY_DEBUG}"
|
||||
)
|
||||
elseif (WIN32)
|
||||
set_target_properties(sodium PROPERTIES
|
||||
IMPORTED_IMPLIB "${sodium_LIBRARY_RELEASE}"
|
||||
IMPORTED_IMPLIB_DEBUG "${sodium_LIBRARY_DEBUG}"
|
||||
)
|
||||
if (NOT (sodium_DLL_DEBUG MATCHES ".*-NOTFOUND"))
|
||||
set_target_properties(sodium PROPERTIES
|
||||
IMPORTED_LOCATION_DEBUG "${sodium_DLL_DEBUG}"
|
||||
)
|
||||
endif()
|
||||
if (NOT (sodium_DLL_RELEASE MATCHES ".*-NOTFOUND"))
|
||||
set_target_properties(sodium PROPERTIES
|
||||
IMPORTED_LOCATION_RELWITHDEBINFO "${sodium_DLL_RELEASE}"
|
||||
IMPORTED_LOCATION_MINSIZEREL "${sodium_DLL_RELEASE}"
|
||||
IMPORTED_LOCATION_RELEASE "${sodium_DLL_RELEASE}"
|
||||
)
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
60
cmake/GenVersion.cmake
Normal file
60
cmake/GenVersion.cmake
Normal file
|
@ -0,0 +1,60 @@
|
|||
# Copyright (c) 2014-2019, The Monero Project
|
||||
# Copyright (c) 2019-2022, The Oxen Project
|
||||
#
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without modification, are
|
||||
# permitted provided that the following conditions are met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this list of
|
||||
# conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
|
||||
# of conditions and the following disclaimer in the documentation and/or other
|
||||
# materials provided with the distribution.
|
||||
#
|
||||
# 3. Neither the name of the copyright holder nor the names of its contributors may be
|
||||
# used to endorse or promote products derived from this software without specific
|
||||
# prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
|
||||
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
|
||||
# THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
|
||||
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
|
||||
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
# Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers
|
||||
|
||||
# Check what commit we're on
|
||||
execute_process(COMMAND "${GIT}" rev-parse --short HEAD RESULT_VARIABLE RET OUTPUT_VARIABLE COMMIT OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
|
||||
if(RET)
|
||||
# Something went wrong, set the version tag to -unknown
|
||||
message(WARNING "Cannot determine current commit. Make sure that you are building either from a Git working tree or from a source archive.")
|
||||
set(VERSIONTAG "unknown")
|
||||
else()
|
||||
message(STATUS "You are currently on commit ${COMMIT}")
|
||||
|
||||
# Get all the tags
|
||||
execute_process(COMMAND "${GIT}" rev-list --tags --max-count=1 --abbrev-commit RESULT_VARIABLE RET OUTPUT_VARIABLE TAGGEDCOMMIT OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
|
||||
if(NOT TAGGEDCOMMIT)
|
||||
message(WARNING "Cannot determine most recent tag. Make sure that you are building either from a Git working tree or from a source archive.")
|
||||
set(VERSIONTAG "${COMMIT}")
|
||||
else()
|
||||
# Check if we're building that tagged commit or a different one
|
||||
if(COMMIT STREQUAL TAGGEDCOMMIT)
|
||||
message(STATUS "${COMMIT} is a tagged release; setting version tag to 'release'")
|
||||
set(VERSIONTAG "release")
|
||||
else()
|
||||
message(STATUS "You are not building a tagged release; setting version tag to '${COMMIT}'")
|
||||
set(VERSIONTAG "${COMMIT}")
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
|
||||
configure_file("${SRC}" "${DEST}" @ONLY)
|
|
@ -5,20 +5,12 @@
|
|||
|
||||
set(LOCAL_MIRROR "" CACHE STRING "local mirror path/URL for lib downloads")
|
||||
|
||||
set(OPENSSL_VERSION 1.1.1i CACHE STRING "openssl version")
|
||||
set(OPENSSL_VERSION 3.0.7 CACHE STRING "openssl version")
|
||||
set(OPENSSL_MIRROR ${LOCAL_MIRROR} https://www.openssl.org/source CACHE STRING "openssl download mirror(s)")
|
||||
set(OPENSSL_SOURCE openssl-${OPENSSL_VERSION}.tar.gz)
|
||||
set(OPENSSL_HASH SHA256=e8be6a35fe41d10603c3cc635e93289ed00bf34b79671a3a4de64fcee00d5242
|
||||
set(OPENSSL_HASH SHA256=83049d042a260e696f62406ac5c08bf706fd84383f945cf21bd61e9ed95c396e
|
||||
CACHE STRING "openssl source hash")
|
||||
|
||||
set(BOOST_VERSION 1.75.0 CACHE STRING "boost version")
|
||||
set(BOOST_MIRROR ${LOCAL_MIRROR} https://dl.bintray.com/boostorg/release/${BOOST_VERSION}/source
|
||||
CACHE STRING "boost download mirror(s)")
|
||||
string(REPLACE "." "_" BOOST_VERSION_ ${BOOST_VERSION})
|
||||
set(BOOST_SOURCE boost_${BOOST_VERSION_}.tar.bz2)
|
||||
set(BOOST_HASH SHA256=953db31e016db7bb207f11432bef7df100516eeb746843fa0486a222e3fd49cb
|
||||
CACHE STRING "boost source hash")
|
||||
|
||||
set(SODIUM_VERSION 1.0.18 CACHE STRING "libsodium version")
|
||||
set(SODIUM_MIRROR ${LOCAL_MIRROR}
|
||||
https://download.libsodium.org/libsodium/releases
|
||||
|
@ -28,20 +20,36 @@ set(SODIUM_SOURCE libsodium-${SODIUM_VERSION}.tar.gz)
|
|||
set(SODIUM_HASH SHA512=17e8638e46d8f6f7d024fe5559eccf2b8baf23e143fadd472a7d29d228b186d86686a5e6920385fe2020729119a5f12f989c3a782afbd05a8db4819bb18666ef
|
||||
CACHE STRING "libsodium source hash")
|
||||
|
||||
set(SQLITE3_VERSION 3340000 CACHE STRING "sqlite3 version")
|
||||
set(SQLITE3_MIRROR ${LOCAL_MIRROR} https://www.sqlite.org/2020
|
||||
CACHE STRING "sqlite3 download mirror(s)")
|
||||
set(SQLITE3_SOURCE sqlite-autoconf-${SQLITE3_VERSION}.tar.gz)
|
||||
set(SQLITE3_HASH SHA512=75a1a2d86ab41354941b8574e780b1eae09c3c01f8da4b08f606b96962b80550f739ec7e9b1ceb07bba1cedced6d18a1408e4c10ff645eb1829d368ad308cf2f
|
||||
CACHE STRING "sqlite3 source hash")
|
||||
include(sqlite3_source)
|
||||
|
||||
set(ZMQ_VERSION 4.3.3 CACHE STRING "libzmq version")
|
||||
set(ZMQ_VERSION 4.3.4 CACHE STRING "libzmq version")
|
||||
set(ZMQ_MIRROR ${LOCAL_MIRROR} https://github.com/zeromq/libzmq/releases/download/v${ZMQ_VERSION}
|
||||
CACHE STRING "libzmq mirror(s)")
|
||||
set(ZMQ_SOURCE zeromq-${ZMQ_VERSION}.tar.gz)
|
||||
set(ZMQ_HASH SHA512=4c18d784085179c5b1fcb753a93813095a12c8d34970f2e1bfca6499be6c9d67769c71c68b7ca54ff181b20390043170e89733c22f76ff1ea46494814f7095b1
|
||||
set(ZMQ_HASH SHA512=e198ef9f82d392754caadd547537666d4fba0afd7d027749b3adae450516bcf284d241d4616cad3cb4ad9af8c10373d456de92dc6d115b037941659f141e7c0e
|
||||
CACHE STRING "libzmq source hash")
|
||||
|
||||
set(LIBUV_VERSION 1.44.2 CACHE STRING "libuv version")
|
||||
set(LIBUV_MIRROR ${LOCAL_MIRROR} https://dist.libuv.org/dist/v${LIBUV_VERSION}
|
||||
CACHE STRING "libuv mirror(s)")
|
||||
set(LIBUV_SOURCE libuv-v${LIBUV_VERSION}.tar.gz)
|
||||
set(LIBUV_HASH SHA512=91197ff9303112567bbb915bbb88058050e2ad1c048815a3b57c054635d5dc7df458b956089d785475290132236cb0edcfae830f5d749de29a9a3213eeaf0b20
|
||||
CACHE STRING "libuv source hash")
|
||||
|
||||
set(ZLIB_VERSION 1.2.13 CACHE STRING "zlib version")
|
||||
set(ZLIB_MIRROR ${LOCAL_MIRROR} https://zlib.net
|
||||
CACHE STRING "zlib mirror(s)")
|
||||
set(ZLIB_SOURCE zlib-${ZLIB_VERSION}.tar.xz)
|
||||
set(ZLIB_HASH SHA256=d14c38e313afc35a9a8760dadf26042f51ea0f5d154b0630a31da0540107fb98
|
||||
CACHE STRING "zlib source hash")
|
||||
|
||||
set(CURL_VERSION 7.87.0 CACHE STRING "curl version")
|
||||
set(CURL_MIRROR ${LOCAL_MIRROR} https://curl.se/download https://curl.askapache.com
|
||||
CACHE STRING "curl mirror(s)")
|
||||
set(CURL_SOURCE curl-${CURL_VERSION}.tar.xz)
|
||||
set(CURL_HASH SHA512=aa125991592667280dce3788aabe81487cf8c55b0afc59d675cc30b76055bb7114f5380b4a0e3b6461a8f81bf9812fa26d493a85f7e01d84263d484a0d699ee7
|
||||
CACHE STRING "curl source hash")
|
||||
|
||||
|
||||
|
||||
include(ExternalProject)
|
||||
|
@ -162,6 +170,26 @@ function(build_external target)
|
|||
)
|
||||
endfunction()
|
||||
|
||||
if (WIN32 OR (APPLE AND NOT IOS))
|
||||
build_external(libuv
|
||||
CONFIGURE_COMMAND ./autogen.sh && ./configure ${cross_host} ${cross_rc} --prefix=${DEPS_DESTDIR} --with-pic --disable-shared --enable-static "CC=${deps_cc}" "CFLAGS=${deps_CFLAGS}"
|
||||
BUILD_BYPRODUCTS
|
||||
${DEPS_DESTDIR}/lib/libuv.a
|
||||
${DEPS_DESTDIR}/include/uv.h
|
||||
)
|
||||
add_static_target(libuv libuv_external libuv.a)
|
||||
target_link_libraries(libuv INTERFACE ${CMAKE_DL_LIBS})
|
||||
endif()
|
||||
|
||||
|
||||
|
||||
build_external(zlib
|
||||
CONFIGURE_COMMAND ${CMAKE_COMMAND} -E env "CC=${deps_cc}" "CFLAGS=${deps_CFLAGS} -fPIC" ${cross_extra} ./configure --prefix=${DEPS_DESTDIR} --static
|
||||
BUILD_BYPRODUCTS
|
||||
${DEPS_DESTDIR}/lib/libz.a
|
||||
${DEPS_DESTDIR}/include/zlib.h
|
||||
)
|
||||
add_static_target(zlib zlib_external libz.a)
|
||||
|
||||
|
||||
set(openssl_configure ./config)
|
||||
|
@ -176,9 +204,10 @@ if(CMAKE_CROSSCOMPILING)
|
|||
endif()
|
||||
build_external(openssl
|
||||
CONFIGURE_COMMAND ${CMAKE_COMMAND} -E env CC=${openssl_cc} ${openssl_system_env} ${openssl_configure}
|
||||
--prefix=${DEPS_DESTDIR} ${openssl_extra_opts} no-shared no-capieng no-dso no-dtls1 no-ec_nistp_64_gcc_128 no-gost
|
||||
--prefix=${DEPS_DESTDIR} --libdir=lib ${openssl_extra_opts}
|
||||
no-shared no-capieng no-dso no-dtls1 no-ec_nistp_64_gcc_128 no-gost
|
||||
no-heartbeats no-md2 no-rc5 no-rdrand no-rfc3779 no-sctp no-ssl-trace no-ssl2 no-ssl3
|
||||
no-static-engine no-tests no-weak-ssl-ciphers no-zlib-dynamic "CFLAGS=${deps_CFLAGS}"
|
||||
no-static-engine no-tests no-weak-ssl-ciphers no-zlib no-zlib-dynamic "CFLAGS=${deps_CFLAGS}"
|
||||
INSTALL_COMMAND make install_sw
|
||||
BUILD_BYPRODUCTS
|
||||
${DEPS_DESTDIR}/lib/libssl.a ${DEPS_DESTDIR}/lib/libcrypto.a
|
||||
|
@ -186,88 +215,15 @@ build_external(openssl
|
|||
)
|
||||
add_static_target(OpenSSL::SSL openssl_external libssl.a)
|
||||
add_static_target(OpenSSL::Crypto openssl_external libcrypto.a)
|
||||
target_link_libraries(OpenSSL::SSL INTERFACE OpenSSL::Crypto)
|
||||
set(OPENSSL_INCLUDE_DIR ${DEPS_DESTDIR}/include)
|
||||
set(OPENSSL_VERSION 1.1.1)
|
||||
|
||||
|
||||
|
||||
set(boost_threadapi "pthread")
|
||||
set(boost_bootstrap_cxx "CXX=${deps_cxx}")
|
||||
set(boost_toolset "")
|
||||
set(boost_extra "")
|
||||
if(USE_LTO)
|
||||
list(APPEND boost_extra "lto=on")
|
||||
endif()
|
||||
if(CMAKE_CROSSCOMPILING)
|
||||
set(boost_bootstrap_cxx "") # need to use our native compiler to bootstrap
|
||||
if(ARCH_TRIPLET MATCHES mingw)
|
||||
set(boost_threadapi win32)
|
||||
list(APPEND boost_extra "target-os=windows")
|
||||
if(ARCH_TRIPLET MATCHES x86_64)
|
||||
list(APPEND boost_extra "address-model=64")
|
||||
else()
|
||||
list(APPEND boost_extra "address-model=32")
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
if(CMAKE_CXX_COMPILER_ID STREQUAL GNU)
|
||||
set(boost_toolset gcc)
|
||||
elseif(CMAKE_CXX_COMPILER_ID MATCHES "^(Apple)?Clang$")
|
||||
set(boost_toolset clang)
|
||||
else()
|
||||
message(FATAL_ERROR "don't know how to build boost with ${CMAKE_CXX_COMPILER_ID}")
|
||||
endif()
|
||||
|
||||
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/user-config.bjam "using ${boost_toolset} : : ${deps_cxx} ;")
|
||||
|
||||
set(boost_buildflags "cxxflags=-fPIC")
|
||||
if(APPLE AND CMAKE_OSX_DEPLOYMENT_TARGET)
|
||||
string(APPEND boost_buildflags " -mmacosx-version-min=${CMAKE_OSX_DEPLOYMENT_TARGET}" "cflags=-mmacosx-version-min=${CMAKE_OSX_DEPLOYMENT_TARGET}")
|
||||
endif()
|
||||
|
||||
set(boost_libs program_options system)
|
||||
if(BUILD_TESTS)
|
||||
list(APPEND boost_libs unit_test_framework)
|
||||
endif()
|
||||
string(REPLACE ";" "," boost_with_libraries "${boost_libs}")
|
||||
set(boost_static_libraries)
|
||||
foreach(lib ${boost_libs})
|
||||
list(APPEND boost_static_libraries "${DEPS_DESTDIR}/lib/libboost_${lib}.a")
|
||||
endforeach()
|
||||
|
||||
build_external(boost
|
||||
# PATCH_COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_CURRENT_BINARY_DIR}/user-config.bjam tools/build/src/user-config.jam
|
||||
CONFIGURE_COMMAND
|
||||
${CMAKE_COMMAND} -E env ${boost_bootstrap_cxx}
|
||||
./bootstrap.sh --without-icu --prefix=${DEPS_DESTDIR} --with-toolset=${boost_toolset}
|
||||
--with-libraries=${boost_with_libraries}
|
||||
BUILD_COMMAND true
|
||||
INSTALL_COMMAND
|
||||
./b2 -d0 variant=release link=static runtime-link=static optimization=speed ${boost_extra}
|
||||
threading=multi threadapi=${boost_threadapi} ${boost_buildflags} cxxstd=14 visibility=global
|
||||
--disable-icu --user-config=${CMAKE_CURRENT_BINARY_DIR}/user-config.bjam
|
||||
install
|
||||
BUILD_BYPRODUCTS
|
||||
${boost_static_libraries}
|
||||
${DEPS_DESTDIR}/include/boost/version.hpp
|
||||
)
|
||||
add_library(boost_core INTERFACE)
|
||||
add_dependencies(boost_core INTERFACE boost_external)
|
||||
target_include_directories(boost_core SYSTEM INTERFACE ${DEPS_DESTDIR}/include)
|
||||
add_library(Boost::boost ALIAS boost_core)
|
||||
foreach(lib ${boost_libs})
|
||||
add_static_target(Boost::${lib} boost_external libboost_${lib}.a)
|
||||
target_link_libraries(Boost::${lib} INTERFACE boost_core)
|
||||
endforeach()
|
||||
set(Boost_FOUND ON)
|
||||
set(Boost_VERSION ${BOOST_VERSION})
|
||||
|
||||
|
||||
|
||||
build_external(sqlite3
|
||||
BUILD_COMMAND true
|
||||
INSTALL_COMMAND make install-includeHEADERS install-libLTLIBRARIES)
|
||||
add_static_target(sqlite3 sqlite3_external libsqlite3.a)
|
||||
add_static_target(SQLite::SQLite3 sqlite3_external libsqlite3.a)
|
||||
|
||||
|
||||
|
||||
|
@ -275,7 +231,7 @@ build_external(sodium)
|
|||
add_static_target(sodium sodium_external libsodium.a)
|
||||
|
||||
|
||||
if(ZMQ_VERSION VERSION_LESS 4.3.4 AND CMAKE_CROSSCOMPILING AND ARCH_TRIPLET MATCHES mingw)
|
||||
if(CMAKE_CROSSCOMPILING AND ARCH_TRIPLET MATCHES mingw)
|
||||
set(zmq_patch PATCH_COMMAND patch -p1 -i ${PROJECT_SOURCE_DIR}/utils/build_scripts/libzmq-mingw-closesocket.patch)
|
||||
endif()
|
||||
|
||||
|
@ -301,3 +257,46 @@ endif()
|
|||
set_target_properties(libzmq PROPERTIES
|
||||
INTERFACE_LINK_LIBRARIES "${libzmq_link_libs}"
|
||||
INTERFACE_COMPILE_DEFINITIONS "ZMQ_STATIC")
|
||||
|
||||
|
||||
set(curl_extra)
|
||||
if(WIN32)
|
||||
set(curl_ssl_opts --with-schannel)
|
||||
elseif(APPLE)
|
||||
set(curl_ssl_opts --with-secure-transport)
|
||||
else()
|
||||
set(curl_ssl_opts --with-openssl=${DEPS_DESTDIR})
|
||||
set(curl_extra "LIBS=-pthread")
|
||||
endif()
|
||||
|
||||
build_external(curl
|
||||
DEPENDS openssl_external zlib_external
|
||||
CONFIGURE_COMMAND ./configure ${cross_host} ${cross_extra} --prefix=${DEPS_DESTDIR} --disable-shared
|
||||
--enable-static --disable-ares --disable-ftp --disable-ldap --disable-laps --disable-rtsp
|
||||
--disable-dict --disable-telnet --disable-tftp --disable-pop3 --disable-imap --disable-smb
|
||||
--disable-smtp --disable-gopher --disable-manual --disable-libcurl-option --enable-http
|
||||
--enable-ipv6 --disable-threaded-resolver --disable-pthreads --disable-verbose --disable-sspi
|
||||
--enable-crypto-auth --disable-ntlm-wb --disable-tls-srp --disable-unix-sockets --disable-cookies
|
||||
--enable-http-auth --enable-doh --disable-mime --enable-dateparse --disable-netrc --without-libidn2
|
||||
--disable-progress-meter --without-brotli --with-zlib=${DEPS_DESTDIR} ${curl_ssl_opts}
|
||||
--without-librtmp --disable-versioned-symbols --enable-hidden-symbols
|
||||
--without-zsh-functions-dir --without-fish-functions-dir --without-zstd
|
||||
--without-nghttp2 --without-nghttp3 --without-ngtcp2 --without-quiche
|
||||
"CC=${deps_cc}" "CFLAGS=${deps_noarch_CFLAGS}${cflags_extra}" ${curl_extra}
|
||||
BUILD_COMMAND true
|
||||
INSTALL_COMMAND make -C lib install && make -C include install
|
||||
BUILD_BYPRODUCTS
|
||||
${DEPS_DESTDIR}/lib/libcurl.a
|
||||
${DEPS_DESTDIR}/include/curl/curl.h
|
||||
)
|
||||
|
||||
add_static_target(CURL::libcurl curl_external libcurl.a)
|
||||
set(libcurl_link_libs OpenSSL::SSL zlib)
|
||||
if(CMAKE_CROSSCOMPILING AND ARCH_TRIPLET MATCHES mingw)
|
||||
list(APPEND libcurl_link_libs crypt32)
|
||||
elseif(APPLE)
|
||||
list(APPEND libcurl_link_libs "-framework Security -framework CoreFoundation -framework SystemConfiguration")
|
||||
endif()
|
||||
set_target_properties(CURL::libcurl PROPERTIES
|
||||
INTERFACE_LINK_LIBRARIES "${libcurl_link_libs}"
|
||||
INTERFACE_COMPILE_DEFINITIONS "CURL_STATICLIB")
|
||||
|
|
|
@ -1,12 +1,12 @@
|
|||
# Set up a `make strip` target that strips the built binary.
|
||||
add_custom_target(strip COMMAND ${CMAKE_STRIP} $<TARGET_FILE:httpserver>)
|
||||
add_custom_target(strip COMMAND ${CMAKE_STRIP} $<TARGET_FILE:daemon>)
|
||||
|
||||
# Figure out an appropriate tag using git to figure out a good filename
|
||||
find_package(Git)
|
||||
set(git_tag "-unknown")
|
||||
if(GIT_FOUND)
|
||||
execute_process(COMMAND "${GIT_EXECUTABLE}" rev-parse HEAD RESULT_VARIABLE ret OUTPUT_VARIABLE curr_commit OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
execute_process(COMMAND "${GIT_EXECUTABLE}" rev-parse master RESULT_VARIABLE ret2 OUTPUT_VARIABLE stable_commit OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
execute_process(COMMAND "${GIT_EXECUTABLE}" rev-parse stable RESULT_VARIABLE ret2 OUTPUT_VARIABLE stable_commit OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
if(NOT ret AND curr_commit STREQUAL "${stable_commit}")
|
||||
# Get the tag description; for a tagged release this will be just the tag (v1.2.3); for
|
||||
# something following a tag this will be something like "v1.2.3-2-abcdef" for something 2
|
||||
|
@ -45,15 +45,15 @@ endif()
|
|||
set(tar_dir "oxen-storage-${tar_os}-${PROJECT_VERSION}${git_tag}")
|
||||
add_custom_target(create_tarxz
|
||||
COMMAND ${CMAKE_COMMAND} -E make_directory "${tar_dir}"
|
||||
COMMAND ${CMAKE_COMMAND} -E copy_if_different $<TARGET_FILE:httpserver> "${tar_dir}"
|
||||
COMMAND ${CMAKE_COMMAND} -E copy_if_different $<TARGET_FILE:daemon> "${tar_dir}"
|
||||
COMMAND ${CMAKE_COMMAND} -E tar cvJ "${tar_dir}.tar.xz" -- "${tar_dir}"
|
||||
DEPENDS httpserver)
|
||||
DEPENDS daemon)
|
||||
|
||||
add_custom_target(create_zip
|
||||
COMMAND ${CMAKE_COMMAND} -E make_directory "${tar_dir}"
|
||||
COMMAND ${CMAKE_COMMAND} -E copy_if_different $<TARGET_FILE:httpserver> "${tar_dir}"
|
||||
COMMAND ${CMAKE_COMMAND} -E copy_if_different $<TARGET_FILE:daemon> "${tar_dir}"
|
||||
COMMAND ${CMAKE_COMMAND} -E tar cv "${tar_dir}.zip" --format=zip -- "${tar_dir}"
|
||||
DEPENDS httpserver)
|
||||
DEPENDS daemon)
|
||||
|
||||
add_custom_target(create_archive DEPENDS ${default_archive})
|
||||
|
||||
|
|
41
cmake/check_atomic.cmake
Normal file
41
cmake/check_atomic.cmake
Normal file
|
@ -0,0 +1,41 @@
|
|||
include(CheckCXXSourceCompiles)
|
||||
include(CheckLibraryExists)
|
||||
|
||||
function(check_working_cxx_atomics64 varname)
|
||||
set(OLD_CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS})
|
||||
set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -std=c++11")
|
||||
check_cxx_source_compiles("
|
||||
#include <atomic>
|
||||
#include <cstdint>
|
||||
std::atomic<uint64_t> x (0);
|
||||
int main() {
|
||||
uint64_t i = x.load(std::memory_order_relaxed);
|
||||
return 0;
|
||||
}
|
||||
" ${varname})
|
||||
set(CMAKE_REQUIRED_FLAGS ${OLD_CMAKE_REQUIRED_FLAGS})
|
||||
endfunction()
|
||||
|
||||
function(link_libatomic)
|
||||
check_working_cxx_atomics64(HAVE_CXX_ATOMICS64_WITHOUT_LIB)
|
||||
|
||||
if(HAVE_CXX_ATOMICS64_WITHOUT_LIB)
|
||||
message(STATUS "Have working 64bit atomics")
|
||||
return()
|
||||
endif()
|
||||
|
||||
check_library_exists(atomic __atomic_load_8 "" HAVE_CXX_LIBATOMICS64)
|
||||
if (HAVE_CXX_LIBATOMICS64)
|
||||
message(STATUS "Have 64bit atomics via library")
|
||||
list(APPEND CMAKE_REQUIRED_LIBRARIES "atomic")
|
||||
check_working_cxx_atomics64(HAVE_CXX_ATOMICS64_WITH_LIB)
|
||||
if (HAVE_CXX_ATOMICS64_WITH_LIB)
|
||||
message(STATUS "Can link with libatomic")
|
||||
link_libraries(atomic)
|
||||
return()
|
||||
endif()
|
||||
endif()
|
||||
|
||||
message(FATAL_ERROR "Host compiler must support 64-bit std::atomic!")
|
||||
endfunction()
|
||||
|
19
cmake/sqlite3_source.cmake
Normal file
19
cmake/sqlite3_source.cmake
Normal file
|
@ -0,0 +1,19 @@
|
|||
# Source version, download location, hash for sqlite3.
|
||||
#
|
||||
# This gets used both in the full StaticBuild code *and* in the general build code when the system
|
||||
# sqlite3 version is too old.
|
||||
|
||||
set(SQLITE3_VERSION "3390200" CACHE STRING "sqlite3 version")
|
||||
set(SQLITE3_MIRROR ${LOCAL_MIRROR} https://www.sqlite.org/2022
|
||||
CACHE STRING "sqlite3 download mirror(s)")
|
||||
set(SQLITE3_SOURCE sqlite-autoconf-${SQLITE3_VERSION}.tar.gz)
|
||||
set(SQLITE3_HASH SHA512=c16b50ade3c182d5473014ac0a51e2bb8a5cfc46e532c2bda77ae4d530336e2b57aa4f12dccb6aa2148d60e9289305bf20842ac95dc52f2d31df8eb5f0599de6
|
||||
CACHE STRING "sqlite3 source hash")
|
||||
|
||||
if(SQLITE3_VERSION MATCHES "^([0-9]+)(0([0-9])|([1-9][0-9]))(0([0-9])|([1-9][0-9]))[0-9][0-9]$")
|
||||
set(SQLite3_VERSION "${CMAKE_MATCH_1}.${CMAKE_MATCH_3}${CMAKE_MATCH_4}.${CMAKE_MATCH_6}${CMAKE_MATCH_7}" CACHE STRING "" FORCE)
|
||||
mark_as_advanced(SQLite3_VERSION)
|
||||
message(STATUS "Building static sqlite3 ${SQLite3_VERSION}")
|
||||
else()
|
||||
message(FATAL_ERROR "Couldn't figure out sqlite3 version from '${SQLITE3_VERSION}'")
|
||||
endif()
|
|
@ -1,11 +0,0 @@
|
|||
cmake_minimum_required(VERSION 3.1)
|
||||
|
||||
add_definitions(-DSPDLOG_COMPILED_LIB)
|
||||
|
||||
add_library(common STATIC
|
||||
src/oxen_logger.cpp
|
||||
)
|
||||
|
||||
add_subdirectory(../vendors/spdlog spdlog)
|
||||
target_link_libraries(common PUBLIC spdlog::spdlog filesystem)
|
||||
target_include_directories(common PUBLIC ${CMAKE_CURRENT_LIST_DIR}/include)
|
|
@ -1,74 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include <iostream>
|
||||
#include <mutex>
|
||||
#include <spdlog/sinks/base_sink.h>
|
||||
#include <vector>
|
||||
// A sink used to store most important logs for developers
|
||||
|
||||
namespace oxen {
|
||||
|
||||
template <typename Mutex>
|
||||
class dev_sink : public spdlog::sinks::base_sink<Mutex> {
|
||||
using Base = spdlog::sinks::base_sink<Mutex>;
|
||||
|
||||
// Potentially all entries will be returned in a
|
||||
// single message, so we should keep the limit
|
||||
// relatively small
|
||||
static constexpr size_t BUFFER_SIZE = 100;
|
||||
static constexpr size_t MAX_ENTRIES = 2 * BUFFER_SIZE;
|
||||
|
||||
std::vector<std::string> primary_buffer_;
|
||||
std::vector<std::string> secondary_buffer_;
|
||||
// size_t log_entires
|
||||
|
||||
protected:
|
||||
void sink_it_(const spdlog::details::log_msg& msg) override {
|
||||
spdlog::memory_buf_t formatted;
|
||||
Base::formatter_->format(msg, formatted);
|
||||
|
||||
if (primary_buffer_.size() >= BUFFER_SIZE) {
|
||||
secondary_buffer_ = std::move(primary_buffer_);
|
||||
primary_buffer_.clear();
|
||||
}
|
||||
|
||||
primary_buffer_.push_back(fmt::to_string(formatted));
|
||||
}
|
||||
|
||||
void flush_() override {
|
||||
// no op
|
||||
}
|
||||
|
||||
public:
|
||||
dev_sink() : spdlog::sinks::base_sink<Mutex>() {
|
||||
primary_buffer_.reserve(BUFFER_SIZE);
|
||||
secondary_buffer_.reserve(BUFFER_SIZE);
|
||||
}
|
||||
|
||||
std::vector<std::string> peek() {
|
||||
|
||||
std::lock_guard<Mutex> lock{this->mutex_};
|
||||
|
||||
std::vector<std::string> result;
|
||||
result.reserve(MAX_ENTRIES);
|
||||
|
||||
for (auto it = primary_buffer_.end() - 1;
|
||||
it >= primary_buffer_.begin() && result.size() < MAX_ENTRIES;
|
||||
--it) {
|
||||
result.push_back(*it);
|
||||
}
|
||||
|
||||
for (auto it = secondary_buffer_.end() - 1;
|
||||
it >= secondary_buffer_.begin() && result.size() < MAX_ENTRIES;
|
||||
--it) {
|
||||
result.push_back(*it);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
};
|
||||
|
||||
#include <mutex>
|
||||
using dev_sink_mt = dev_sink<std::mutex>;
|
||||
|
||||
} // namespace loki
|
|
@ -1,206 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include "spdlog/fmt/ostr.h" // for operator<< overload
|
||||
|
||||
#include <chrono>
|
||||
#include <cstdint>
|
||||
#include <ostream>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
// TODO: this should be a proper struct w/o heap allocation!
|
||||
using sn_pub_key_t = std::string;
|
||||
|
||||
using time_point_t = std::chrono::time_point<std::chrono::steady_clock>;
|
||||
|
||||
struct sn_record_t {
|
||||
|
||||
// our 32 byte pub keys should always be 52 bytes long in base32z
|
||||
static constexpr size_t BASE_LEN = 52;
|
||||
|
||||
private:
|
||||
uint16_t port_;
|
||||
// Required by OxenMQ
|
||||
uint16_t lmq_port_;
|
||||
// TODO: create separate types for different encodings of pubkeys,
|
||||
// so if we confuse them, it will be a compiler error
|
||||
// Snode address (pubkey plus .snode, was used for lokinet)
|
||||
std::string sn_address_;
|
||||
// We don't need this! (esp. since it is legacy key)
|
||||
std::string pub_key_base_32z_;
|
||||
std::string pub_key_hex_; // Monero legacy key
|
||||
std::string pubkey_x25519_hex_;
|
||||
std::string pubkey_x25519_bin_;
|
||||
std::string pubkey_ed25519_hex_;
|
||||
std::string ip_; // Snode ip
|
||||
|
||||
/// Set service node's public key in base32z (without .snode part)
|
||||
void set_address(const std::string& addr) {
|
||||
|
||||
if (addr.size() != BASE_LEN)
|
||||
throw std::runtime_error("snode public key has incorrect size");
|
||||
|
||||
sn_address_ = addr;
|
||||
sn_address_.append(".snode");
|
||||
pub_key_base_32z_ = addr;
|
||||
}
|
||||
|
||||
public:
|
||||
sn_record_t(uint16_t port, uint16_t lmq_port, const std::string& address,
|
||||
const std::string& pk_hex, const std::string& pk_x25519,
|
||||
const std::string& pk_x25519_bin, const std::string& pk_ed25519,
|
||||
const std::string& ip)
|
||||
: port_(port), lmq_port_(lmq_port), pub_key_hex_(pk_hex),
|
||||
pubkey_x25519_hex_(pk_x25519), pubkey_x25519_bin_(pk_x25519_bin),
|
||||
pubkey_ed25519_hex_(pk_ed25519), ip_(ip) {
|
||||
set_address(address);
|
||||
}
|
||||
|
||||
sn_record_t() = default;
|
||||
|
||||
// Sometimes the IP can change
|
||||
void set_ip(const std::string& ip) { ip_ = ip; }
|
||||
|
||||
uint16_t port() const { return port_; }
|
||||
uint16_t lmq_port() const { return lmq_port_; }
|
||||
const std::string& sn_address() const { return sn_address_; }
|
||||
const std::string& pub_key_base32z() const { return pub_key_base_32z_; }
|
||||
const std::string& pub_key_hex() const { return pub_key_hex_; }
|
||||
const std::string& pubkey_x25519_hex() const { return pubkey_x25519_hex_; }
|
||||
const std::string& pubkey_ed25519_hex() const {
|
||||
return pubkey_ed25519_hex_;
|
||||
}
|
||||
const std::string& pubkey_x25519_bin() const { return pubkey_x25519_bin_; }
|
||||
const std::string& ip() const { return ip_; }
|
||||
|
||||
template <typename OStream>
|
||||
friend OStream& operator<<(OStream& os, const sn_record_t& record) {
|
||||
#ifdef INTEGRATION_TEST
|
||||
os << record.port();
|
||||
#else
|
||||
os << record.sn_address();
|
||||
#endif
|
||||
}
|
||||
};
|
||||
|
||||
namespace oxen {
|
||||
|
||||
constexpr size_t MAINNET_USER_PUBKEY_SIZE = 66;
|
||||
constexpr size_t TESTNET_USER_PUBKEY_SIZE = 64;
|
||||
|
||||
struct net_type_t {
|
||||
|
||||
static net_type_t& get_instance() {
|
||||
static net_type_t net_type;
|
||||
return net_type;
|
||||
}
|
||||
|
||||
void set_testnet() { is_mainnet_ = false; }
|
||||
bool is_mainnet() { return is_mainnet_; }
|
||||
|
||||
private:
|
||||
bool is_mainnet_ = true;
|
||||
net_type_t() = default;
|
||||
};
|
||||
|
||||
inline bool is_mainnet() { return net_type_t::get_instance().is_mainnet(); }
|
||||
|
||||
inline void set_testnet() { net_type_t::get_instance().set_testnet(); }
|
||||
|
||||
inline size_t get_user_pubkey_size() {
|
||||
/// TODO: eliminate the need to check condition every time
|
||||
if (oxen::is_mainnet()) {
|
||||
return MAINNET_USER_PUBKEY_SIZE;
|
||||
} else {
|
||||
return TESTNET_USER_PUBKEY_SIZE;
|
||||
}
|
||||
}
|
||||
|
||||
class user_pubkey_t {
|
||||
|
||||
std::string pubkey_;
|
||||
|
||||
user_pubkey_t() {}
|
||||
|
||||
user_pubkey_t(std::string&& pk) : pubkey_(std::move(pk)) {}
|
||||
|
||||
user_pubkey_t(const std::string& pk) : pubkey_(pk) {}
|
||||
|
||||
public:
|
||||
static user_pubkey_t create(std::string&& pk, bool& success) {
|
||||
success = true;
|
||||
if (pk.size() != get_user_pubkey_size()) {
|
||||
success = false;
|
||||
return {};
|
||||
}
|
||||
return user_pubkey_t(std::move(pk));
|
||||
}
|
||||
|
||||
static user_pubkey_t create(const std::string& pk, bool& success) {
|
||||
success = true;
|
||||
if (pk.size() != get_user_pubkey_size()) {
|
||||
success = false;
|
||||
return {};
|
||||
}
|
||||
return user_pubkey_t(pk);
|
||||
}
|
||||
|
||||
const std::string& str() const { return pubkey_; }
|
||||
};
|
||||
|
||||
/// message as received by client
|
||||
struct message_t {
|
||||
|
||||
std::string pub_key;
|
||||
std::string data;
|
||||
std::string hash;
|
||||
uint64_t ttl;
|
||||
uint64_t timestamp;
|
||||
std::string nonce;
|
||||
|
||||
message_t(const std::string& pk, const std::string& text,
|
||||
const std::string& hash, uint64_t ttl, uint64_t timestamp,
|
||||
const std::string& nonce)
|
||||
: pub_key(pk), data(text), hash(hash), ttl(ttl), timestamp(timestamp),
|
||||
nonce(nonce) {}
|
||||
};
|
||||
|
||||
} // namespace oxen
|
||||
|
||||
namespace std {
|
||||
|
||||
template <>
|
||||
struct hash<sn_record_t> {
|
||||
std::size_t operator()(const sn_record_t& k) const {
|
||||
return hash<std::string>{}(k.pub_key_hex());
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace std
|
||||
|
||||
inline bool operator<(const sn_record_t& lhs, const sn_record_t& rhs) {
|
||||
return lhs.pub_key_hex() < rhs.pub_key_hex();
|
||||
}
|
||||
|
||||
[[maybe_unused]] static std::ostream& operator<<(std::ostream& os,
|
||||
const sn_record_t& sn) {
|
||||
#ifdef INTEGRATION_TEST
|
||||
return os << sn.port();
|
||||
#else
|
||||
return os << sn.sn_address();
|
||||
#endif
|
||||
}
|
||||
|
||||
static bool operator==(const sn_record_t& lhs, const sn_record_t& rhs) {
|
||||
// TODO: Change this to ed keys:
|
||||
return lhs.pub_key_hex() == rhs.pub_key_hex();
|
||||
}
|
||||
|
||||
[[maybe_unused]] static bool operator!=(const sn_record_t& lhs,
|
||||
const sn_record_t& rhs) {
|
||||
return !operator==(lhs, rhs);
|
||||
}
|
||||
|
||||
using swarm_id_t = uint64_t;
|
||||
|
||||
constexpr swarm_id_t INVALID_SWARM_ID = UINT64_MAX;
|
|
@ -1,36 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include "spdlog/spdlog.h"
|
||||
|
||||
#define OXEN_LOG_N(LVL, msg, ...) \
|
||||
spdlog::get("oxen_logger")->LVL("[{}] " msg, __func__, __VA_ARGS__)
|
||||
#define OXEN_LOG_2(LVL, msg) \
|
||||
spdlog::get("oxen_logger")->LVL("[{}] " msg, __func__)
|
||||
|
||||
#define GET_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, _9, NAME, ...) NAME
|
||||
#define OXEN_LOG(...) \
|
||||
GET_MACRO(__VA_ARGS__, OXEN_LOG_N, OXEN_LOG_N, OXEN_LOG_N, OXEN_LOG_N, \
|
||||
OXEN_LOG_N, OXEN_LOG_N, OXEN_LOG_N, OXEN_LOG_2) \
|
||||
(__VA_ARGS__)
|
||||
|
||||
namespace oxen {
|
||||
using LogLevelPair = std::pair<std::string, spdlog::level::level_enum>;
|
||||
using LogLevelMap = std::vector<LogLevelPair>;
|
||||
using LogLevel = spdlog::level::level_enum;
|
||||
// clang-format off
|
||||
static const LogLevelMap logLevelMap{
|
||||
{"trace", LogLevel::trace},
|
||||
{"debug", LogLevel::debug},
|
||||
{"info", LogLevel::info},
|
||||
{"warning", LogLevel::warn},
|
||||
{"error", LogLevel::err},
|
||||
{"critical", LogLevel::critical}
|
||||
};
|
||||
// clang-format on
|
||||
|
||||
void init_logging(const std::string& data_dir, LogLevel log_level);
|
||||
|
||||
void print_log_levels();
|
||||
|
||||
bool parse_log_level(const std::string& input, LogLevel& logLevel);
|
||||
} // namespace oxen
|
|
@ -1,88 +0,0 @@
|
|||
#include "oxen_logger.h"
|
||||
|
||||
// clang-format off
|
||||
#include "spdlog/sinks/stdout_color_sinks.h"
|
||||
#include "spdlog/sinks/rotating_file_sink.h"
|
||||
#include "dev_sink.h"
|
||||
// clang-format on
|
||||
#include <filesystem>
|
||||
|
||||
#include <cstdlib>
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
|
||||
namespace oxen {
|
||||
|
||||
namespace fs = std::filesystem;
|
||||
|
||||
bool parse_log_level(const std::string& input,
|
||||
spdlog::level::level_enum& logLevel) {
|
||||
|
||||
const auto it = std::find_if(
|
||||
logLevelMap.begin(), logLevelMap.end(),
|
||||
[&](const LogLevelPair& pair) { return pair.first == input; });
|
||||
if (it != logLevelMap.end()) {
|
||||
logLevel = it->second;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
void print_log_levels() {
|
||||
std::cerr << " Log Levels:\n";
|
||||
for (const auto& logLevel : logLevelMap) {
|
||||
std::cerr << " " << logLevel.first << "\n";
|
||||
}
|
||||
}
|
||||
|
||||
void init_logging(const std::string& data_dir,
|
||||
spdlog::level::level_enum log_level) {
|
||||
|
||||
const std::string log_location =
|
||||
(fs::u8path(data_dir) / "storage.logs").u8string();
|
||||
// Log to disk output stream
|
||||
const auto input = std::shared_ptr<std::ofstream>(
|
||||
new std::ofstream(log_location, std::ios::out | std::ios::app));
|
||||
if (input->is_open()) {
|
||||
input->close();
|
||||
} else {
|
||||
std::cerr << "Could not open " << log_location << std::endl;
|
||||
return;
|
||||
}
|
||||
|
||||
constexpr size_t LOG_FILE_SIZE_LIMIT = 1024 * 1024 * 50; // 50Mb
|
||||
constexpr size_t EXTRA_FILES = 1;
|
||||
|
||||
auto console_sink = std::make_shared<spdlog::sinks::stdout_color_sink_mt>();
|
||||
console_sink->set_level(log_level);
|
||||
|
||||
// setting this to `true` can be useful for debugging on testnet
|
||||
bool rotate_on_open = false;
|
||||
|
||||
auto file_sink = std::make_shared<spdlog::sinks::rotating_file_sink_mt>(
|
||||
log_location, LOG_FILE_SIZE_LIMIT, EXTRA_FILES, rotate_on_open);
|
||||
file_sink->set_level(log_level);
|
||||
|
||||
auto developer_sink = std::make_shared<oxen::dev_sink_mt>();
|
||||
|
||||
/// IMPORTANT: get_logs endpoint assumes that sink #3 is a dev sink
|
||||
std::vector<spdlog::sink_ptr> sinks = {console_sink, file_sink,
|
||||
developer_sink};
|
||||
|
||||
auto logger = std::make_shared<spdlog::logger>("oxen_logger", sinks.begin(),
|
||||
sinks.end());
|
||||
logger->set_level(log_level);
|
||||
logger->flush_on(spdlog::level::err);
|
||||
|
||||
developer_sink->set_level(spdlog::level::warn);
|
||||
spdlog::register_logger(logger);
|
||||
spdlog::flush_every(std::chrono::seconds(1));
|
||||
|
||||
spdlog::set_pattern("[%Y-%m-%d %H:%M:%S.%e] [%^%l%$] %v");
|
||||
|
||||
OXEN_LOG(info,
|
||||
"\n**************************************************************"
|
||||
"\nOutputting logs to {}",
|
||||
log_location);
|
||||
}
|
||||
} // namespace oxen
|
BIN
contrib/deb.oxen.io.gpg
Normal file
BIN
contrib/deb.oxen.io.gpg
Normal file
Binary file not shown.
|
@ -6,10 +6,10 @@
|
|||
set -o errexit
|
||||
|
||||
anybad=
|
||||
for bin in httpserver/oxen-storage; do
|
||||
for bin in oxen-storage; do
|
||||
bad=
|
||||
if [ "$DRONE_STAGE_OS" == "darwin" ]; then
|
||||
if otool -L $bin | grep -Ev '^'$bin':|^\t(/usr/lib/libSystem\.|/usr/lib/libc\+\+\.|/usr/lib/libresolv\.|/System/Library/Frameworks/(CoreFoundation|IOKit|Security))'; then
|
||||
if otool -L $bin | grep -Ev '^'$bin':|^\s*(/usr/lib/libSystem\.|/usr/lib/libc\+\+\.|/usr/lib/libresolv\.|/System/Library/Frameworks/(CoreFoundation|IOKit|Security|SystemConfiguration))'; then
|
||||
bad=1
|
||||
fi
|
||||
elif [ "$DRONE_STAGE_OS" == "linux" ]; then
|
||||
|
|
7
contrib/drone-format-verify.sh
Executable file
7
contrib/drone-format-verify.sh
Executable file
|
@ -0,0 +1,7 @@
|
|||
#!/usr/bin/env bash
|
||||
test "x$IGNORE" != "x" && exit 0
|
||||
set -e
|
||||
repo=$(readlink -e $(dirname $0)/../)
|
||||
clang-format-14 -i $(find $repo/{oxenss,unit_test} | grep -E '\.[hc](pp)?$' | grep -v 'Catch2')
|
||||
jsonnetfmt -i $repo/.drone.jsonnet
|
||||
git --no-pager diff --exit-code --color || (echo -ne '\n\n\e[31;1mLint check failed; please run ./contrib/format.sh\e[0m\n\n' ; exit 1)
|
41
contrib/format.sh
Executable file
41
contrib/format.sh
Executable file
|
@ -0,0 +1,41 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
CLANG_FORMAT_DESIRED_VERSION=14
|
||||
|
||||
binary=$(command -v clang-format-$CLANG_FORMAT_DESIRED_VERSION 2>/dev/null)
|
||||
if [ $? -ne 0 ]; then
|
||||
binary=$(command -v clang-format-mp-$CLANG_FORMAT_DESIRED_VERSION 2>/dev/null)
|
||||
fi
|
||||
if [ $? -ne 0 ]; then
|
||||
binary=$(command -v clang-format 2>/dev/null)
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Please install clang-format version $CLANG_FORMAT_DESIRED_VERSION and re-run this script."
|
||||
exit 1
|
||||
fi
|
||||
version=$(clang-format --version)
|
||||
if [[ ! $version == *"clang-format version $CLANG_FORMAT_DESIRED_VERSION"* ]]; then
|
||||
echo "Please install clang-format version $CLANG_FORMAT_DESIRED_VERSION and re-run this script."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
cd "$(dirname $0)/../"
|
||||
sources=$(find oxenss unit_test | grep -E '\.[hc](pp)?$' | grep -v '\#\|Catch2')
|
||||
if [ "$1" = "verify" ] ; then
|
||||
if [ $($binary --output-replacements-xml $sources | grep '</replacement>' | wc -l) -ne 0 ] ; then
|
||||
exit 2
|
||||
fi
|
||||
else
|
||||
$binary -i $sources &> /dev/null
|
||||
fi
|
||||
|
||||
jsonnet_format=$(command -v jsonnetfmt 2>/dev/null)
|
||||
if [ $? -eq 0 ]; then
|
||||
if [ "$1" = "verify" ]; then
|
||||
if ! $jsonnet_format --test .drone.jsonnet; then
|
||||
exit 4
|
||||
fi
|
||||
else
|
||||
$jsonnet_format --in-place .drone.jsonnet
|
||||
fi
|
||||
fi
|
94
contrib/omq-rpc.py
Executable file
94
contrib/omq-rpc.py
Executable file
|
@ -0,0 +1,94 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
import nacl.bindings as sodium
|
||||
from nacl.public import PrivateKey
|
||||
from nacl.signing import SigningKey, VerifyKey
|
||||
import nacl.encoding
|
||||
import requests
|
||||
import zmq
|
||||
import zmq.utils.z85
|
||||
import sys
|
||||
import re
|
||||
import time
|
||||
import random
|
||||
import shutil
|
||||
|
||||
|
||||
context = zmq.Context()
|
||||
socket = context.socket(zmq.DEALER)
|
||||
socket.setsockopt(zmq.CONNECT_TIMEOUT, 5000)
|
||||
socket.setsockopt(zmq.HANDSHAKE_IVL, 5000)
|
||||
#socket.setsockopt(zmq.IMMEDIATE, 1)
|
||||
|
||||
if len(sys.argv) > 1 and any(sys.argv[1].startswith(x) for x in ("ipc://", "tcp://", "curve://")):
|
||||
remote = sys.argv[1]
|
||||
del sys.argv[1]
|
||||
else:
|
||||
remote = "ipc://./oxen.sock"
|
||||
|
||||
curve_pubkey = b''
|
||||
my_privkey, my_pubkey = b'', b''
|
||||
|
||||
# If given a curve://whatever/pubkey argument then transform it into 'tcp://whatever' and put the
|
||||
# 'pubkey' back into argv to be handled below.
|
||||
if remote.startswith("curve://"):
|
||||
pos = remote.rfind('/')
|
||||
pkhex = remote[pos+1:]
|
||||
remote = "tcp://" + remote[8:pos]
|
||||
if len(pkhex) != 64 or not all(x in "0123456789abcdefABCDEF" for x in pkhex):
|
||||
print("curve:// addresses must be in the form curve://HOST:PORT/REMOTE_PUBKEY_HEX", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
sys.argv[1:0] = [pkhex]
|
||||
|
||||
if len(sys.argv) > 1 and len(sys.argv[1]) == 64 and all(x in "0123456789abcdefABCDEF" for x in sys.argv[1]):
|
||||
curve_pubkey = bytes.fromhex(sys.argv[1])
|
||||
del sys.argv[1]
|
||||
socket.curve_serverkey = curve_pubkey
|
||||
if len(sys.argv) > 1 and len(sys.argv[1]) == 64 and all(x in "0123456789abcdefABCDEF" for x in sys.argv[1]):
|
||||
my_privkey = bytes.fromhex(sys.argv[1])
|
||||
del sys.argv[1]
|
||||
my_pubkey = zmq.utils.z85.decode(zmq.curve_public(zmq.utils.z85.encode(my_privkey)))
|
||||
else:
|
||||
my_privkey = PrivateKey.generate()
|
||||
my_pubkey = my_privkey.public_key.encode()
|
||||
my_privkey = my_privkey.encode()
|
||||
|
||||
print("No curve client privkey given; generated a random one (pubkey: {}, privkey: {})".format(
|
||||
my_pubkey.hex(), my_privkey.hex()), file=sys.stderr)
|
||||
socket.curve_secretkey = my_privkey
|
||||
socket.curve_publickey = my_pubkey
|
||||
|
||||
if not 2 <= len(sys.argv) <= 3 or any(x in y for x in ("--help", "-h") for y in sys.argv[1:]):
|
||||
print("Usage: {} [ipc:///path/to/sock|tcp://1.2.3.4:5678] [SERVER_CURVE_PUBKEY [LOCAL_CURVE_PRIVKEY]] COMMAND ['JSON']".format(
|
||||
sys.argv[0]), file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
beginning_of_time = time.clock_gettime(time.CLOCK_MONOTONIC)
|
||||
|
||||
print("Connecting to {}".format(remote), file=sys.stderr)
|
||||
socket.connect(remote)
|
||||
to_send = [sys.argv[1].encode(), b'tagxyz123']
|
||||
to_send += (x.encode() for x in sys.argv[2:])
|
||||
print("Sending {}".format(to_send[0]), file=sys.stderr)
|
||||
socket.send_multipart(to_send)
|
||||
if socket.poll(timeout=5000):
|
||||
m = socket.recv_multipart()
|
||||
recv_time = time.clock_gettime(time.CLOCK_MONOTONIC)
|
||||
if len(m) < 3 or m[0:2] != [b'REPLY', b'tagxyz123']:
|
||||
print("Received unexpected {}-part reply:".format(len(m)), file=sys.stderr)
|
||||
for x in m:
|
||||
print("- {}".format(x))
|
||||
else:
|
||||
first = 3 if len(m) > 3 else 2
|
||||
print("Received {} reply in {:.6f}s:".format(m[2].decode() if first == 3 else "a", recv_time - beginning_of_time), file=sys.stderr)
|
||||
for x in m[first:]:
|
||||
print("{} bytes data part:".format(len(x)), file=sys.stderr)
|
||||
if any(x.startswith(y) for y in (b'd', b'l', b'i')) and x.endswith(b'e'):
|
||||
sys.stdout.buffer.write(x)
|
||||
else:
|
||||
print(x.decode(), end="\n\n")
|
||||
|
||||
else:
|
||||
print("Request timed out", file=sys.stderr)
|
||||
socket.close(linger=0)
|
||||
sys.exit(1)
|
358
contrib/onion-request.cpp
Normal file
358
contrib/onion-request.cpp
Normal file
|
@ -0,0 +1,358 @@
|
|||
// C++ backwards engineered command-line onion routing test tool.
|
||||
//
|
||||
// This makes onion requests via storage servers.
|
||||
//
|
||||
// It has a whole bunch of deps (cpr, oxenmq, sodium, ssl, nlohmann); I compiled with the following,
|
||||
// using static cpr from an oxen-core build, SS assets built in ../build, and system-installed
|
||||
// libsodium/libssl/nlohmann/oxenmq:
|
||||
//
|
||||
// g++ -std=c++17 -O2 onion-request.cpp -o onion-request ../../oxen-core/build/external/libcpr.a \
|
||||
// -I../../oxen-core/external/cpr/include ../build/crypto/libcrypto.a -loxenmq -lsodium -lcurl -lcrypto
|
||||
//
|
||||
|
||||
#include <oxenss/crypto/channel_encryption.hpp>
|
||||
#include <oxenss/crypto/keys.h>
|
||||
#include <cpr/cpr.h>
|
||||
#include <chrono>
|
||||
#include <exception>
|
||||
#include <iostream>
|
||||
#include <random>
|
||||
#include <sodium.h>
|
||||
#include <oxenc/hex.h>
|
||||
#include <oxenc/base64.h>
|
||||
#include <oxenmq/oxenmq.h>
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
extern "C" {
|
||||
#include <sys/param.h>
|
||||
}
|
||||
|
||||
using namespace std::literals;
|
||||
|
||||
using namespace oxen;
|
||||
using namespace oxen::crypto;
|
||||
|
||||
int usage(std::string_view argv0, std::string_view err = "") {
|
||||
if (!err.empty())
|
||||
std::cerr << "\x1b[31;1mError: " << err << "\x1b[0m\n\n";
|
||||
std::cerr << "Usage: " << argv0 << R"( [--mainnet] [--xchacha20|--aes-gcm|--aes-cbc|--random] SNODE_PK [SNODE_PK ...] PAYLOAD CONTROL
|
||||
|
||||
Sends an onion request via the given path
|
||||
|
||||
SNODE_PK should be primary (legacy) pubkey(s) on test (or mainnet if --mainnet is given).
|
||||
|
||||
--xchacha20 uses xchacha20+poly1305 encryption (which is the default);
|
||||
--aes-gcm and --aes-cbc use aes-gcm and aes-cbc, respectively, instead.
|
||||
--random uses a random encryption type for each hop.
|
||||
|
||||
PAYLOAD/CONTROL are values to pass to the request and should be:
|
||||
|
||||
Onion requests for SS and oxend:
|
||||
|
||||
Pass '{"headers":[]}' for CONTROL
|
||||
|
||||
PAYLOAD should be the JSON data; for example for an oxend request:
|
||||
|
||||
{"method": "oxend_request", "params": {"endpoint": "get_service_nodes", "params": {"limit": 5}}}
|
||||
|
||||
and for a swarm member lookup:
|
||||
|
||||
{"method": "get_snodes_for_pubkey", {"params": {"pubKey": user_pubkey}}}
|
||||
|
||||
Proxy requests should have an whatever data is to be posted in the PAYLOAD string and CONTROL set to
|
||||
the connection details such as:
|
||||
|
||||
{"host": "jagerman.com", "target": "/oxen/lsrpc"}
|
||||
|
||||
Both PAYLOAD and CONTROL may be passed filenames to read prefixed with `@` (for example:
|
||||
@payload.data, @/path/to/control.json)
|
||||
|
||||
)";
|
||||
return 1;
|
||||
}
|
||||
|
||||
const oxenmq::address TESTNET_OMQ{"tcp://public.loki.foundation:9999"};
|
||||
const oxenmq::address MAINNET_OMQ{"tcp://public.loki.foundation:22029"};
|
||||
|
||||
void onion_request(std::string ip, uint16_t port, std::vector<std::pair<ed25519_pubkey, x25519_pubkey>> keys,
|
||||
bool mainnet, std::optional<EncryptType> enc_type, std::string_view payload, std::string_view control);
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
std::vector<std::string_view> pubkeys_hex;
|
||||
std::vector<legacy_pubkey> pubkeys;
|
||||
auto omq_addr = TESTNET_OMQ;
|
||||
std::optional<EncryptType> enc_type = EncryptType::xchacha20;
|
||||
std::string payload, control;
|
||||
for (int i = 1; i < argc; i++) {
|
||||
std::string_view arg{argv[i]};
|
||||
if (arg == "--mainnet"sv) { omq_addr = MAINNET_OMQ; continue; }
|
||||
if (arg == "--testnet"sv) { omq_addr = TESTNET_OMQ; continue; }
|
||||
if (arg == "--xchacha20"sv) { enc_type = EncryptType::xchacha20; continue; }
|
||||
if (arg == "--aes-gcm"sv) { enc_type = EncryptType::aes_gcm; continue; }
|
||||
if (arg == "--aes-cbc"sv) { enc_type = EncryptType::aes_cbc; continue; }
|
||||
if (arg == "--random"sv) { enc_type = std::nullopt; continue; }
|
||||
|
||||
bool hex = arg.size() > 0 && oxenc::is_hex(arg);
|
||||
if (i >= argc - 2) {
|
||||
if (hex)
|
||||
return usage(argv[0], "Missing PAYLOAD and CONTROL values");
|
||||
|
||||
// Could parse control to make sure it's valid json here, but it can be useful to
|
||||
// deliberate send invalid json for testing purposes to see how the remote handles it.
|
||||
auto& var = (i == argc - 2 ? payload : control);
|
||||
var = arg;
|
||||
if (!var.empty() && var.front() == '@') {
|
||||
std::ifstream f;
|
||||
f.exceptions(std::ifstream::failbit | std::ifstream::badbit);
|
||||
f.open(var.data()+1, std::ios::in | std::ios::binary);
|
||||
var.clear();
|
||||
var.append(std::istreambuf_iterator<char>{f}, std::istreambuf_iterator<char>{});
|
||||
}
|
||||
} else {
|
||||
if (!(hex && arg.size() == 64))
|
||||
return usage(argv[0], "Invalid pubkey '" + std::string{arg} + "'");
|
||||
pubkeys_hex.push_back(arg);
|
||||
pubkeys.push_back(legacy_pubkey::from_hex(arg));
|
||||
}
|
||||
}
|
||||
if (pubkeys.empty()) return usage(argv[0]);
|
||||
|
||||
oxenmq::OxenMQ omq{};
|
||||
omq.start();
|
||||
std::promise<void> got;
|
||||
auto got_fut = got.get_future();
|
||||
auto rpc = omq.connect_remote(omq_addr,
|
||||
[](auto) {},
|
||||
[&got, omq_addr](auto, auto err) {
|
||||
try { throw std::runtime_error{"Failed to connect to oxend @ " + omq_addr.full_address() + ": " + std::string{err}}; }
|
||||
catch (...) { got.set_exception(std::current_exception()); }
|
||||
});
|
||||
std::string first_ip;
|
||||
uint16_t first_port = 0;
|
||||
std::unordered_map<legacy_pubkey, std::pair<ed25519_pubkey, x25519_pubkey>> aux_keys;
|
||||
omq.request(rpc, "rpc.get_service_nodes", [&](bool success, std::vector<std::string> data) {
|
||||
try {
|
||||
if (!success || data[0] != "200")
|
||||
throw std::runtime_error{"get_service_nodes request failed: " + data[0]};
|
||||
|
||||
auto json = nlohmann::json::parse(data[1]);
|
||||
auto sns = json.at("service_node_states");
|
||||
for (auto& sn : sns) {
|
||||
auto& pk = sn.at("service_node_pubkey").get_ref<const std::string&>();
|
||||
auto& e = sn.at("pubkey_ed25519").get_ref<const std::string&>();
|
||||
auto& x = sn.at("pubkey_x25519").get_ref<const std::string&>();
|
||||
if (e.size() != 64 || x.size() != 64 || !oxenc::is_hex(x) || !oxenc::is_hex(e))
|
||||
throw std::runtime_error{sn.at("service_node_pubkey").get<std::string>() + " is missing ed/x25519 pubkeys"};
|
||||
aux_keys.emplace(legacy_pubkey::from_hex(pk),
|
||||
std::make_pair(ed25519_pubkey::from_hex(e), x25519_pubkey::from_hex(x)));
|
||||
if (pk == pubkeys_hex.front()) {
|
||||
first_ip = sn.at("public_ip").get<std::string>();
|
||||
first_port = sn.at("storage_port").get<uint16_t>();
|
||||
}
|
||||
}
|
||||
got.set_value();
|
||||
}
|
||||
catch (...) { got.set_exception(std::current_exception()); }
|
||||
}, nlohmann::json{
|
||||
{"service_node_pubkeys", pubkeys_hex},
|
||||
{"fields", {
|
||||
{"service_node_pubkey", true},
|
||||
{"pubkey_x25519", true},
|
||||
{"pubkey_ed25519", true},
|
||||
{"public_ip", true},
|
||||
{"storage_port", true},
|
||||
}},
|
||||
{"active_only", true},
|
||||
}.dump()
|
||||
);
|
||||
|
||||
try {
|
||||
got_fut.get();
|
||||
std::vector<std::pair<ed25519_pubkey, x25519_pubkey>> chain;
|
||||
for (auto& pk : pubkeys) {
|
||||
if (auto it = aux_keys.find(pk); it != aux_keys.end())
|
||||
chain.push_back(it->second);
|
||||
else
|
||||
std::cerr << pk << " is not an active SN\n";
|
||||
}
|
||||
if (chain.size() != pubkeys.size()) throw std::runtime_error{"Missing x25519 pubkeys"};
|
||||
if (chain.empty()) throw std::runtime_error{"Need at least one SN pubkey"};
|
||||
|
||||
if (first_ip.empty() || !first_port)
|
||||
throw std::runtime_error{"Missing IP/port of first hop"};
|
||||
|
||||
onion_request(first_ip, first_port, std::move(chain), omq_addr == MAINNET_OMQ,
|
||||
enc_type, payload, control);
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "Error: " << e.what();
|
||||
return 2;
|
||||
}
|
||||
}
|
||||
|
||||
// Encodes `s` as exactly 4 little-endian bytes (the framing length prefix used
// in the onion blob format).
//
// Fix: the old implementation memcpy'd the native representation and swapped
// bytes based on the glibc-specific __BYTE_ORDER macros, failing to compile
// (via #error, or silently misbehaving if the macros were undefined and both
// compared equal as 0) on toolchains that don't define them.  Building the
// bytes by shifting is endian-agnostic and fully portable.
std::string encode_size(uint32_t s) {
    std::string str(4, '\0');
    for (int i = 0; i < 4; i++)
        str[i] = static_cast<char>((s >> (8 * i)) & 0xff);
    return str;
}
|
||||
|
||||
static std::mt19937_64 rng{std::random_device{}()};
|
||||
EncryptType random_etype() {
|
||||
std::uniform_int_distribution<int> dist{0, 2};
|
||||
size_t i = dist(rng);
|
||||
return i == 0 ? EncryptType::aes_cbc :
|
||||
i == 1 ? EncryptType::aes_gcm :
|
||||
EncryptType::xchacha20;
|
||||
}
|
||||
|
||||
// Builds a nested onion request through the chain in `keys` (first element =
// entry node, last = final destination), POSTs the resulting blob to the entry
// node's /onion_req/v2 HTTPS endpoint, then decodes/decrypts and prints the
// response body.  `enc_type` selects the per-hop cipher (std::nullopt = pick a
// random cipher for each hop); `payload` is the request body and `control` the
// control json handed to the final hop.
void onion_request(std::string ip, uint16_t port, std::vector<std::pair<ed25519_pubkey, x25519_pubkey>> keys, bool mainnet,
        std::optional<EncryptType> enc_type, std::string_view payload, std::string_view control) {
    // Dummy session-style user pubkey; mainnet ids carry an "05" prefix, testnet ones don't.
    // NOTE(review): this variable appears unused in the rest of the function —
    // confirm whether it was meant to be substituted into the payload.
    std::string_view user_pubkey = "05fedcba9876543210fedcba9876543210fedcba9876543210fedcba9876543210";
    if (!mainnet) user_pubkey.remove_prefix(2);

    std::string blob;

    std::cerr << "Building " << (keys.size()-1) << "-hop onion request\n";
    // First hop:
    //
    // [N][ENCRYPTED]{json}
    //
    // where json has the ephemeral_key indicating how we encrypted ENCRYPTED for this first hop.
    // The first hop decrypts ENCRYPTED into:
    //
    // [N][BLOB]{json}
    //
    // where [N] is the length of the blob and {json} now contains either:
    // - a "headers" key with an empty value.  This is how we indicate that the request is for this
    //   node as the final hop, and means that the BLOB is actually JSON it should parse to get the
    //   request info (which has "method", "params", etc. in it).
    // - "host"/"target"/"port"/"protocol" asking for an HTTP or HTTPS proxy request to be made
    //   (though "target" must start with /loki/ or /oxen/ and end with /lsrpc).  (There is still a
    //   blob here, but it is not used and typically empty).
    // - "destination" and "ephemeral_key" to forward the request to the next hop.
    //
    // This latter case continues onion routing by giving us something like:
    //
    // {"destination":"ed25519pubkey","ephemeral_key":"x25519-eph-pubkey-for-decryption","enc_type":"xchacha20"}
    //
    // (enc_type can also be aes-gcm, and defaults to that if not specified).  We forward this via
    // oxenmq to the given ed25519pubkey (but since oxenmq uses x25519 pubkeys we first have to go
    // look it up), sending an oxenmq request to sn.onion_req_v2 of the following (but bencoded, not
    // json):
    //
    // { "d": "BLOB", "ek": "ephemeral-key-in-binary", "et": "xchacha20", "nh": N }
    //
    // where BLOB is the opaque data received from the previous hop and N is the hop number which
    // gets incremented at each hop (and terminates if it exceeds 15).  That next hop decrypts BLOB,
    // giving it a value interpreted as the same [N][BLOB]{json} as above, and we recurse.
    //
    // On the *return* trip, the message gets encrypted (once!) at the final destination using the
    // derived key from the pubkey given to the final hop, base64-encoded, then passed back without
    // any onion encryption at all, all the way back to the client.

    // Ephemeral keypair, regenerated for each onion layer:
    x25519_pubkey A;
    x25519_seckey a;
    x25519_pubkey final_pubkey;
    x25519_seckey final_seckey;
    EncryptType last_etype;
    EncryptType final_etype;

    // Build the onion from the inside out, starting with the final hop:
    auto it = keys.rbegin();
    {
        crypto_box_keypair(A.data(), a.data());
        ChannelEncryption e{a, A, false};

        // Inner-most plaintext: [payload length][payload][control]
        auto data = encode_size(payload.size());
        data += payload;
        data += control;

        last_etype = final_etype = enc_type.value_or(random_etype());
#ifndef NDEBUG
        std::cerr << "Encrypting for final hop using " << to_string(last_etype) << "/" << A << "\n";
#endif
        blob = e.encrypt(last_etype, data, keys.back().second);
        // Save these because we need them again to decrypt the final response:
        final_seckey = a;
        final_pubkey = A;
    }

    // Wrap one more layer for each remaining hop, working backwards toward the entry node:
    for (it++; it != keys.rend(); it++) {
        // Routing data for this hop:
        nlohmann::json routing{
            {"destination", std::prev(it)->first.hex()}, // Next hop's ed25519 key
            {"ephemeral_key", A.hex()}, // The x25519 ephemeral_key here is the key for the *next* hop to use
            {"enc_type", to_string(last_etype)},
        };

        blob = encode_size(blob.size()) + blob + routing.dump();

        // Generate eph key for *this* request and encrypt it:
        crypto_box_keypair(A.data(), a.data());
        ChannelEncryption e{a, A, false};
        last_etype = enc_type.value_or(random_etype());

#ifndef NDEBUG
        std::cerr << "Encrypting for next-last hop using " << to_string(last_etype) << "/" << A << "\n";
#endif
        blob = e.encrypt(last_etype, blob, it->second);
    }

    // The data going to the first hop needs to be wrapped in one more layer to tell the first hop
    // how to decrypt the initial payload:
    blob = encode_size(blob.size()) + blob + nlohmann::json{
        {"ephemeral_key", A.hex()}, {"enc_type", to_string(last_etype)}}.dump();

    // POST the assembled onion to the entry node (SN certs are self-signed, so no TLS verification).
    cpr::Url target{"https://" + ip + ":" + std::to_string(port) + "/onion_req/v2"};
    std::cerr << "Posting " << blob.size() << " onion blob to " << target.str() << " for entry node\n";
    auto started = std::chrono::steady_clock::now();
    auto res = cpr::Post(target,
            cpr::Body{blob},
            cpr::VerifySsl{false});
    auto finished = std::chrono::steady_clock::now();

    std::cerr << "Got '" << res.status_line << "' onion request response in " <<
        std::chrono::duration<double>(finished - started).count() << "s\n";
    for (auto& [k, v] : res.header)
        std::cerr << "- " << k << ": " << v << "\n";

    if (res.text.empty()) {
        std::cerr << "Request returned empty body\n";
        return;
    }

    // Nothing in the response tells us how it is encoded so we have to guess; the client normally
    // *does* know because it specifies `"base64": false` if it wants binary, but I don't want to
    // parse and guess what we should do, so we'll just guess.
    ChannelEncryption d{final_seckey, final_pubkey, false};
    bool decrypted = false;
    auto body = std::move(res.text);
    auto orig_size = body.size();
    // First attempt: treat the body as raw encrypted bytes.
    try { body = d.decrypt(final_etype, body, keys.back().second); decrypted = true; }
    catch (...) {}

    if (decrypted) {
        std::cerr << "Body is " << orig_size << " encrypted bytes, decrypted to " << body.size() << " bytes:\n";
    } else if (oxenc::is_base64(body)) {
        // Second attempt: base64-decode, then try decrypting the decoded bytes.
        body = oxenc::from_base64(body);
        std::cerr << "Body was " << orig_size << " base64 bytes; decoded to " << body.size() << " bytes";
        try { body = d.decrypt(final_etype, body, keys.back().second); decrypted = true; }
        catch (...) {}
        if (decrypted)
            std::cerr << "; decrypted to " << body.size() << " bytes:\n";
        else
            std::cerr << "; not encrypted (or decryption failed)\n";
    } else {
        std::cerr << "Body is " << body.size() << " bytes (not base64-encoded, not encrypted [or decryption failed])\n";
    }
    std::cerr << std::flush;

    // Dump the (possibly decrypted) body to stdout, ensuring a trailing newline.
    std::cout << body;
    if (!body.empty() && body.back() != '\n')
        std::cout << '\n';
}
|
|
@ -1,35 +0,0 @@
|
|||
# Standalone `crypto` static library: legacy oxend keys, channel encryption
# (AES-CBC / AES-GCM), signatures, plus the vendored oxen crypto-ops C sources.
cmake_minimum_required(VERSION 3.5)

add_library(crypto STATIC
    src/oxend_key.cpp
    src/channel_encryption.cpp
    src/signature.cpp
    ${CMAKE_CURRENT_LIST_DIR}/../vendors/oxen/crypto-ops/crypto-ops.c
    ${CMAKE_CURRENT_LIST_DIR}/../vendors/oxen/crypto-ops/crypto-ops-data.c
    ${CMAKE_CURRENT_LIST_DIR}/../vendors/oxen/crypto-ops/keccak.c
    ${CMAKE_CURRENT_LIST_DIR}/../vendors/oxen/crypto-ops/hash-ops.c
)

# OpenSSL is an implementation detail (EVP AES-CBC), so PRIVATE.
target_link_libraries(crypto PRIVATE OpenSSL::SSL)

target_include_directories(crypto
    PUBLIC
    ${CMAKE_CURRENT_LIST_DIR}/include
    PRIVATE
    ${CMAKE_CURRENT_LIST_DIR}/../vendors
)

target_link_libraries(crypto PUBLIC utils)

# Small round-trip smoke-test binary (src/test_main.cpp).
add_executable(crypto_test src/test_main.cpp)
target_link_libraries(crypto_test PRIVATE crypto)

find_package(Threads)

target_link_libraries(crypto PRIVATE
    sodium
    Boost::boost
    oxenmq::oxenmq
    filesystem
    Threads::Threads
    ${CMAKE_DL_LIBS})
|
|
@ -1,24 +0,0 @@
|
|||
#pragma once

#include <cstdint>
#include <string>
#include <vector>

// Symmetric channel encryption helper: derives a shared secret from our x25519
// private key and the remote party's x25519 pubkey (given as hex), then
// encrypts/decrypts with AES-256-CBC or AES-256-GCM.  T is the byte container
// (std::string or std::vector<uint8_t>); explicit instantiations for both live
// in the .cpp.
// Why is this even a template??
template <typename T>
class ChannelEncryption {
  public:
    // `private_key` is our raw x25519 secret key bytes (copied).
    ChannelEncryption(const std::vector<uint8_t>& private_key);
    ~ChannelEncryption() = default;

    // AES-256-CBC with a fresh random IV; returns iv || ciphertext.
    T encrypt_cbc(const T& plainText, const std::string& pubKey) const;

    // AES-256-GCM; returns nonce (12 bytes) || ciphertext || tag (16 bytes).
    T encrypt_gcm(const T& plainText, const std::string& pubKey) const;

    // Inverse of encrypt_cbc; input must begin with the IV.
    T decrypt_cbc(const T& cipherText, const std::string& pubKey) const;

    // Inverse of encrypt_gcm; input must begin with the 12-byte nonce.
    T decrypt_gcm(const T& cipherText, const std::string& pubKey) const;

  private:
    const std::vector<uint8_t> private_key_;
};
|
|
@ -1,32 +0,0 @@
|
|||
#pragma once

#include <array>
#include <cstdint>
#include <string>

namespace oxen {

// Legacy and x25519 keys are 32 raw bytes each.
constexpr size_t KEY_LENGTH = 32;
using public_key_t = std::array<uint8_t, KEY_LENGTH>;
using private_key_t = std::array<uint8_t, KEY_LENGTH>;

// 64-byte ed25519 secret key.
struct private_key_ed25519_t {
    static constexpr uint32_t LENGTH = 64;
    std::array<uint8_t, private_key_ed25519_t::LENGTH> data;
    // Parses a 2*LENGTH hex-digit string; throws std::runtime_error on bad input.
    static private_key_ed25519_t from_hex(const std::string& sc_hex);
};

// Legacy service-node keypair.
struct oxend_key_pair_t {
    private_key_t private_key;
    public_key_t public_key;
};

// Returns the raw key bytes as a binary std::string.
std::string key_to_string(const std::array<uint8_t, oxen::KEY_LENGTH>& key);

// Parses a 2*KEY_LENGTH hex-digit private key; throws std::runtime_error on bad input.
private_key_t oxendKeyFromHex(const std::string& private_key_hex);

// Public-key derivation from the corresponding private key, one per key flavour.
public_key_t derive_pubkey_legacy(const private_key_t& private_key);
public_key_t derive_pubkey_x25519(const private_key_t& private_key);
public_key_t derive_pubkey_ed25519(const private_key_ed25519_t& private_key);

} // namespace oxen
|
|
@ -1,29 +0,0 @@
|
|||
#pragma once

#include "oxend_key.h"

#include <array>

namespace oxen {

constexpr size_t HASH_SIZE = 32;
constexpr size_t EC_SCALAR_SIZE = 32;

using hash = std::array<uint8_t, HASH_SIZE>;
using ec_scalar = std::array<uint8_t, EC_SCALAR_SIZE>;

// CryptoNote-style signature: challenge scalar `c` and response scalar `r`.
struct signature {
    ec_scalar c, r;
};

// 32-byte generic hash (libsodium crypto_generichash) of `data`.
hash hash_data(const std::string& data);

// Signs `prefix_hash` with the given legacy keypair.
signature generate_signature(const hash& prefix_hash,
                             const oxend_key_pair_t& key_pair);

// Verifies a base64-encoded signature against a base32z-encoded public key.
bool check_signature(const std::string& signature, const hash& hash,
                     const std::string& public_key_t_b32z);
// Verifies `sig` over `prefix_hash` against `pub`.
bool check_signature(const signature& sig, const hash& prefix_hash,
                     const public_key_t& pub);

} // namespace oxen
|
|
@ -1,246 +0,0 @@
|
|||
#include "channel_encryption.hpp"

#include "utils.hpp"

#include <cassert>
#include <exception>
#include <iostream>
#include <memory>
#include <string>

#include <openssl/evp.h>
#include <openssl/rand.h>
#include <oxenmq/hex.h>
#include <sodium.h>
|
||||
|
||||
// Decodes a hex string into raw bytes; throws std::runtime_error if the input
// is not valid hex.
std::vector<uint8_t> hexToBytes(const std::string& hex) {
    if (!oxenmq::is_hex(hex))
        throw std::runtime_error{"input is not hex"};
    std::vector<uint8_t> bytes;
    bytes.reserve(hex.size() / 2);
    oxenmq::from_hex(hex.begin(), hex.end(), std::back_inserter(bytes));
    return bytes;
}
|
||||
|
||||
// Stores a copy of our x25519 private key for later shared-secret derivation.
template <typename T>
ChannelEncryption<T>::ChannelEncryption(const std::vector<uint8_t>& private_key)
    : private_key_(private_key) {}
|
||||
|
||||
// Derive shared secret from our (ephemeral) `seckey` and the other party's
// `pubkey` via x25519 ECDH (crypto_scalarmult).  Throws std::runtime_error on
// a wrong-sized pubkey or if the scalar multiplication fails.
static std::vector<uint8_t>
calculate_shared_secret(const std::vector<uint8_t>& seckey,
                        const std::vector<uint8_t>& pubkey) {

    std::vector<uint8_t> secret(crypto_scalarmult_BYTES);
    if (pubkey.size() != crypto_scalarmult_curve25519_BYTES) {
        throw std::runtime_error("Bad pubKey size");
    }

    if (crypto_scalarmult(secret.data(), seckey.data(), pubkey.data()) != 0) {
        throw std::runtime_error(
            "Shared key derivation failed (crypto_scalarmult)");
    }
    return secret;
}
|
||||
|
||||
// Derives the 32-byte symmetric cipher key used for AES-GCM: HMAC-SHA256 of
// the x25519 shared secret, keyed with the fixed salt "LOKI".
static std::vector<uint8_t>
derive_symmetric_key(const std::vector<uint8_t>& seckey,
                     const std::vector<uint8_t>& pubkey) {

    const std::vector<uint8_t> sharedKey =
        calculate_shared_secret(seckey, pubkey);

    std::vector<uint8_t> derived_key(32);

    // The HMAC key ("salt") is the literal string "LOKI" — both sides must agree.
    const std::string salt_str = "LOKI";
    const auto salt = reinterpret_cast<const unsigned char*>(salt_str.data());

    crypto_auth_hmacsha256_state state;

    crypto_auth_hmacsha256_init(&state, salt, salt_str.size());
    crypto_auth_hmacsha256_update(&state, sharedKey.data(), sharedKey.size());
    crypto_auth_hmacsha256_final(&state, derived_key.data());

    return derived_key;
}
|
||||
|
||||
template <typename T>
|
||||
T ChannelEncryption<T>::encrypt_cbc(const T& plaintext,
|
||||
const std::string& pubKey) const {
|
||||
const std::vector<uint8_t> pubKeyBytes = hexToBytes(pubKey);
|
||||
const std::vector<uint8_t> sharedKey =
|
||||
calculate_shared_secret(this->private_key_, pubKeyBytes);
|
||||
|
||||
// Initialise cipher
|
||||
const EVP_CIPHER* cipher = EVP_aes_256_cbc();
|
||||
const int ivLength = EVP_CIPHER_iv_length(cipher);
|
||||
|
||||
// Generate IV
|
||||
unsigned char iv[ivLength];
|
||||
if (RAND_bytes(iv, ivLength) != 1) {
|
||||
throw std::runtime_error("Could not generate IV");
|
||||
}
|
||||
|
||||
// Initialise cipher context
|
||||
EVP_CIPHER_CTX* ctx = EVP_CIPHER_CTX_new();
|
||||
if (EVP_EncryptInit_ex(ctx, cipher, NULL, sharedKey.data(), iv) <= 0) {
|
||||
throw std::runtime_error("Could not initialise encryption context");
|
||||
}
|
||||
|
||||
int len;
|
||||
size_t ciphertext_len = 0;
|
||||
auto p = reinterpret_cast<const unsigned char*>(plaintext.data());
|
||||
const size_t plaintext_len = plaintext.size();
|
||||
|
||||
// Add some padding of 'blockSize' as upper limit
|
||||
const int blockSize = EVP_CIPHER_CTX_block_size(ctx);
|
||||
T output;
|
||||
output.resize(plaintext_len + blockSize);
|
||||
auto o = reinterpret_cast<unsigned char*>(&output[0]);
|
||||
|
||||
// Encrypt every full blocks
|
||||
if (EVP_EncryptUpdate(ctx, o, &len, p, plaintext_len) <= 0) {
|
||||
throw std::runtime_error("Could not encrypt plaintext");
|
||||
}
|
||||
ciphertext_len += len;
|
||||
|
||||
// Encrypt any remaining partial blocks
|
||||
if (EVP_EncryptFinal_ex(ctx, o + len, &len) <= 0) {
|
||||
throw std::runtime_error("Could not finalise encryption");
|
||||
}
|
||||
ciphertext_len += len;
|
||||
|
||||
// Remove excess padding
|
||||
output.resize(ciphertext_len);
|
||||
|
||||
// Insert iv at the start
|
||||
output.insert(output.begin(), iv, iv + ivLength);
|
||||
|
||||
EVP_CIPHER_CTX_free(ctx);
|
||||
|
||||
return output;
|
||||
}
|
||||
|
||||
// Encrypts `plaintext` with AES-256-GCM using the HMAC-derived symmetric key.
// Output layout: nonce (12 bytes) || ciphertext || tag (16 bytes).
template <typename T>
T ChannelEncryption<T>::encrypt_gcm(const T& plaintext,
                                    const std::string& pubKey) const {
    const std::vector<uint8_t> pubKeyBytes = hexToBytes(pubKey);
    const std::vector<uint8_t> derived_key =
        derive_symmetric_key(this->private_key_, pubKeyBytes);

    T ciphertext;
    // Ciphertext should always be the length of plaintext plus tag
    ciphertext.resize(plaintext.size() + 16);

    auto ciphertext_ptr = reinterpret_cast<unsigned char*>(&ciphertext[0]);

    unsigned long long ciphertext_len;

    const auto plaintext_ptr =
        reinterpret_cast<const unsigned char*>(&plaintext[0]);

    // Fresh random nonce for every message.
    unsigned char nonce[crypto_aead_aes256gcm_NPUBBYTES];
    randombytes_buf(nonce, sizeof(nonce));

    // NOTE(review): the return value is ignored, and libsodium's aes256gcm
    // requires hardware AES support — confirm that callers check
    // crypto_aead_aes256gcm_is_available() somewhere.
    crypto_aead_aes256gcm_encrypt(ciphertext_ptr, &ciphertext_len,
                                  plaintext_ptr, plaintext.size(), NULL, 0,
                                  NULL, nonce, derived_key.data());

    ciphertext.resize(ciphertext_len);

    // Prepend the nonce.
    ciphertext.insert(ciphertext.begin(), std::begin(nonce), std::end(nonce));

    // nonce (12 bytes) || ciphertext || tag (16 bytes)
    return ciphertext;
}
|
||||
|
||||
template <typename T>
|
||||
T ChannelEncryption<T>::decrypt_gcm(const T& iv_ciphertext_tag,
|
||||
const std::string& pubKey) const {
|
||||
const std::vector<uint8_t> pubKeyBytes = hexToBytes(pubKey);
|
||||
const std::vector<uint8_t> derived_key =
|
||||
derive_symmetric_key(this->private_key_, pubKeyBytes);
|
||||
|
||||
T output;
|
||||
|
||||
// Plaintext should be (16 + 12) bytes shorter
|
||||
output.resize(iv_ciphertext_tag.size() - 28);
|
||||
|
||||
auto outPtr = reinterpret_cast<unsigned char*>(&output[0]);
|
||||
|
||||
unsigned long long decrypted_len;
|
||||
|
||||
constexpr auto NONCE_SIZE = 12;
|
||||
const auto ciphertext = reinterpret_cast<const unsigned char*>(
|
||||
&iv_ciphertext_tag[0] + NONCE_SIZE);
|
||||
|
||||
const auto nonce =
|
||||
reinterpret_cast<const unsigned char*>(&iv_ciphertext_tag[0]);
|
||||
|
||||
unsigned long long clen = iv_ciphertext_tag.size() - NONCE_SIZE;
|
||||
|
||||
if (crypto_aead_aes256gcm_decrypt(
|
||||
outPtr, &decrypted_len, NULL /* must be null */, ciphertext, clen,
|
||||
NULL, 0, nonce, derived_key.data()) != 0) {
|
||||
throw std::runtime_error("Could not decrypt (AES-GCM)");
|
||||
}
|
||||
|
||||
assert(output.size() == decrypted_len);
|
||||
|
||||
return output;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
T ChannelEncryption<T>::decrypt_cbc(const T& ciphertextAndIV,
|
||||
const std::string& pubKey) const {
|
||||
const std::vector<uint8_t> pubKeyBytes = hexToBytes(pubKey);
|
||||
const std::vector<uint8_t> sharedKey =
|
||||
calculate_shared_secret(this->private_key_, pubKeyBytes);
|
||||
|
||||
// Initialise cipher
|
||||
const EVP_CIPHER* cipher = EVP_aes_256_cbc();
|
||||
const int ivLength = EVP_CIPHER_iv_length(cipher);
|
||||
|
||||
auto inPtr = reinterpret_cast<const unsigned char*>(&ciphertextAndIV[0]);
|
||||
|
||||
// Initialise cipher context
|
||||
EVP_CIPHER_CTX* ctx = EVP_CIPHER_CTX_new();
|
||||
if (EVP_DecryptInit_ex(ctx, cipher, NULL, sharedKey.data(), inPtr) <= 0) {
|
||||
throw std::runtime_error("Could not initialise decryption context");
|
||||
}
|
||||
|
||||
int len;
|
||||
size_t plaintextLength = 0;
|
||||
const size_t ciphertextLength = ciphertextAndIV.size() - ivLength;
|
||||
|
||||
// Add some padding of 'blockSize' as upper limit
|
||||
const int blockSize = EVP_CIPHER_CTX_block_size(ctx);
|
||||
T output;
|
||||
output.resize(ciphertextLength + blockSize);
|
||||
|
||||
auto outPtr = reinterpret_cast<unsigned char*>(&output[0]);
|
||||
|
||||
// Decrypt every full blocks
|
||||
if (EVP_DecryptUpdate(ctx, outPtr, &len, inPtr + ivLength,
|
||||
ciphertextLength) <= 0) {
|
||||
throw std::runtime_error("Could not decrypt block");
|
||||
}
|
||||
plaintextLength += len;
|
||||
|
||||
// Decrypt any remaining partial blocks
|
||||
if (EVP_DecryptFinal_ex(ctx, outPtr + len, &len) <= 0) {
|
||||
throw std::runtime_error("Could not finalise decryption");
|
||||
}
|
||||
plaintextLength += len;
|
||||
|
||||
// Remove excess bytes
|
||||
output.resize(plaintextLength);
|
||||
|
||||
// Don't we need to call free even when we throw??
|
||||
EVP_CIPHER_CTX_free(ctx);
|
||||
return output;
|
||||
}
|
||||
|
||||
// Explicit template instantiations: these are the only two container types
// used by callers, so the whole implementation can live in this .cpp.
template class ChannelEncryption<std::string>;

template class ChannelEncryption<std::vector<uint8_t>>;
|
|
@ -1,69 +0,0 @@
|
|||
#include "oxend_key.h"
|
||||
#include "utils.hpp"
|
||||
|
||||
#include <sodium.h>
|
||||
#include <oxenmq/hex.h>
|
||||
|
||||
#include <exception>
|
||||
#include <fstream>
|
||||
#include <iterator>
|
||||
|
||||
namespace oxen {
|
||||
|
||||
// Parses a 2*KEY_LENGTH (= 64) hex-digit private key string; throws
// std::runtime_error if the input is not hex or has the wrong length.
private_key_t oxendKeyFromHex(const std::string& private_key_hex) {
    if (!oxenmq::is_hex(private_key_hex) || private_key_hex.size() != KEY_LENGTH * 2)
        // Fix: the old message reported KEY_LENGTH (32) as the expected number
        // of hex digits (it is 64) and labelled the measured size "bytes"
        // (it is hex characters).
        throw std::runtime_error("Oxend key data is invalid: expected " +
                                 std::to_string(KEY_LENGTH * 2) +
                                 " hex digits, not " +
                                 std::to_string(private_key_hex.size()) +
                                 " characters");

    private_key_t private_key;
    oxenmq::from_hex(private_key_hex.begin(), private_key_hex.end(), private_key.begin());

    return private_key;
}
|
||||
|
||||
// Parses a 2*LENGTH (= 128) hex-digit ed25519 secret key.
//
// Fixes: unlike its sibling oxendKeyFromHex, this version previously checked
// only the length, so non-hex input silently produced a garbage key — it now
// validates with oxenmq::is_hex as well; the error message also reported the
// expected count as LENGTH rather than LENGTH*2 hex digits and mislabelled
// the measured size as "bytes".
private_key_ed25519_t
private_key_ed25519_t::from_hex(const std::string& sc_hex) {
    if (!oxenmq::is_hex(sc_hex) ||
        sc_hex.size() != private_key_ed25519_t::LENGTH * 2)
        throw std::runtime_error("Oxend key data is invalid: expected " +
                                 std::to_string(private_key_ed25519_t::LENGTH * 2) +
                                 " hex digits, not " +
                                 std::to_string(sc_hex.size()) + " characters");

    private_key_ed25519_t key;
    oxenmq::from_hex(sc_hex.begin(), sc_hex.end(), key.data.begin());

    return key;
}
|
||||
|
||||
// Derives the legacy SN public key: ed25519 basepoint scalar multiplication
// using the *noclamp* variant — presumably because legacy oxend secret keys
// are raw scalars rather than clamped seeds (TODO confirm against oxend).
public_key_t derive_pubkey_legacy(const private_key_t& private_key) {
    public_key_t publicKey;
    crypto_scalarmult_ed25519_base_noclamp(publicKey.data(),
                                           private_key.data());

    return publicKey;
}
|
||||
|
||||
// Derives the x25519 public key: curve25519 basepoint scalar multiplication.
public_key_t derive_pubkey_x25519(const private_key_t& seckey) {

    public_key_t pubkey;
    crypto_scalarmult_curve25519_base(pubkey.data(), seckey.data());

    return pubkey;
}
|
||||
|
||||
// Extracts the ed25519 public key from the 64-byte secret key
// (libsodium stores the pubkey inside the expanded sk).
public_key_t derive_pubkey_ed25519(const private_key_ed25519_t& seckey) {

    public_key_t pubkey;
    crypto_sign_ed25519_sk_to_pk(pubkey.data(), seckey.data.data());

    return pubkey;
}
|
||||
|
||||
// Returns the raw 32 key bytes as a binary std::string.
std::string key_to_string(const std::array<uint8_t, oxen::KEY_LENGTH>& key) {
    // Use data()/size() rather than reinterpret_cast'ing the address of the
    // whole std::array object — same bytes, clearer intent, and no dependence
    // on the array's object layout.
    return std::string{reinterpret_cast<const char*>(key.data()), key.size()};
}
|
||||
|
||||
} // namespace oxen
|
|
@ -1,149 +0,0 @@
|
|||
#include "signature.h"
|
||||
#include "utils.hpp"
|
||||
|
||||
extern "C" {
|
||||
#include "oxen/crypto-ops/crypto-ops.h"
|
||||
#include "oxen/crypto-ops/hash-ops.h"
|
||||
}
|
||||
|
||||
#include <sodium/crypto_generichash.h>
|
||||
#include <sodium/crypto_generichash_blake2b.h>
|
||||
#include <sodium/randombytes.h>
|
||||
#include <oxenmq/base32z.h>
|
||||
#include <oxenmq/base64.h>
|
||||
|
||||
#include <algorithm>
|
||||
#include <cassert>
|
||||
#include <cstdint>
|
||||
#include <cstring> // for memcmp
|
||||
#include <iterator>
|
||||
#include <string>
|
||||
|
||||
static_assert(crypto_generichash_BYTES == oxen::HASH_SIZE, "Wrong hash size!");
|
||||
|
||||
namespace oxen {
|
||||
|
||||
using ec_point = std::array<uint8_t, 32>;
// Buffer hashed to produce the signature challenge:
// H(prefix hash || signer pubkey || commitment point).
struct s_comm {
    uint8_t h[32];    // message (prefix) hash
    uint8_t key[32];  // signer's public key
    uint8_t comm[32]; // commitment (k*G when signing; reconstructed when verifying)
};
|
||||
|
||||
// Fills `k` with 32 random bytes, four at a time from randombytes_random().
//
// Bug fix: the previous version indexed k[i + 0..3] while i ran 0..7, which
// only ever wrote bytes 0..10 (with heavy overlap) and left bytes 11..31 of
// the scalar uninitialized.  Each 4-byte word belongs at offset 4*i.
void random_scalar(ec_scalar& k) {
    for (size_t i = 0; i < k.size() / 4; ++i) {
        const uint32_t random = randombytes_random();
        k[4 * i + 0] = (random & 0xFF000000) >> 24;
        k[4 * i + 1] = (random & 0x00FF0000) >> 16;
        k[4 * i + 2] = (random & 0x0000FF00) >> 8;
        k[4 * i + 3] = (random & 0x000000FF) >> 0;
    }
}
|
||||
|
||||
// Hashes `size` bytes at `input` with cn_fast_hash and reduces the result
// modulo the ed25519 group order so it is a valid scalar.
bool hash_to_scalar(const void* input, size_t size, ec_scalar& output) {
    cn_fast_hash(input, size, reinterpret_cast<char*>(output.data()));
    sc_reduce32(output.data());
    return true;  // always succeeds; bool return kept for interface compatibility
}
|
||||
|
||||
// 32-byte generic hash (libsodium crypto_generichash, i.e. BLAKE2b) of `data`.
hash hash_data(const std::string& data) {
    hash hash{{0}};
    crypto_generichash(hash.data(), hash.size(),
                       reinterpret_cast<const unsigned char*>(data.c_str()),
                       data.size(), nullptr, 0);
    return hash;
}
|
||||
|
||||
// Produces a CryptoNote-style signature over `prefix_hash`:
// pick random nonce k, commitment = k*G, c = H(h || pub || commitment),
// r = k - c*sec (mod l); retries on degenerate values.
signature generate_signature(const hash& prefix_hash,
                             const oxend_key_pair_t& key_pair) {
    ge_p3 tmp3;
    ec_scalar k;
    s_comm buf;
    signature sig;
#if !defined(NDEBUG)
    {
        // Debug-only sanity check: the public key must match the private key.
        ge_p3 t;
        public_key_t t2;
        assert(sc_check(key_pair.private_key.data()) == 0);
        ge_scalarmult_base(&t, key_pair.private_key.data());
        ge_p3_tobytes(t2.data(), &t);
        assert(key_pair.public_key == t2);
    }
#endif
    std::copy(prefix_hash.begin(), prefix_hash.end(), std::begin(buf.h));
    std::copy(key_pair.public_key.begin(), key_pair.public_key.end(),
              std::begin(buf.key));
try_again:
    // Fresh random nonce for each attempt.
    random_scalar(k);
    if (k[7] == 0) // we don't want tiny numbers here
        goto try_again;
    ge_scalarmult_base(&tmp3, k.data());   // commitment = k*G
    ge_p3_tobytes(buf.comm, &tmp3);
    hash_to_scalar(&buf, sizeof(s_comm), sig.c);  // challenge c
    if (!sc_isnonzero((const unsigned char*)sig.c.data()))
        goto try_again;
    // r = k - c*sec (mod l)
    sc_mulsub(sig.r.data(), sig.c.data(), key_pair.private_key.data(),
              k.data());
    if (!sc_isnonzero((const unsigned char*)sig.r.data()))
        goto try_again;
    return sig;
}
|
||||
|
||||
// Verifies a signature: recompute c' = H(h || pub || c*Pub + r*G) and accept
// iff c' == c.  Returns false for invalid points/scalars or a mismatch.
bool check_signature(const signature& sig, const hash& prefix_hash,
                     const public_key_t& pub) {
    ge_p2 tmp2;
    ge_p3 tmp3;
    ec_scalar c;
    s_comm buf;
    // assert(check_key(pub));
    std::copy(prefix_hash.begin(), prefix_hash.end(), std::begin(buf.h));
    std::copy(pub.begin(), pub.end(), std::begin(buf.key));
    if (ge_frombytes_vartime(&tmp3, pub.data()) != 0) {
        return false;  // pub does not decode to a valid curve point
    }
    if (sc_check(sig.c.data()) != 0 || sc_check(sig.r.data()) != 0 ||
        !sc_isnonzero(sig.c.data())) {
        return false;  // c and r must be canonical scalars, with c nonzero
    }
    // Reconstruct the commitment: c*Pub + r*G
    ge_double_scalarmult_base_vartime(&tmp2, sig.c.data(), &tmp3, sig.r.data());
    ge_tobytes(buf.comm, &tmp2);
    // Reject the identity point as a commitment.
    static const ec_point infinity = {{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                       0, 0, 0, 0, 0, 0, 0, 0, 0, 0}};
    if (memcmp(buf.comm, &infinity, 32) == 0)
        return false;
    hash_to_scalar(&buf, sizeof(s_comm), c);
    sc_sub(c.data(), c.data(), sig.c.data());
    return sc_isnonzero(c.data()) == 0;  // c' == c  <=>  c' - c == 0
}
|
||||
|
||||
// Convenience overload: verifies a base64-encoded signature against a hash and
// a base32z-encoded public key (the on-the-wire encodings).  Returns false on
// any malformed input.
bool check_signature(const std::string& signature_b64, const hash& hash,
                     const std::string& public_key_b32z) {
    if (!oxenmq::is_base64(signature_b64))
        return false;

    // 64 bytes -> 86/88 base64 encoded characters without/with padding
    if (!(signature_b64.size() == 86 ||
          (signature_b64.size() == 88 && signature_b64[86] == '=')))
        return false;

    // convert signature
    signature sig;
    static_assert(sizeof(sig) == 64);
    oxenmq::from_base64(signature_b64.begin(), signature_b64.end(),
                        reinterpret_cast<uint8_t*>(&sig));

    // 32 bytes -> 52 base32z encoded characters
    if (public_key_b32z.size() != 52 || !oxenmq::is_base32z(public_key_b32z))
        return false;

    // convert public key
    public_key_t public_key;
    static_assert(sizeof(public_key) == 32);
    oxenmq::from_base32z(public_key_b32z.begin(), public_key_b32z.end(),
                         public_key.begin());

    return check_signature(sig, hash, public_key);
}
|
||||
|
||||
} // namespace oxen
|
|
@ -1,20 +0,0 @@
|
|||
#include "channel_encryption.hpp"
|
||||
#include <assert.h>
|
||||
#include <iostream>
|
||||
|
||||
// Round-trip smoke test for ChannelEncryption AES-GCM.
//
// Bug fix: the old code called encrypt_gcm a *second* time instead of
// decrypt_gcm, so the assert compared the plaintext against doubly-encrypted
// data (and only ever "passed" when asserts were compiled out via NDEBUG).
int main() {
    const std::vector<uint8_t> private_key{
        114, 19,  233, 130, 59,  240, 42,  209, 251, 142, 29,
        59,  200, 89,  234, 154, 202, 12,  29,  44,  180, 111,
        36,  158, 126, 252, 198, 236, 141, 163, 95,  15};
    ChannelEncryption<std::string> channel(private_key);
    const std::string pubKey =
        "86fe0345719904c47d9d3d24d742d110cab95f9386173057bd59f1c2249da174";
    const std::string plainText = "params\":{\"pubKey\":"
                                  "\"0549b42c7600a25ab9800903630a57f157a1a0f771"
                                  "cac31df559eb13fc5cc0c813\"}}";

    const auto ciphertext = channel.encrypt_gcm(plainText, pubKey);
    const auto decrypted = channel.decrypt_gcm(ciphertext, pubKey);
    assert(plainText == decrypted);
    return 0;
}
|
1
external/CLI11
vendored
Submodule
1
external/CLI11
vendored
Submodule
|
@ -0,0 +1 @@
|
|||
Subproject commit b9be5b9444772324459989177108a6a65b8b2769
|
235
external/CMakeLists.txt
vendored
Normal file
235
external/CMakeLists.txt
vendored
Normal file
|
@ -0,0 +1,235 @@
|
|||
|
||||
if(BUILD_STATIC_DEPS)
|
||||
set(DEFAULT_STATIC ON)
|
||||
else()
|
||||
set(DEFAULT_STATIC OFF)
|
||||
endif()
|
||||
option(STATIC "Try to link external dependencies statically, where possible" ${DEFAULT_STATIC})
|
||||
|
||||
|
||||
if(NOT STATIC AND NOT BUILD_STATIC_DEPS)
|
||||
find_package(PkgConfig REQUIRED)
|
||||
else()
|
||||
set(BUILD_SHARED_LIBS OFF CACHE INTERNAL "" FORCE)
|
||||
endif()
|
||||
|
||||
if(NOT TARGET sodium)
|
||||
# Allow -D DOWNLOAD_SODIUM=FORCE to download without even checking for a local libsodium
|
||||
option(DOWNLOAD_SODIUM "Allow libsodium to be downloaded and built locally if not found on the system" OFF)
|
||||
if(NOT DOWNLOAD_SODIUM STREQUAL "FORCE" AND NOT BUILD_STATIC_DEPS)
|
||||
find_package(PkgConfig REQUIRED)
|
||||
pkg_check_modules(SODIUM libsodium>=1.0.18 IMPORTED_TARGET)
|
||||
endif()
|
||||
|
||||
add_library(sodium INTERFACE)
|
||||
if(SODIUM_FOUND AND NOT DOWNLOAD_SODIUM STREQUAL "FORCE" AND NOT BUILD_STATIC_DEPS)
|
||||
target_link_libraries(sodium INTERFACE PkgConfig::SODIUM)
|
||||
else()
|
||||
if(NOT DOWNLOAD_SODIUM AND NOT BUILD_STATIC_DEPS)
|
||||
message(FATAL_ERROR "Could not find libsodium >= 1.0.18; either install it on your system or use -DDOWNLOAD_SODIUM=ON to download and build an internal copy")
|
||||
endif()
|
||||
message(STATUS "Sodium >= 1.0.18 not found, but DOWNLOAD_SODIUM specified, so downloading it")
|
||||
include(DownloadLibSodium)
|
||||
target_link_libraries(sodium INTERFACE sodium_vendor)
|
||||
endif()
|
||||
|
||||
# Need this target export so that oxenmq properly picks up sodium
|
||||
export(TARGETS sodium NAMESPACE sodium:: FILE sodium-exports.cmake)
|
||||
endif()
|
||||
|
||||
|
||||
macro(system_or_submodule BIGNAME smallname pkgconf subdir)
|
||||
option(FORCE_${BIGNAME}_SUBMODULE "force using ${smallname} submodule" OFF)
|
||||
if(NOT STATIC AND NOT FORCE_${BIGNAME}_SUBMODULE)
|
||||
pkg_check_modules(${BIGNAME} ${pkgconf} IMPORTED_TARGET)
|
||||
endif()
|
||||
if(${BIGNAME}_FOUND)
|
||||
add_library(${smallname} INTERFACE)
|
||||
if(NOT TARGET PkgConfig::${BIGNAME} AND CMAKE_VERSION VERSION_LESS "3.21")
|
||||
# Work around cmake bug 22180 (PkgConfig::THING not set if no flags needed)
|
||||
else()
|
||||
target_link_libraries(${smallname} INTERFACE PkgConfig::${BIGNAME})
|
||||
endif()
|
||||
message(STATUS "Found system ${smallname} ${${BIGNAME}_VERSION}")
|
||||
else()
|
||||
message(STATUS "using ${smallname} submodule")
|
||||
add_subdirectory(${subdir})
|
||||
endif()
|
||||
if(NOT TARGET ${smallname}::${smallname})
|
||||
add_library(${smallname}::${smallname} ALIAS ${smallname})
|
||||
endif()
|
||||
endmacro()
|
||||
|
||||
system_or_submodule(OXENC oxenc liboxenc>=1.0.4 oxenc)
|
||||
system_or_submodule(OXENMQ oxenmq liboxenmq>=1.2.13 oxen-mq)
|
||||
set(JSON_MultipleHeaders ON CACHE BOOL "") # Allows multi-header nlohmann use
|
||||
system_or_submodule(NLOHMANN nlohmann_json nlohmann_json>=3.7.0 nlohmann_json)
|
||||
system_or_submodule(CLI11 CLI11 CLI11>=2.2.0 CLI11)
|
||||
|
||||
|
||||
if (STATIC OR FORCE_SPDLOG_SUBMODULE OR FORCE_FMT_SUBMODULE)
|
||||
set(OXEN_LOGGING_FORCE_SUBMODULES ON CACHE INTERNAL "")
|
||||
endif()
|
||||
set(OXEN_LOGGING_SOURCE_ROOT "${PROJECT_SOURCE_DIR}/oxenss" CACHE INTERNAL "")
|
||||
add_subdirectory(oxen-logging)
|
||||
|
||||
|
||||
# uSockets doesn't really have a proper build system (just a very simple Makefile) so build it
|
||||
# ourselves.
|
||||
if (NOT CMAKE_VERSION VERSION_LESS 3.12)
|
||||
set(conf_depends "CONFIGURE_DEPENDS")
|
||||
else()
|
||||
set(conf_depends "")
|
||||
endif()
|
||||
file(GLOB usockets_src ${conf_depends}
|
||||
uWebSockets/uSockets/src/*.c
|
||||
uWebSockets/uSockets/src/eventing/*.c
|
||||
uWebSockets/uSockets/src/crypto/*.c
|
||||
uWebSockets/uSockets/src/crypto/*.cpp)
|
||||
file(COPY uWebSockets/uSockets/src/libusockets.h DESTINATION uWebSockets)
|
||||
add_library(uSockets STATIC EXCLUDE_FROM_ALL ${usockets_src})
|
||||
target_include_directories(uSockets PRIVATE uWebSockets/uSockets/src)
|
||||
target_compile_definitions(uSockets PRIVATE LIBUS_USE_OPENSSL)
|
||||
target_compile_features(uSockets PRIVATE c_std_11 cxx_std_17)
|
||||
target_link_libraries(uSockets OpenSSL::SSL OpenSSL::Crypto)
|
||||
|
||||
# On Windows uSockets uses libuv for its event loop; on Mac kqueue is the default, but that seems to
|
||||
# not be reliable on older macos versions (like 10.12), so we use libuv on macos as well.
|
||||
if (WIN32 OR (APPLE AND NOT IOS))
|
||||
if(BUILD_STATIC_DEPS)
|
||||
target_link_libraries(uSockets libuv)
|
||||
else()
|
||||
if(STATIC)
|
||||
pkg_check_modules(LIBUV libuv-static REQUIRED IMPORTED_TARGET)
|
||||
else()
|
||||
pkg_check_modules(LIBUV libuv REQUIRED IMPORTED_TARGET)
|
||||
endif()
|
||||
target_link_libraries(uSockets PkgConfig::LIBUV)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
|
||||
# The uWebSockets C++ layer is header-only but isn't actually prefixed in the repository itself, but
|
||||
# rather only on install (which, as above, is just a very simple Makefile). This is unfortunate
|
||||
# because it means that we can't use `#include <uWebSockets/App.h>` directly with the repo; so
|
||||
# instead we emulate the installation process into the build directory and include it (with the
|
||||
# prefix) from there.
|
||||
file(COPY uWebSockets/src/ DESTINATION uWebSockets/uWebSockets FILES_MATCHING PATTERN "*.h" PATTERN "*.hpp")
|
||||
add_library(uWebSockets INTERFACE)
|
||||
target_include_directories(uWebSockets SYSTEM INTERFACE ${CMAKE_CURRENT_BINARY_DIR}/uWebSockets)
|
||||
target_link_libraries(uWebSockets INTERFACE uSockets)
|
||||
target_compile_definitions(uWebSockets INTERFACE UWS_HTTPRESPONSE_NO_WRITEMARK UWS_NO_ZLIB)
|
||||
|
||||
# cpr configuration. Ideally we'd just do this via add_subdirectory, but cpr's cmake requires
|
||||
# 3.15+, and we target lower than that (and this is fairly simple to build).
|
||||
|
||||
if(NOT BUILD_STATIC_DEPS)
|
||||
find_package(CURL REQUIRED COMPONENTS HTTP HTTPS SSL)
|
||||
|
||||
# CURL::libcurl wasn't added to FindCURL until cmake 3.12, so add it if necessary
|
||||
if (CMAKE_VERSION VERSION_LESS 3.12 AND NOT TARGET CURL::libcurl)
|
||||
add_library(libcurl UNKNOWN IMPORTED GLOBAL)
|
||||
set_target_properties(libcurl PROPERTIES
|
||||
IMPORTED_LOCATION ${CURL_LIBRARIES}
|
||||
INTERFACE_INCLUDE_DIRECTORIES "${CURL_INCLUDE_DIRS}")
|
||||
add_library(CURL_libcurl INTERFACE)
|
||||
target_link_libraries(CURL_libcurl INTERFACE libcurl)
|
||||
add_library(CURL::libcurl ALIAS CURL_libcurl)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
file(GLOB cpr_sources ${conf_depends} cpr/cpr/*.cpp)
|
||||
file(READ cpr/CMakeLists.txt cpr_cmakelists_txt)
|
||||
if(NOT cpr_cmakelists_txt MATCHES "project\\(cpr VERSION ([0-9]+)\\.([0-9]+)\\.([0-9]+) LANGUAGES CXX\\)")
|
||||
message(FATAL_ERROR "Failed to detect cpr version")
|
||||
endif()
|
||||
file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/include/cpr)
|
||||
set(cpr_VERSION_MAJOR "${CMAKE_MATCH_1}")
|
||||
set(cpr_VERSION_MINOR "${CMAKE_MATCH_2}")
|
||||
set(cpr_VERSION_PATCH "${CMAKE_MATCH_3}")
|
||||
set(cpr_VERSION "${cpr_VERSION_MAJOR}.${cpr_VERSION_MINOR}.${cpr_VERSION_PATCH}")
|
||||
set(cpr_VERSION_NUM "(${cpr_VERSION_MAJOR} * 0x10000 + ${cpr_VERSION_MINOR} * 0x100 + ${cpr_VERSION_PATCH})")
|
||||
configure_file("cpr/cmake/cprver.h.in" "${CMAKE_CURRENT_BINARY_DIR}/include/cpr/cprver.h")
|
||||
|
||||
add_library(cpr STATIC EXCLUDE_FROM_ALL ${cpr_sources})
|
||||
target_link_libraries(cpr PUBLIC CURL::libcurl)
|
||||
target_include_directories(cpr PUBLIC cpr/include "${CMAKE_CURRENT_BINARY_DIR}/include")
|
||||
target_compile_definitions(cpr PUBLIC CPR_CURL_NOSIGNAL)
|
||||
add_library(cpr::cpr ALIAS cpr)
|
||||
|
||||
|
||||
option(USE_JEMALLOC "Link to jemalloc for memory allocations, if found" ON)
|
||||
add_library(jemalloc INTERFACE)
|
||||
add_library(jemalloc::jemalloc ALIAS jemalloc)
|
||||
if (USE_JEMALLOC AND NOT STATIC)
|
||||
pkg_check_modules(JEMALLOC jemalloc IMPORTED_TARGET)
|
||||
if(JEMALLOC_FOUND)
|
||||
target_link_libraries(jemalloc INTERFACE PkgConfig::JEMALLOC)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(NOT BUILD_STATIC_DEPS) # Under BUILD_STATIC_DEPS the SQLite::SQLite3 target is already set up
|
||||
# We require 3.35.x for some queries we use (such as "RETURNING"), and 3.35.5 for bug fixes, but
|
||||
# that release is very new as of this writing so we build a static release if the system one is
|
||||
# too old.
|
||||
pkg_check_modules(SQLite3 sqlite3>=3.35.5 IMPORTED_TARGET)
|
||||
|
||||
if(SQLite3_FOUND)
|
||||
add_library(SQLite::SQLite3 ALIAS PkgConfig::SQLite3)
|
||||
else()
|
||||
include(ExternalProject)
|
||||
unset(SQLITE3_VERSION CACHE)
|
||||
include(sqlite3_source)
|
||||
|
||||
set(sqlite_urls)
|
||||
foreach(mirror ${SQLITE3_MIRROR})
|
||||
list(APPEND sqlite_urls "${mirror}/${SQLITE3_SOURCE}")
|
||||
endforeach()
|
||||
|
||||
set(deps_cc "${CMAKE_C_COMPILER}")
|
||||
if(CMAKE_C_COMPILER_LAUNCHER)
|
||||
set(deps_cc "${CMAKE_C_COMPILER_LAUNCHER} ${deps_cc}")
|
||||
endif()
|
||||
set(deps_CFLAGS "-O2")
|
||||
if(USE_LTO AND NOT CMAKE_C_COMPILER_ID STREQUAL "Clang")
|
||||
# For unknown reasons, clang has trouble linking sqlite3 if we build it with lto
|
||||
set(deps_CFLAGS "${deps_CFLAGS} -flto")
|
||||
endif()
|
||||
|
||||
file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/sqlite3/include ${CMAKE_CURRENT_BINARY_DIR}/sqlite3/lib)
|
||||
ExternalProject_Add(sqlite3_external
|
||||
BUILD_IN_SOURCE ON
|
||||
PREFIX ${CMAKE_CURRENT_BINARY_DIR}/sqlite3
|
||||
URL ${sqlite_urls}
|
||||
URL_HASH ${SQLITE3_HASH}
|
||||
DOWNLOAD_NO_PROGRESS ON
|
||||
CONFIGURE_COMMAND ./configure --disable-shared --prefix=${CMAKE_CURRENT_BINARY_DIR}/sqlite3 --with-pic "CC=${deps_cc}" "CFLAGS=${deps_CFLAGS}"
|
||||
BUILD_COMMAND true
|
||||
INSTALL_COMMAND make install-includeHEADERS install-libLTLIBRARIES
|
||||
BUILD_BYPRODUCTS ${CMAKE_CURRENT_BINARY_DIR}/sqlite3/lib/libsqlite3.a ${CMAKE_CURRENT_BINARY_DIR}/sqlite3/include/sqlite3.h
|
||||
)
|
||||
add_library(SQLite::SQLite3 STATIC IMPORTED GLOBAL)
|
||||
add_dependencies(SQLite::SQLite3 sqlite3_external)
|
||||
set_target_properties(SQLite::SQLite3 PROPERTIES IMPORTED_LOCATION ${CMAKE_CURRENT_BINARY_DIR}/sqlite3/lib/libsqlite3.a)
|
||||
target_include_directories(SQLite::SQLite3 INTERFACE ${CMAKE_CURRENT_BINARY_DIR}/sqlite3/include)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# Hack around SQLiteCpp's attempts to locate sqlite3 because we *don't* want to link against the
|
||||
# system one, but don't download and build the embedded one until build time. Thankfully it
|
||||
# actually links against the SQLite::SQLite3 cmake target if it already exists, so all we have to do
|
||||
# is set that up and circumvent some of the non-target bits of its FindSQLite3.cmake.
|
||||
set(SQLite3_FOUND TRUE CACHE BOOL "" FORCE)
|
||||
file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/ignored")
|
||||
file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/ignored/sqlite3.h" "#define SQLITE_VERSION \"${SQLite3_VERSION}\"")
|
||||
set(SQLite3_INCLUDE_DIR "${CMAKE_CURRENT_BINARY_DIR}/ignored" CACHE STRING "" FORCE)
|
||||
set(SQLite3_LIBRARY "ignored" CACHE STRING "" FORCE)
|
||||
set(SQLITECPP_INTERNAL_SQLITE OFF CACHE BOOL "don't build SQLiteCpp's internal sqlite3" FORCE)
|
||||
set(SQLITE_ENABLE_COLUMN_METADATA OFF CACHE BOOL "" FORCE)
|
||||
set(SQLITECPP_RUN_CPPLINT OFF CACHE BOOL "" FORCE)
|
||||
set(SQLITECPP_RUN_CPPCHECK OFF CACHE BOOL "" FORCE)
|
||||
set(SQLITECPP_RUN_DOXYGEN OFF CACHE BOOL "" FORCE)
|
||||
set(SQLITECPP_BUILD_EXAMPLES OFF CACHE BOOL "" FORCE)
|
||||
set(SQLITECPP_BUILD_TESTS OFF CACHE BOOL "" FORCE)
|
||||
|
||||
add_subdirectory(SQLiteCpp EXCLUDE_FROM_ALL)
|
1
external/SQLiteCpp
vendored
Submodule
1
external/SQLiteCpp
vendored
Submodule
|
@ -0,0 +1 @@
|
|||
Subproject commit 9158225e5d16eb71b4404d13dd59ed70378bb4d1
|
1
external/cpr
vendored
Submodule
1
external/cpr
vendored
Submodule
|
@ -0,0 +1 @@
|
|||
Subproject commit db351ffbbadc6c4e9239daaa26e9aefa9f0ec82d
|
1
external/nlohmann_json
vendored
Submodule
1
external/nlohmann_json
vendored
Submodule
|
@ -0,0 +1 @@
|
|||
Subproject commit bc889afb4c5bf1c0d8ee29ef35eaaf4c8bef8a5d
|
1
external/oxen-logging
vendored
Submodule
1
external/oxen-logging
vendored
Submodule
|
@ -0,0 +1 @@
|
|||
Subproject commit 0dc33ea6fc9afbbb8c52c8d328cd6b3c38d347a5
|
1
external/oxen-mq
vendored
Submodule
1
external/oxen-mq
vendored
Submodule
|
@ -0,0 +1 @@
|
|||
Subproject commit 4f6dc35ea13722a5f9dcd0c3d65b6b7ac3d0f0c5
|
1
external/oxenc
vendored
Submodule
1
external/oxenc
vendored
Submodule
|
@ -0,0 +1 @@
|
|||
Subproject commit a869ae2b0152ad70855e3774a425c39a25ae1ca6
|
1
external/uWebSockets
vendored
Submodule
1
external/uWebSockets
vendored
Submodule
|
@ -0,0 +1 @@
|
|||
Subproject commit e4c6fbf8defda2aeaf941e5c656bdc589d7d331c
|
|
@ -1,88 +0,0 @@
|
|||
add_definitions(-DDISABLE_ENCRYPTION)
|
||||
|
||||
# Stop compiling post first error
|
||||
add_definitions(-Wfatal-errors)
|
||||
|
||||
add_library(httpserver_lib STATIC
|
||||
main.cpp
|
||||
http_connection.cpp
|
||||
swarm.cpp
|
||||
service_node.cpp
|
||||
serialization.cpp
|
||||
rate_limiter.cpp
|
||||
https_client.cpp
|
||||
stats.cpp
|
||||
security.cpp
|
||||
command_line.cpp
|
||||
dns_text_records.cpp
|
||||
reachability_testing.cpp
|
||||
lmq_server.cpp
|
||||
request_handler.cpp
|
||||
onion_processing.cpp
|
||||
)
|
||||
|
||||
set(JSON_MultipleHeaders ON CACHE BOOL "") # Allows multi-header nlohmann use
|
||||
add_subdirectory(../vendors/nlohmann_json nlohmann_json)
|
||||
|
||||
# TODO: enable more warnings!
|
||||
target_compile_options(httpserver_lib PRIVATE -Werror=return-type)
|
||||
|
||||
target_link_libraries(httpserver_lib PUBLIC
|
||||
common storage utils pow crypto
|
||||
OpenSSL::SSL OpenSSL::Crypto
|
||||
nlohmann_json::nlohmann_json
|
||||
oxenmq::oxenmq
|
||||
Boost::system Boost::program_options)
|
||||
|
||||
# libresolv is needed on linux, but not on BSDs, so only link it if we can find it
|
||||
find_library(RESOLV resolv)
|
||||
if(RESOLV)
|
||||
target_link_libraries(httpserver_lib PUBLIC ${RESOLV})
|
||||
endif()
|
||||
|
||||
set(BIN_NAME oxen-storage)
|
||||
|
||||
add_executable(httpserver main.cpp)
|
||||
target_compile_options(httpserver PRIVATE -Wall -Wextra -Werror)
|
||||
set_target_properties(httpserver PROPERTIES OUTPUT_NAME ${BIN_NAME})
|
||||
target_link_libraries(httpserver PRIVATE httpserver_lib)
|
||||
install(TARGETS httpserver DESTINATION bin)
|
||||
# Build Info
|
||||
find_package(Git)
|
||||
if(GIT_FOUND)
|
||||
execute_process(
|
||||
COMMAND
|
||||
git rev-parse --short HEAD
|
||||
OUTPUT_VARIABLE
|
||||
SHORT_HASH
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
else()
|
||||
set(SHORT_HASH "unknown")
|
||||
endif()
|
||||
string(TIMESTAMP BUILD_TIME UTC)
|
||||
message(STATUS "using git commit hash ${SHORT_HASH}")
|
||||
message(STATUS "using UTC build time ${BUILD_TIME}")
|
||||
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/version.cpp.in" "${CMAKE_CURRENT_BINARY_DIR}/version.cpp")
|
||||
target_sources(httpserver_lib PRIVATE "${CMAKE_CURRENT_BINARY_DIR}/version.cpp")
|
||||
target_include_directories(httpserver_lib PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}")
|
||||
|
||||
if(NOT BUILD_STATIC_DEPS)
|
||||
find_package(PkgConfig QUIET)
|
||||
if(PKG_CONFIG_FOUND)
|
||||
pkg_check_modules(SYSTEMD libsystemd)
|
||||
# Default ENABLE_SYSTEMD to true if we found it
|
||||
option(ENABLE_SYSTEMD "enable systemd integration for sd_notify" ${SYSTEMD_FOUND})
|
||||
|
||||
if(ENABLE_SYSTEMD)
|
||||
if(NOT SYSTEMD_FOUND)
|
||||
message(FATAL_ERROR "libsystemd not found")
|
||||
endif()
|
||||
target_compile_definitions(httpserver PRIVATE ENABLE_SYSTEMD)
|
||||
target_include_directories(httpserver PRIVATE ${SYSTEMD_INCLUDE_DIRS})
|
||||
target_link_libraries(httpserver PRIVATE ${SYSTEMD_LIBRARIES})
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
|
||||
|
||||
#
|
|
@ -1,102 +0,0 @@
|
|||
#include "command_line.h"
|
||||
#include "oxen_logger.h"
|
||||
|
||||
#include <filesystem>
|
||||
#include <iostream>
|
||||
|
||||
namespace oxen {
|
||||
|
||||
namespace po = boost::program_options;
|
||||
namespace fs = std::filesystem;
|
||||
|
||||
const command_line_options& command_line_parser::get_options() const {
|
||||
return options_;
|
||||
}
|
||||
|
||||
void command_line_parser::parse_args(int argc, char* argv[]) {
|
||||
std::string config_file;
|
||||
po::options_description all, hidden;
|
||||
// clang-format off
|
||||
desc_.add_options()
|
||||
("data-dir", po::value(&options_.data_dir), "Path to persistent data (defaults to ~/.oxen/storage)")
|
||||
("config-file", po::value(&config_file), "Path to custom config file (defaults to `storage-server.conf' inside --data-dir)")
|
||||
("log-level", po::value(&options_.log_level), "Log verbosity level, see Log Levels below for accepted values")
|
||||
("oxend-rpc-ip", po::value(&options_.oxend_rpc_ip), "RPC IP on which the local Oxen daemon is listening (usually localhost)")
|
||||
("oxend-rpc-port", po::value(&options_.oxend_rpc_port), "RPC port on which the local Oxen daemon is listening")
|
||||
("lmq-port", po::value(&options_.lmq_port), "Port used by OxenMQ")
|
||||
("testnet", po::bool_switch(&options_.testnet), "Start storage server in testnet mode")
|
||||
("force-start", po::bool_switch(&options_.force_start), "Ignore the initialisation ready check")
|
||||
("bind-ip", po::value(&options_.ip)->default_value("0.0.0.0"), "IP to which to bind the server")
|
||||
("version,v", po::bool_switch(&options_.print_version), "Print the version of this binary")
|
||||
("help", po::bool_switch(&options_.print_help),"Shows this help message")
|
||||
("stats-access-key", po::value(&options_.stats_access_keys)->multitoken(), "A public key (x25519) that will be given access to the `get_stats` lmq endpoint");
|
||||
// Add hidden ip and port options. You technically can use the `--ip=` and `--port=` with
|
||||
// these here, but they are meant to be positional. More usefully, you can specify `ip=`
|
||||
// and `port=` in the config file to specify them.
|
||||
hidden.add_options()
|
||||
("ip", po::value<std::string>(), "(unused)")
|
||||
("port", po::value(&options_.port), "Port to listen on")
|
||||
("oxend-key", po::value(&options_.oxend_key), "Legacy secret key (test only)")
|
||||
("lokid-rpc-ip", po::value(&options_.oxend_rpc_ip), "Backwards compatible option for oxend RPC IP")
|
||||
("lokid-rpc-port", po::value(&options_.oxend_rpc_port), "Backwards compatible option for oxend RPC port")
|
||||
("oxend-x25519-key", po::value(&options_.oxend_x25519_key), "x25519 secret key (test only)")
|
||||
("oxend-ed25519-key", po::value(&options_.oxend_ed25519_key), "ed25519 public key (test only)");
|
||||
// clang-format on
|
||||
|
||||
all.add(desc_).add(hidden);
|
||||
po::positional_options_description pos_desc;
|
||||
pos_desc.add("ip", 1);
|
||||
pos_desc.add("port", 1);
|
||||
|
||||
binary_name_ = fs::u8path(argv[0]).filename().u8string();
|
||||
|
||||
po::variables_map vm;
|
||||
|
||||
po::store(po::command_line_parser(argc, argv)
|
||||
.options(all)
|
||||
.positional(pos_desc)
|
||||
.run(),
|
||||
vm);
|
||||
po::notify(vm);
|
||||
|
||||
fs::path config_path{!config_file.empty()
|
||||
? fs::u8path(config_file)
|
||||
: fs::u8path(options_.data_dir) / "storage-server.conf"};
|
||||
|
||||
if (fs::exists(config_path)) {
|
||||
po::store(po::parse_config_file<char>(config_path.u8string().c_str(), all), vm);
|
||||
po::notify(vm);
|
||||
} else if (vm.count("config-file")) {
|
||||
throw std::runtime_error(
|
||||
"path provided in --config-file does not exist");
|
||||
}
|
||||
|
||||
if (options_.print_version || options_.print_help) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (options_.testnet && !vm.count("oxend-rpc-port")) {
|
||||
options_.oxend_rpc_port = 38157;
|
||||
}
|
||||
|
||||
if (!vm.count("lmq-port")) {
|
||||
throw std::runtime_error(
|
||||
"lmq-port command line option is not specified");
|
||||
}
|
||||
|
||||
if (!vm.count("ip") || !vm.count("port")) {
|
||||
throw std::runtime_error(
|
||||
"Invalid option: address and/or port missing.");
|
||||
}
|
||||
}
|
||||
|
||||
void command_line_parser::print_usage() const {
|
||||
std::cerr << "Usage: " << binary_name_ << " <address> <port> [...]\n\n";
|
||||
|
||||
desc_.print(std::cerr);
|
||||
|
||||
std::cerr << std::endl;
|
||||
|
||||
print_log_levels();
|
||||
}
|
||||
} // namespace oxen
|
|
@ -1,41 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include <boost/program_options.hpp>
|
||||
#include <string>
|
||||
|
||||
namespace oxen {
|
||||
|
||||
struct command_line_options {
|
||||
uint16_t port;
|
||||
std::string oxend_rpc_ip = "127.0.0.1";
|
||||
uint16_t oxend_rpc_port = 22023; // Or 38157 if `testnet`
|
||||
uint16_t lmq_port;
|
||||
bool force_start = false;
|
||||
bool print_version = false;
|
||||
bool print_help = false;
|
||||
bool testnet = false;
|
||||
std::string ip;
|
||||
std::string log_level = "info";
|
||||
std::string data_dir;
|
||||
std::string oxend_key; // test only (but needed for backwards compatibility)
|
||||
std::string oxend_x25519_key; // test only
|
||||
std::string oxend_ed25519_key; // test only
|
||||
// x25519 key that will be given access to get_stats lmq endpoint
|
||||
std::vector<std::string> stats_access_keys;
|
||||
};
|
||||
|
||||
class command_line_parser {
|
||||
public:
|
||||
void parse_args(int argc, char* argv[]);
|
||||
bool early_exit() const;
|
||||
|
||||
const command_line_options& get_options() const;
|
||||
void print_usage() const;
|
||||
|
||||
private:
|
||||
boost::program_options::options_description desc_;
|
||||
command_line_options options_;
|
||||
std::string binary_name_;
|
||||
};
|
||||
|
||||
} // namespace oxen
|
|
@ -1,152 +0,0 @@
|
|||
#include "dns_text_records.h"
|
||||
#include <nlohmann/json.hpp>
|
||||
#include "pow.hpp"
|
||||
#include "version.h"
|
||||
#include <netinet/in.h>
|
||||
#include <resolv.h>
|
||||
#include <charconv>
|
||||
|
||||
#include <boost/algorithm/string.hpp>
|
||||
|
||||
using json = nlohmann::json;
|
||||
|
||||
static constexpr char POW_DIFFICULTY_URL[] = "sentinel.messenger.loki.network";
|
||||
static constexpr char LATEST_VERSION_URL[] = "storage.version.loki.network";
|
||||
|
||||
namespace oxen {
|
||||
|
||||
namespace dns {
|
||||
|
||||
static std::string get_dns_record(const char* url, std::error_code& ec) {
|
||||
|
||||
std::string data;
|
||||
unsigned char query_buffer[1024] = {};
|
||||
|
||||
// don't want to assume that ec has default value
|
||||
ec = std::error_code{};
|
||||
|
||||
int response =
|
||||
res_query(url, ns_c_in, ns_t_txt, query_buffer, sizeof(query_buffer));
|
||||
|
||||
if (response == -1) {
|
||||
OXEN_LOG(warn, "res_query failed while retrieving dns entry");
|
||||
ec = std::make_error_code(std::errc::bad_message);
|
||||
return data;
|
||||
}
|
||||
|
||||
ns_msg nsMsg;
|
||||
|
||||
if (ns_initparse(query_buffer, response, &nsMsg) == -1) {
|
||||
OXEN_LOG(warn, "ns_initparse failed while retrieving dns entry");
|
||||
ec = std::make_error_code(std::errc::bad_message);
|
||||
return data;
|
||||
}
|
||||
|
||||
// We get back a sequence of N...[N...] values where N is a byte indicating
|
||||
// the length of the immediately following ... data.
|
||||
const auto count = ns_msg_count(nsMsg, ns_s_an);
|
||||
|
||||
constexpr size_t DNS_MAX_CHUNK_LENGTH = 255;
|
||||
|
||||
data.reserve(DNS_MAX_CHUNK_LENGTH * count);
|
||||
for (int i = 0; i < count; i++) {
|
||||
ns_rr rr;
|
||||
if (ns_parserr(&nsMsg, ns_s_an, i, &rr) == -1) {
|
||||
OXEN_LOG(warn, "ns_parserr failed while parsing dns entry");
|
||||
ec = std::make_error_code(std::errc::bad_message);
|
||||
return data;
|
||||
}
|
||||
auto* rdata = ns_rr_rdata(rr);
|
||||
data.append(reinterpret_cast<const char*>(rdata + 1), rdata[0]);
|
||||
}
|
||||
|
||||
return data;
|
||||
}
|
||||
|
||||
std::vector<pow_difficulty_t> query_pow_difficulty(std::error_code& ec) {
|
||||
OXEN_LOG(debug, "Querying PoW difficulty...");
|
||||
|
||||
std::vector<pow_difficulty_t> new_history;
|
||||
const std::string data = get_dns_record(POW_DIFFICULTY_URL, ec);
|
||||
if (ec) {
|
||||
return new_history;
|
||||
}
|
||||
|
||||
try {
|
||||
const json history = json::parse(data, nullptr, true);
|
||||
for (const auto& el : history.items()) {
|
||||
const std::chrono::milliseconds timestamp(std::stoul(el.key()));
|
||||
const int difficulty = el.value().get<int>();
|
||||
new_history.push_back(pow_difficulty_t{timestamp, difficulty});
|
||||
}
|
||||
return new_history;
|
||||
} catch (const std::exception& e) {
|
||||
OXEN_LOG(warn, "JSON parsing of PoW data failed: {}", e.what());
|
||||
ec = std::make_error_code(std::errc::bad_message);
|
||||
return new_history;
|
||||
}
|
||||
}
|
||||
|
||||
static std::string query_latest_version() {
|
||||
OXEN_LOG(debug, "Querying Latest Version...");
|
||||
|
||||
std::error_code ec;
|
||||
const std::string version_str = get_dns_record(LATEST_VERSION_URL, ec);
|
||||
|
||||
if (ec) {
|
||||
return "";
|
||||
}
|
||||
|
||||
return version_str;
|
||||
}
|
||||
|
||||
using version_t = std::array<uint16_t, 3>;
|
||||
|
||||
static bool parse_version(const std::string& str, version_t& version_out) {
|
||||
std::vector<std::string> strs;
|
||||
strs.reserve(3);
|
||||
boost::split(strs, str, boost::is_any_of("."));
|
||||
if (strs.size() != 3)
|
||||
return false;
|
||||
|
||||
for (size_t i = 0; i < 3; i++) {
|
||||
auto* end = strs[i].data() + strs[i].size();
|
||||
auto [p, ec] = std::from_chars(strs[i].data(), end, version_out[i]);
|
||||
if (ec != std::errc() || p != end)
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void check_latest_version() {
|
||||
|
||||
const auto latest_version_str = query_latest_version();
|
||||
|
||||
if (latest_version_str.empty()) {
|
||||
OXEN_LOG(warn, "Failed to retrieve or parse the latest version number "
|
||||
"from DNS record");
|
||||
return;
|
||||
}
|
||||
|
||||
version_t latest_version;
|
||||
if (!parse_version(latest_version_str, latest_version)) {
|
||||
OXEN_LOG(warn, "Could not parse the latest version: {}",
|
||||
latest_version_str);
|
||||
return;
|
||||
}
|
||||
|
||||
if (STORAGE_SERVER_VERSION < latest_version) {
|
||||
OXEN_LOG(warn,
|
||||
"You are using an outdated version of the storage server "
|
||||
"({}), please update to {}!",
|
||||
STORAGE_SERVER_VERSION_STRING, latest_version_str);
|
||||
} else {
|
||||
OXEN_LOG(debug,
|
||||
"You are using the latest version of the storage server ({})",
|
||||
STORAGE_SERVER_VERSION_STRING);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace dns
|
||||
} // namespace oxen
|
|
@ -1,16 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include "oxen_logger.h"
|
||||
|
||||
struct pow_difficulty_t;
|
||||
|
||||
namespace oxen {
|
||||
|
||||
namespace dns {
|
||||
|
||||
std::vector<pow_difficulty_t> query_pow_difficulty(std::error_code& ec);
|
||||
|
||||
void check_latest_version();
|
||||
|
||||
} // namespace dns
|
||||
} // namespace oxen
|
File diff suppressed because it is too large
Load diff
|
@ -1,358 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include <chrono>
|
||||
#include <filesystem>
|
||||
#include <iostream>
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
|
||||
#include <nlohmann/json_fwd.hpp>
|
||||
#include <boost/asio.hpp>
|
||||
#include <boost/asio/ssl/stream.hpp>
|
||||
#include <boost/beast/core.hpp>
|
||||
#include <boost/beast/http.hpp>
|
||||
#include <boost/beast/version.hpp>
|
||||
#include <boost/format.hpp>
|
||||
|
||||
#include "oxen_common.h"
|
||||
#include "oxend_key.h"
|
||||
#include "swarm.h"
|
||||
|
||||
constexpr auto OXEN_SENDER_SNODE_PUBKEY_HEADER = "X-Loki-Snode-PubKey";
|
||||
constexpr auto OXEN_SNODE_SIGNATURE_HEADER = "X-Loki-Snode-Signature";
|
||||
constexpr auto OXEN_SENDER_KEY_HEADER = "X-Sender-Public-Key";
|
||||
constexpr auto OXEN_TARGET_SNODE_KEY = "X-Target-Snode-Key";
|
||||
constexpr auto OXEN_LONG_POLL_HEADER = "X-Loki-Long-Poll";
|
||||
|
||||
template <typename T>
|
||||
class ChannelEncryption;
|
||||
|
||||
class RateLimiter;
|
||||
|
||||
namespace http = boost::beast::http; // from <boost/beast/http.hpp>
|
||||
namespace ssl = boost::asio::ssl; // from <boost/asio/ssl.hpp>
|
||||
|
||||
using request_t = http::request<http::string_body>;
|
||||
using response_t = http::response<http::string_body>;
|
||||
|
||||
namespace oxen {
|
||||
|
||||
std::shared_ptr<request_t> build_post_request(const char* target,
|
||||
std::string&& data);
|
||||
|
||||
class Security;
|
||||
|
||||
class RequestHandler;
|
||||
class Response;
|
||||
|
||||
namespace storage {
|
||||
struct Item;
|
||||
}
|
||||
|
||||
using storage::Item;
|
||||
|
||||
enum class SNodeError { NO_ERROR, ERROR_OTHER, NO_REACH, HTTP_ERROR };
|
||||
|
||||
struct sn_response_t {
|
||||
SNodeError error_code;
|
||||
std::shared_ptr<std::string> body;
|
||||
std::optional<response_t> raw_response;
|
||||
};
|
||||
|
||||
template <typename OStream>
|
||||
OStream& operator<<(OStream& os, const sn_response_t& res) {
|
||||
switch (res.error_code) {
|
||||
case SNodeError::NO_ERROR:
|
||||
os << "NO_ERROR";
|
||||
break;
|
||||
case SNodeError::ERROR_OTHER:
|
||||
os << "ERROR_OTHER";
|
||||
break;
|
||||
case SNodeError::NO_REACH:
|
||||
os << "NO_REACH";
|
||||
break;
|
||||
case SNodeError::HTTP_ERROR:
|
||||
os << "HTTP_ERROR";
|
||||
break;
|
||||
}
|
||||
|
||||
return os << "(" << (res.body ? *res.body : "n/a") << ")";
|
||||
}
|
||||
|
||||
struct blockchain_test_answer_t {
|
||||
uint64_t res_height;
|
||||
};
|
||||
|
||||
/// Blockchain test parameters
|
||||
struct bc_test_params_t {
|
||||
uint64_t max_height;
|
||||
uint64_t seed;
|
||||
};
|
||||
|
||||
using http_callback_t = std::function<void(sn_response_t)>;
|
||||
|
||||
class OxendClient {
|
||||
|
||||
boost::asio::io_context& ioc_;
|
||||
std::string oxend_rpc_ip_;
|
||||
const uint16_t oxend_rpc_port_;
|
||||
|
||||
public:
|
||||
OxendClient(boost::asio::io_context& ioc, std::string ip, uint16_t port);
|
||||
void make_oxend_request(std::string_view method,
|
||||
const nlohmann::json& params,
|
||||
http_callback_t&& cb) const;
|
||||
void make_custom_oxend_request(const std::string& daemon_ip,
|
||||
const uint16_t daemon_port,
|
||||
std::string_view method,
|
||||
const nlohmann::json& params,
|
||||
http_callback_t&& cb) const;
|
||||
// Synchronously fetches the private key from oxend. Designed to be called
|
||||
// *before* the io_context has been started (this runs it, waits for a
|
||||
// successful fetch, then restarts it when finished).
|
||||
std::tuple<private_key_t, private_key_ed25519_t, private_key_t>
|
||||
wait_for_privkey();
|
||||
};
|
||||
|
||||
constexpr auto SESSION_TIME_LIMIT = std::chrono::seconds(60);
|
||||
|
||||
void make_http_request(boost::asio::io_context& ioc, const std::string& ip,
|
||||
uint16_t port, const std::shared_ptr<request_t>& req,
|
||||
http_callback_t&& cb);
|
||||
|
||||
/// One outgoing plain-HTTP request/response exchange, driven asynchronously
/// on the supplied io_context.
class HttpClientSession
    : public std::enable_shared_from_this<HttpClientSession> {

    using tcp = boost::asio::ip::tcp;

    boost::asio::io_context& ioc_;
    tcp::socket socket_;
    tcp::endpoint endpoint_;
    http_callback_t callback_;
    // Bounds the whole session (see SESSION_TIME_LIMIT)
    boost::asio::steady_timer deadline_timer_;

    boost::beast::flat_buffer buffer_;
    /// NOTE: this needs to be a shared pointer since
    /// it is very common for the same request to be
    /// sent to multiple snodes
    std::shared_ptr<request_t> req_;
    response_t res_;

    // Set once the user callback has been dispatched, so it isn't fired twice
    bool used_callback_ = false;
    bool needs_cleanup = true;

    void on_connect();

    void on_write(boost::system::error_code ec, std::size_t bytes_transferred);

    void on_read(boost::system::error_code ec, std::size_t bytes_transferred);

    // Dispatch `callback_` with the given error/body
    void trigger_callback(SNodeError error,
                          std::shared_ptr<std::string>&& body);

    void clean_up();

  public:
    // Resolver and socket require an io_context
    HttpClientSession(boost::asio::io_context& ioc, const tcp::endpoint& ep,
                      const std::shared_ptr<request_t>& req,
                      http_callback_t&& cb);

    // initiate the client connection
    void start();

    ~HttpClientSession();
};
|
||||
|
||||
namespace http_server {
|
||||
|
||||
/// A single accepted client connection: performs the TLS handshake, parses
/// the request, dispatches it (process_request) and writes the response.
class connection_t : public std::enable_shared_from_this<connection_t> {

    using tcp = boost::asio::ip::tcp;

  private:
    boost::asio::io_context& ioc_;
    ssl::context& ssl_ctx_;

    // The socket for the currently connected client.
    tcp::socket socket_;

    // The buffer for performing reads.
    boost::beast::flat_buffer buffer_{8192};
    ssl::stream<tcp::socket&> stream_;
    const Security& security_;

    // Contains the request message
    http::request_parser<http::string_body> request_;

    // The response message.
    response_t response_;

    // whether the response should be sent asyncronously,
    // as opposed to directly after connection_t::process_request
    bool delay_response_ = false;

    // TODO: remove SN, only use Reqeust Handler as a mediator
    ServiceNode& service_node_;

    RequestHandler& request_handler_;

    RateLimiter& rate_limiter_;

    // The timer for repeating an action within one connection
    boost::asio::steady_timer repeat_timer_;
    int repetition_count_ = 0;
    std::chrono::time_point<std::chrono::steady_clock> start_timestamp_;

    // The timer for putting a deadline on connection processing.
    boost::asio::steady_timer deadline_;

    /// TODO: move these if possible
    std::map<std::string, std::string> header_;

    std::stringstream body_stream_;

    // Note that we are only sending a single message through the
    // notification mechanism. If we somehow accumulated multiple
    // messages before notification event happens (unlikely), the
    // following messages will be delivered with the client's
    // consequent (and immediate) retrieve request
    struct notification_context_t {
        // The timer used for internal db polling
        boost::asio::steady_timer timer;
        // the message is stored here momentarily; needed because
        // we can't pass it using current notification mechanism
        std::optional<message_t> message;
        // Messenger public key that this connection is registered for
        std::string pubkey;
    };

    // Present only while the client is subscribed for notifications
    std::optional<notification_context_t> notification_ctx_;

    // If present, this function will be called just before
    // writing the response
    std::function<void(response_t&)> response_modifier_;

  public:
    connection_t(boost::asio::io_context& ioc, ssl::context& ssl_ctx,
                 tcp::socket socket, ServiceNode& sn, RequestHandler& rh,
                 RateLimiter& rate_limiter, const Security& security);

    ~connection_t();

    // Connection index, mainly used for debugging
    uint64_t conn_idx;

    /// Initiate the asynchronous operations associated with the connection.
    void start();

    // Deliver a message to a client waiting on this connection (see the
    // notification_context_t note above)
    void notify(const message_t* msg);

  private:
    void do_handshake();
    void on_handshake(boost::system::error_code ec);
    /// Asynchronously receive a complete request message.
    void read_request();

    void do_close();
    void on_shutdown(boost::system::error_code ec);

    /// process GET /get_stats/v1
    void on_get_stats();

    /// Determine what needs to be done with the request message
    /// (synchronously).
    void process_request();

    /// Unsubscribe listener (if any) and shutdown the connection
    void clean_up();

    /// Asynchronously transmit the response message.
    void write_response();

    /// Syncronously (?) process client store/load requests
    void process_client_req_rate_limited();

    void process_swarm_req(std::string_view target);

    /// Process onion request from the client (json)
    void process_onion_req_v1();

    /// Process onion request from the client (binary)
    void process_onion_req_v2();

    void process_proxy_req();

    void process_file_proxy_req();

    // Check whether we have spent enough time on this connection.
    void register_deadline();

    /// Process storage test request and repeat if necessary
    void process_storage_test_req(uint64_t height,
                                  const std::string& tester_addr,
                                  const std::string& msg_hash);

    void process_blockchain_test_req(uint64_t height,
                                     const std::string& tester_pk,
                                     bc_test_params_t params);

    void set_response(const Response& res);

    // Extract the named header(s) into header_; presumably false when a
    // header is missing -- confirm in the implementation
    bool parse_header(const char* key);

    template <typename... Args>
    bool parse_header(const char* first, Args... args);

    bool validate_snode_request();
};
|
||||
|
||||
/// Entry point for the HTTP(S) server component (implemented in the
/// corresponding .cpp); serves clients on ip:port using the given service
/// node, request handler, rate limiter and security context.
void run(boost::asio::io_context& ioc, const std::string& ip, uint16_t port,
         const std::filesystem::path& base_path, ServiceNode& sn,
         RequestHandler& rh, RateLimiter& rate_limiter, Security&);
|
||||
|
||||
} // namespace http_server
|
||||
|
||||
/// Human-readable name of an SNodeError value; "[UNKNOWN]" for anything
/// not covered by the enumerators below.
constexpr const char* error_string(SNodeError err) {
    switch (err) {
        case oxen::SNodeError::NO_ERROR: return "NO_ERROR";
        case oxen::SNodeError::ERROR_OTHER: return "ERROR_OTHER";
        case oxen::SNodeError::NO_REACH: return "NO_REACH";
        case oxen::SNodeError::HTTP_ERROR: return "HTTP_ERROR";
        default: return "[UNKNOWN]";
    }
}
|
||||
|
||||
/// Two-part payload produced by parse_combined_payload: an encrypted blob
/// plus its accompanying JSON string.
struct CiphertextPlusJson {
    std::string ciphertext;
    std::string json;
};
|
||||
|
||||
// TODO: move this from http_connection.h after refactoring
// Split a combined payload into its ciphertext and JSON components.
auto parse_combined_payload(const std::string& payload) -> CiphertextPlusJson;
|
||||
|
||||
} // namespace oxen
|
||||
|
||||
namespace fmt {
|
||||
|
||||
template <>
|
||||
struct formatter<oxen::SNodeError> {
|
||||
|
||||
template <typename ParseContext>
|
||||
constexpr auto parse(ParseContext& ctx) {
|
||||
return ctx.begin();
|
||||
}
|
||||
|
||||
template <typename FormatContext>
|
||||
auto format(const oxen::SNodeError& err, FormatContext& ctx) {
|
||||
return format_to(ctx.out(), error_string(err));
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace fmt
|
|
@ -1,350 +0,0 @@
|
|||
#include "https_client.h"
|
||||
#include "oxen_logger.h"
|
||||
#include "net_stats.h"
|
||||
#include "signature.h"
|
||||
|
||||
#include <boost/algorithm/string/erase.hpp>
|
||||
#include <openssl/x509.h>
|
||||
|
||||
namespace oxen {
|
||||
|
||||
using error_code = boost::system::error_code;
|
||||
|
||||
/// Asynchronously send `req` over HTTPS to the service node at
/// sn_address:port; the node's response signature is later checked against
/// sn_pubkey_b32z (see HttpsClientSession::verify_signature).
void make_https_request(boost::asio::io_context& ioc,
                        const std::string& sn_address, uint16_t port,
                        const std::string& sn_pubkey_b32z,
                        const std::shared_ptr<request_t>& req,
                        http_callback_t&& cb) {

    error_code ec;
    boost::asio::ip::tcp::resolver resolver(ioc);
#ifdef INTEGRATION_TEST
    const auto resolve_results =
        resolver.resolve("0.0.0.0", std::to_string(port), ec);
#else

    // "0.0.0.0" is the placeholder used before a node's real IP is known
    if (sn_address == "0.0.0.0") {
        OXEN_LOG(debug, "Could not initiate request to snode (we don't know "
                        "their IP yet).");

        cb(sn_response_t{SNodeError::NO_REACH, nullptr});
        return;
    }

    const auto resolve_results =
        resolver.resolve(sn_address, std::to_string(port), ec);
#endif
    if (ec) {
        OXEN_LOG(error,
                 "https: Failed to parse the IP address. Error code = {}. "
                 "Message: {}",
                 ec.value(), ec.message());
        // NOTE(review): the callback is silently dropped on this path --
        // the caller never learns the request failed; confirm intended.
        return;
    }

    // Shared TLS client context for all outgoing sessions
    static ssl::context ctx{ssl::context::tlsv12_client};

    auto session = std::make_shared<HttpsClientSession>(
        ioc, ctx, std::move(resolve_results), req, std::move(cb),
        sn_pubkey_b32z);

    session->start();
}
|
||||
|
||||
/// Asynchronously send `req` over HTTPS (port 443) to `url`, which may
/// optionally carry an "https://" prefix. No snode signature verification
/// is performed (pubkey passed as nullopt).
void make_https_request(boost::asio::io_context& ioc, const std::string& url,
                        const std::shared_ptr<request_t>& req,
                        http_callback_t&& cb) {

    // Shared resolver; bound to the io_context of the *first* call
    static boost::asio::ip::tcp::resolver resolver(ioc);

    constexpr char prefix[] = "https://";
    std::string query = url;

    // Strip the scheme so only the host is handed to DNS resolution
    if (url.find(prefix) == 0) {
        query.erase(0, sizeof(prefix) - 1);
    }

    auto resolve_handler = [&ioc, req, query, cb = std::move(cb)](
                               const boost::system::error_code& ec,
                               boost::asio::ip::tcp::resolver::results_type
                                   resolve_results) mutable {
        if (ec) {
            OXEN_LOG(error, "DNS resolution error for {}: {}", query,
                     ec.message());
            cb({SNodeError::ERROR_OTHER});
            return;
        }

        // Shared TLS client context for all outgoing sessions
        static ssl::context ctx{ssl::context::tlsv12_client};

        auto session = std::make_shared<HttpsClientSession>(
            ioc, ctx, std::move(resolve_results), req, std::move(cb),
            std::nullopt);

        session->start();
    };

    constexpr char https_port[] = "443";

    resolver.async_resolve(
        query, https_port,
        boost::asio::ip::tcp::resolver::query::numeric_service,
        resolve_handler);
}
|
||||
|
||||
/// Serialize an X509 certificate to a PEM string; returns "" on any openssl
/// failure. The memory BIO is always released (previously it leaked when
/// PEM_write_bio_X509 failed).
static std::string x509_to_string(X509* x509) {
    BIO* bio_out = BIO_new(BIO_s_mem());
    if (!bio_out) {
        OXEN_LOG(critical, "Could not allocate openssl BIO");
        return "";
    }
    if (!PEM_write_bio_X509(bio_out, x509)) {
        OXEN_LOG(critical, "Could not write x509 cert to openssl BIO");
        // BUG FIX: free the BIO on this error path (was leaked)
        BIO_free(bio_out);
        return "";
    }
    BUF_MEM* bio_buf;
    BIO_get_mem_ptr(bio_out, &bio_buf);
    std::string pem = std::string(bio_buf->data, bio_buf->length);
    if (!BIO_free(bio_out)) {
        OXEN_LOG(critical, "Could not free openssl BIO");
    }
    return pem;
}
|
||||
|
||||
/// Build a session over pre-resolved endpoints; `sn_pubkey_b32z` enables
/// snode signature verification when present.
HttpsClientSession::HttpsClientSession(
    boost::asio::io_context& ioc, ssl::context& ssl_ctx,
    tcp::resolver::results_type resolve_results,
    const std::shared_ptr<request_t>& req, http_callback_t&& cb,
    std::optional<std::string> sn_pubkey_b32z)
    : ioc_(ioc), ssl_ctx_(ssl_ctx), resolve_results_(resolve_results),
      callback_(cb), deadline_timer_(ioc), stream_(ioc, ssl_ctx_), req_(req),
      server_pub_key_b32z_(std::move(sn_pubkey_b32z)) {

    // Track live outgoing HTTPS connections (decremented in the destructor)
    get_net_stats().https_connections_out++;

    response_.body_limit(1024 * 1024 * 10); // 10 mb

    // Monotonic id for tracing this session in the logs
    static uint64_t connection_count = 0;
    this->connection_idx = connection_count++;
}
|
||||
|
||||
/// Begin the session: set SNI, start the async connect, and arm the
/// session-wide deadline (SESSION_TIME_LIMIT).
void HttpsClientSession::start() {
    // Set SNI Hostname (many hosts need this to handshake successfully)
    if (!SSL_set_tlsext_host_name(stream_.native_handle(), "service node")) {
        boost::beast::error_code ec{static_cast<int>(::ERR_get_error()),
                                    boost::asio::error::get_ssl_category()};
        OXEN_LOG(critical, "{}", ec.message());
        return;
    }
    boost::asio::async_connect(
        stream_.next_layer(), resolve_results_,
        [this, self = shared_from_this()](boost::system::error_code ec,
                                          const tcp::endpoint& endpoint) {
            /// TODO: I think I should just call again if ec ==
            /// EINTR
            if (ec) {
                /// Don't forget to print the error from where we call this!
                /// (similar to http)
                OXEN_LOG(debug,
                         "[https client]: could not connect to {}:{}, message: "
                         "{} ({})",
                         endpoint.address().to_string(), endpoint.port(),
                         ec.message(), ec.value());
                trigger_callback(SNodeError::NO_REACH, nullptr);
                return;
            }

            self->on_connect();
        });

    // Deadline: expiry (not cancellation) closes the socket, aborting
    // whatever stage the session happens to be in.
    deadline_timer_.expires_after(SESSION_TIME_LIMIT);
    deadline_timer_.async_wait(
        [self = shared_from_this()](const error_code& ec) {
            if (ec) {
                if (ec != boost::asio::error::operation_aborted) {
                    OXEN_LOG(error,
                             "Deadline timer failed in https client session "
                             "[{}: {}]",
                             ec.value(), ec.message());
                }
            } else {
                OXEN_LOG(debug, "client socket timed out");
                self->do_close();
            }
        });
}
|
||||
|
||||
/// TCP connected: record the socket, install the certificate-capturing
/// verify callback, and start the TLS handshake.
void HttpsClientSession::on_connect() {
    OXEN_LOG(trace, "on connect, connection idx: {}", this->connection_idx);

    const auto sockfd = stream_.lowest_layer().native_handle();
    OXEN_LOG(trace, "Open https client socket: {}", sockfd);
    get_net_stats().record_socket_open(sockfd);

    // The TLS layer accepts ANY certificate here (verify_none plus a
    // callback that always returns true); the peer's cert is merely captured
    // so verify_signature() can check a snode signature over it later.
    stream_.set_verify_mode(ssl::verify_none);

    stream_.set_verify_callback(
        [this](bool preverified, ssl::verify_context& ctx) -> bool {
            if (!preverified) {
                X509_STORE_CTX* handle = ctx.native_handle();
                X509* x509 = X509_STORE_CTX_get0_cert(handle);
                server_cert_ = x509_to_string(x509);
            }
            return true;
        });
    stream_.async_handshake(ssl::stream_base::client,
                            std::bind(&HttpsClientSession::on_handshake,
                                      shared_from_this(),
                                      std::placeholders::_1));
}
|
||||
|
||||
/// TLS handshake completion: on success, write the request.
/// NOTE(review): on failure we return without trigger_callback -- the
/// destructor's fallback posts ERROR_OTHER instead, but do_close is never
/// called on this path; confirm intended.
void HttpsClientSession::on_handshake(boost::system::error_code ec) {
    if (ec) {
        OXEN_LOG(error, "Failed to perform a handshake with {}: {}",
                 server_pub_key_b32z_.value_or("(not snode)"), ec.message());

        return;
    }

    http::async_write(stream_, *req_,
                      std::bind(&HttpsClientSession::on_write,
                                shared_from_this(), std::placeholders::_1,
                                std::placeholders::_2));
}
|
||||
|
||||
/// Request fully written (or failed): on success, start reading the
/// response; on error, report ERROR_OTHER to the callback.
void HttpsClientSession::on_write(error_code ec, size_t bytes_transferred) {

    OXEN_LOG(trace, "on write");
    if (ec) {
        OXEN_LOG(error, "Https error on write, ec: {}. Message: {}", ec.value(),
                 ec.message());
        trigger_callback(SNodeError::ERROR_OTHER, nullptr);
        return;
    }

    OXEN_LOG(trace, "Successfully transferred {} bytes.", bytes_transferred);

    // Receive the HTTP response
    http::async_read(stream_, buffer_, response_,
                     std::bind(&HttpsClientSession::on_read, shared_from_this(),
                               std::placeholders::_1, std::placeholders::_2));
}
|
||||
|
||||
/// Verify the OXEN_SNODE_SIGNATURE_HEADER response header: the signature
/// must check out over the hash of the server's TLS certificate (captured
/// during the handshake) against the snode's public key. Trivially true
/// when the destination is not a snode (no pubkey set).
bool HttpsClientSession::verify_signature() {

    if (!server_pub_key_b32z_)
        return true;

    const auto& response = response_.get();

    const auto it = response.find(OXEN_SNODE_SIGNATURE_HEADER);
    if (it == response.end()) {
        OXEN_LOG(warn, "no signature found in header from {}",
                 *server_pub_key_b32z_);
        return false;
    }
    // signature is expected to be base64 enoded
    const auto signature = it->value().to_string();
    const auto hash = hash_data(server_cert_);
    return check_signature(signature, hash, *server_pub_key_b32z_);
}
|
||||
|
||||
/// Response received (or read failed): verify the snode signature where
/// applicable, dispatch the callback with the outcome, then close the
/// stream gracefully.
void HttpsClientSession::on_read(error_code ec, size_t bytes_transferred) {

    OXEN_LOG(trace, "Successfully received {} bytes", bytes_transferred);

    const auto &response = response_.get();

    // end_of_stream just means the server closed after sending the response
    if (!ec || (ec == http::error::end_of_stream)) {

        if (http::to_status_class(response.result_int()) ==
            http::status_class::successful) {

            if (server_pub_key_b32z_ && !verify_signature()) {
                OXEN_LOG(debug, "Bad signature from {}", *server_pub_key_b32z_);
                trigger_callback(SNodeError::ERROR_OTHER, nullptr, response);
            } else {
                auto body = std::make_shared<std::string>(response.body());
                trigger_callback(SNodeError::NO_ERROR, std::move(body), response);
            }

        } else {
            OXEN_LOG(debug, "ERROR OTHER: [{}] {}", response.result_int(), response.body());
            trigger_callback(SNodeError::ERROR_OTHER, nullptr, response);
        }

    } else {

        /// Do we need to handle `operation aborted` separately here (due to
        /// deadline timer)?
        OXEN_LOG(error, "Error on read: {}. Message: {}", ec.value(),
                 ec.message());
        trigger_callback(SNodeError::ERROR_OTHER, nullptr, response);
    }

    // Gracefully close the socket
    do_close();

    // not_connected happens sometimes so don't bother reporting it.
    if (ec && ec != boost::system::errc::not_connected) {

        OXEN_LOG(error, "ec: {}. Message: {}", ec.value(), ec.message());
        return;
    }

    // If we get here then the connection is closed gracefully
}
|
||||
|
||||
/// Post the user callback onto the io_context (once per session -- guarded
/// by used_callback_, which the destructor consults) and cancel the
/// session deadline.
void HttpsClientSession::trigger_callback(
    SNodeError error, std::shared_ptr<std::string>&& body,
    std::optional<response_t> raw_response) {
    ioc_.post(std::bind(callback_, sn_response_t{error, body, raw_response}));
    used_callback_ = true;
    deadline_timer_.cancel();
}
|
||||
|
||||
/// Begin the graceful TLS shutdown; completion is handled in on_shutdown.
void HttpsClientSession::do_close() {

    // Note: I don't think both the server and the client
    // should initiate the shutdown, but I'm going to ignore
    // this error as we will remove https soon

    // Gracefully close the stream
    stream_.async_shutdown(std::bind(&HttpsClientSession::on_shutdown,
                                     shared_from_this(),
                                     std::placeholders::_1));
}
|
||||
|
||||
/// Finish shutdown: treat the expected eof as success, record the socket
/// close in net stats, and close the underlying TCP socket.
void HttpsClientSession::on_shutdown(boost::system::error_code ec) {
    if (ec == boost::asio::error::eof) {
        // Rationale:
        // http://stackoverflow.com/questions/25587403/boost-asio-ssl-async-shutdown-always-finishes-with-an-error
        ec.assign(0, ec.category());
    } else if (ec) {
        // This one is too noisy, so demoted to debug:
        OXEN_LOG(trace, "could not shutdown stream gracefully: {} ({})",
                 ec.message(), ec.value());
    }

    const auto sockfd = stream_.lowest_layer().native_handle();
    OXEN_LOG(trace, "Close https socket: {}", sockfd);
    get_net_stats().record_socket_close(sockfd);

    stream_.lowest_layer().close();

    // If we get here then the connection is closed gracefully
}
|
||||
|
||||
/// We execute callback (if haven't already) here to make sure it is called
HttpsClientSession::~HttpsClientSession() {

    if (!used_callback_) {
        // If we destroy the session before posting the callback,
        // it must be due to some error
        ioc_.post(std::bind(callback_,
                            sn_response_t{SNodeError::ERROR_OTHER, nullptr}));
    }

    // Pairs with the increment in the constructor
    get_net_stats().https_connections_out--;
}
|
||||
} // namespace oxen
|
|
@ -1,80 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include "http_connection.h"
|
||||
#include <functional>
|
||||
#include <optional>
|
||||
|
||||
namespace oxen {
|
||||
// Callback type invoked with the outcome (sn_response_t) of an HTTPS request.
using http_callback_t = std::function<void(sn_response_t)>;

/// Send `req` over HTTPS to a service node at ip:port; the node's response
/// signature is verified against sn_pubkey_b32z.
void make_https_request(boost::asio::io_context& ioc, const std::string& ip,
                        uint16_t port, const std::string& sn_pubkey_b32z,
                        const std::shared_ptr<request_t>& req,
                        http_callback_t&& cb);

/// Send `req` over HTTPS (port 443) to `url`; no signature verification.
void make_https_request(boost::asio::io_context& ioc, const std::string& url,
                        const std::shared_ptr<request_t>& req,
                        http_callback_t&& cb);
|
||||
|
||||
/// One outgoing HTTPS request/response session. The user callback fires
/// exactly once: either via trigger_callback or, as a fallback, from the
/// destructor.
class HttpsClientSession
    : public std::enable_shared_from_this<HttpsClientSession> {

    // For debugging purposes mostly
    uint64_t connection_idx;

    using tcp = boost::asio::ip::tcp;

    boost::asio::io_context& ioc_;
    ssl::context& ssl_ctx_;
    tcp::resolver::results_type resolve_results_;
    http_callback_t callback_;
    // Session-wide deadline (see SESSION_TIME_LIMIT)
    boost::asio::steady_timer deadline_timer_;

    // keep the cert in memory for post-handshake verification
    std::string server_cert_;

    ssl::stream<tcp::socket> stream_;
    boost::beast::flat_buffer buffer_;
    /// NOTE: this needs to be a shared pointer since
    /// it is very common for the same request to be
    /// sent to multiple snodes
    std::shared_ptr<request_t> req_;

    http::response_parser<http::string_body> response_;

    // Snode's pub key (none if signature verification is not used / not a
    // snode)
    std::optional<std::string> server_pub_key_b32z_;

    // Set once the callback has been dispatched, so it isn't fired twice
    bool used_callback_ = false;

    void on_connect();

    void on_write(boost::system::error_code ec, std::size_t bytes_transferred);

    void on_read(boost::system::error_code ec, std::size_t bytes_transferred);

    void
    trigger_callback(SNodeError error, std::shared_ptr<std::string>&& body,
                     std::optional<response_t> raw_response = std::nullopt);

    void on_handshake(boost::system::error_code ec);
    // Check the snode's signature over (a hash of) its TLS certificate
    bool verify_signature();

    void do_close();
    void on_shutdown(boost::system::error_code ec);

  public:
    // Resolver and socket require an io_context
    HttpsClientSession(boost::asio::io_context& ioc, ssl::context& ssl_ctx,
                       tcp::resolver::results_type resolve_results,
                       const std::shared_ptr<request_t>& req,
                       http_callback_t&& cb,
                       std::optional<std::string> sn_pubkey_b32z);

    // initiate the client connection
    void start();

    ~HttpsClientSession();
};
|
||||
} // namespace oxen
|
|
@ -1,243 +0,0 @@
|
|||
#include "lmq_server.h"
|
||||
|
||||
#include "dev_sink.h"
|
||||
#include "oxen_common.h"
|
||||
#include "oxen_logger.h"
|
||||
#include "oxend_key.h"
|
||||
#include "request_handler.h"
|
||||
#include "service_node.h"
|
||||
|
||||
#include <oxenmq/hex.h>
|
||||
#include <oxenmq/oxenmq.h>
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
#include <optional>
|
||||
|
||||
namespace oxen {
|
||||
|
||||
/// Map an x25519 pubkey (binary) to a "tcp://ip:port" connect string for
/// OxenMQ peer connections; returns "" when the node is unknown.
std::string OxenmqServer::peer_lookup(std::string_view pubkey_bin) const {

    OXEN_LOG(trace, "[LMQ] Peer Lookup");

    // TODO: don't create a new string here
    std::optional<sn_record_t> sn =
        this->service_node_->find_node_by_x25519_bin(std::string(pubkey_bin));

    if (sn) {
        return fmt::format("tcp://{}:{}", sn->ip(), sn->lmq_port());
    } else {
        OXEN_LOG(debug, "[LMQ] peer node not found {}!", pubkey_bin);
        return "";
    }
}
|
||||
|
||||
/// Handle "sn.data" from a peer SN: concatenate the message parts, hand
/// them to ServiceNode::process_push_batch, then acknowledge.
void OxenmqServer::handle_sn_data(oxenmq::Message& message) {

    OXEN_LOG(debug, "[LMQ] handle_sn_data");
    OXEN_LOG(debug, "[LMQ] thread id: {}", std::this_thread::get_id());
    OXEN_LOG(debug, "[LMQ] from: {}", oxenmq::to_hex(message.conn.pubkey()));

    std::stringstream ss;

    // We are only expecting a single part message, so consider removing this
    for (auto& part : message.data) {
        ss << part;
    }

    // TODO: proces push batch should move to "Request handler"
    service_node_->process_push_batch(ss.str());

    OXEN_LOG(debug, "[LMQ] send reply");

    // TODO: Investigate if the above could fail and whether we should report
    // that to the sending SN
    message.send_reply();
}; // (stray-but-harmless trailing semicolon)
|
||||
|
||||
/// Handle "sn.proxy_exit": process a client's proxied request via
/// RequestHandler::process_proxy_exit and send the result back to the
/// originating SN using its stored reply tag.
void OxenmqServer::handle_sn_proxy_exit(oxenmq::Message& message) {

    OXEN_LOG(debug, "[LMQ] handle_sn_proxy_exit");
    OXEN_LOG(debug, "[LMQ] thread id: {}", std::this_thread::get_id());
    OXEN_LOG(debug, "[LMQ] from: {}", oxenmq::to_hex(message.conn.pubkey()));

    if (message.data.size() != 2) {
        OXEN_LOG(debug, "Expected 2 message parts, got {}",
                 message.data.size());
        return;
    }

    const auto& client_key = message.data[0];
    const auto& payload = message.data[1];

    // Captured by value in the lambda below: `message` will not outlive the
    // asynchronous processing
    auto& reply_tag = message.reply_tag;
    auto& origin_pk = message.conn.pubkey();

    // TODO: accept string_view?
    request_handler_->process_proxy_exit(
        std::string(client_key), std::string(payload),
        [this, origin_pk, reply_tag](oxen::Response res) {
            OXEN_LOG(debug, " Proxy exit status: {}", res.status());

            if (res.status() == Status::OK) {
                this->oxenmq_->send(origin_pk, "REPLY", reply_tag,
                                    res.message());

            } else {
                // We reply with 2 messages which will be treated as
                // an error (rather than timeout)
                this->oxenmq_->send(origin_pk, "REPLY", reply_tag,
                                    fmt::format("{}", res.status()),
                                    res.message());
                OXEN_LOG(debug, "Error: status is not OK for proxy_exit: {}",
                         res.status());
            }
        });
}
|
||||
|
||||
/// Handle "sn.onion_req" / "sn.onion_req_v2" (v2 selects the binary onion
/// protocol). Also answers the legacy single-part "ping" used for
/// reachability testing.
void OxenmqServer::handle_onion_request(oxenmq::Message& message, bool v2) {

    OXEN_LOG(debug, "Got an onion request over OXENMQ");

    // Captured by value below: the reply is sent after `message` is gone
    auto& reply_tag = message.reply_tag;
    auto& origin_pk = message.conn.pubkey();

    auto on_response = [this, origin_pk,
                        reply_tag](oxen::Response res) mutable {
        OXEN_LOG(trace, "on response: {}...", to_string(res).substr(0, 100));

        std::string status = std::to_string(static_cast<int>(res.status()));

        oxenmq_->send(origin_pk, "REPLY", reply_tag, std::move(status),
                      res.message());
    };

    if (message.data.size() == 1 && message.data[0] == "ping") {
        // Before 2.0.3 we reply with a bad request, below, but reply here to
        // avoid putting the error message in the log on 2.0.3+ nodes. (the
        // reply code here doesn't actually matter; the ping test only requires
        // that we provide *some* response).
        OXEN_LOG(debug, "Remote pinged me");
        service_node_->update_last_ping(ReachType::ZMQ);
        on_response(oxen::Response{Status::OK, "pong"});
        return;
    }

    if (message.data.size() != 2) {
        OXEN_LOG(error, "Expected 2 message parts, got {}",
                 message.data.size());
        on_response(oxen::Response{Status::BAD_REQUEST,
                                   "Incorrect number of messages"});
        return;
    }

    // Parts: [ephemeral key, onion ciphertext]
    const auto& eph_key = message.data[0];
    const auto& ciphertext = message.data[1];

    request_handler_->process_onion_req(std::string(ciphertext),
                                        std::string(eph_key), on_response, v2);
}
|
||||
|
||||
/// Handle "service.get_logs" (admin-only): reply with the recent log
/// entries buffered by the dev sink (sink #3 of the "oxen_logger" logger),
/// serialized as JSON.
void OxenmqServer::handle_get_logs(oxenmq::Message& message) {

    OXEN_LOG(debug, "Received get_logs request via LMQ");

    auto dev_sink = dynamic_cast<oxen::dev_sink_mt*>(
        spdlog::get("oxen_logger")->sinks()[2].get());

    if (dev_sink == nullptr) {
        OXEN_LOG(critical, "Sink #3 should be dev sink");
        assert(false);
        auto err_msg = "Developer error: sink #3 is not a dev sink.";
        message.send_reply(err_msg);
        // BUG FIX: previously fell through and dereferenced the null
        // dev_sink below -- a crash in release builds where assert is a
        // no-op.
        return;
    }

    nlohmann::json val;
    val["entries"] = dev_sink->peek();
    message.send_reply(val.dump(4));
}
|
||||
|
||||
/// Handle "service.get_stats" (admin-only): reply with the service node's
/// stats payload.
void OxenmqServer::handle_get_stats(oxenmq::Message& message) {

    OXEN_LOG(debug, "Received get_stats request via LMQ");

    auto payload = service_node_->get_stats();

    message.send_reply(payload);
}
|
||||
|
||||
/// Wire up and start the OxenMQ server: build the OxenMQ instance with our
/// keypair, register the "sn" (peer-to-peer) and "service" (admin-only)
/// command categories, install curve authentication that grants admin to
/// the configured stats access keys, then start listening on port_.
void OxenmqServer::init(ServiceNode* sn, RequestHandler* rh,
                        const oxend_key_pair_t& keypair,
                        const std::vector<std::string>& stats_access_keys) {

    using oxenmq::Allow;

    service_node_ = sn;
    request_handler_ = rh;

    // Keys arrive hex-encoded; stored in binary to compare against the
    // connection pubkey in the listen_curve callback below
    for (const auto& key : stats_access_keys) {
        this->stats_access_keys.push_back(oxenmq::from_hex(key));
    }

    auto pubkey = key_to_string(keypair.public_key);
    auto seckey = key_to_string(keypair.private_key);

    // Forward oxenmq's internal log records into our logger, mapping levels
    auto logger = [](oxenmq::LogLevel level, const char* file, int line,
                     std::string message) {
#define LMQ_LOG_MAP(LMQ_LVL, SS_LVL)                                           \
    case oxenmq::LogLevel::LMQ_LVL:                                            \
        OXEN_LOG(SS_LVL, "[{}:{}]: {}", file, line, message);                  \
        break;
        switch (level) {
            LMQ_LOG_MAP(fatal, critical);
            LMQ_LOG_MAP(error, error);
            LMQ_LOG_MAP(warn, warn);
            LMQ_LOG_MAP(info, info);
            LMQ_LOG_MAP(trace, trace);
        default:
            OXEN_LOG(debug, "[{}:{}]: {}", file, line, message);
        };
#undef LMQ_LOG_MAP
    };

    auto lookup_fn = [this](auto pk) { return this->peer_lookup(pk); };

    oxenmq_.reset(new OxenMQ{pubkey, seckey, true /* is service node */,
                             lookup_fn, logger});

    // (typo fix: log message previously said "listenting")
    OXEN_LOG(info, "OxenMQ is listening on port {}", port_);

    oxenmq_->log_level(oxenmq::LogLevel::info);
    // clang-format off
    oxenmq_->add_category("sn", oxenmq::Access{oxenmq::AuthLevel::none, true, false})
        .add_request_command("data", [this](auto& m) { this->handle_sn_data(m); })
        .add_request_command("proxy_exit", [this](auto& m) { this->handle_sn_proxy_exit(m); })
        .add_request_command("onion_req", [this](auto& m) { this->handle_onion_request(m, false); })
        .add_request_command("onion_req_v2", [this](auto& m) { this->handle_onion_request(m, true); })
        ;

    oxenmq_->add_category("service", oxenmq::AuthLevel::admin)
        .add_request_command("get_stats", [this](auto& m) { this->handle_get_stats(m); })
        .add_request_command("get_logs", [this](auto& m) { this->handle_get_logs(m); });

    // clang-format on
    oxenmq_->set_general_threads(1);

    // Admin access only for connections authenticated with a stats key
    oxenmq_->listen_curve(
        fmt::format("tcp://0.0.0.0:{}", port_),
        [this](std::string_view /*ip*/, std::string_view pk, bool /*sn*/) {
            const auto& keys = this->stats_access_keys;
            const auto it = std::find(keys.begin(), keys.end(), pk);
            return it == keys.end() ? oxenmq::AuthLevel::none
                                    : oxenmq::AuthLevel::admin;
        });

    oxenmq_->MAX_MSG_SIZE =
        10 * 1024 * 1024; // 10 MB (needed by the fileserver)

    oxenmq_->start();
}
|
||||
|
||||
// Construction only records the port; all real setup happens in init().
OxenmqServer::OxenmqServer(uint16_t port) : port_(port){};
OxenmqServer::~OxenmqServer() = default;
|
||||
|
||||
} // namespace oxen
|
|
@ -1,71 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include <cstdint>
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
#include <vector>
|
||||
|
||||
namespace oxenmq {
|
||||
class OxenMQ;
|
||||
struct Allow;
|
||||
class Message;
|
||||
} // namespace oxenmq
|
||||
|
||||
using oxenmq::OxenMQ;
|
||||
|
||||
namespace oxen {
|
||||
|
||||
struct oxend_key_pair_t;
|
||||
class ServiceNode;
|
||||
class RequestHandler;
|
||||
|
||||
/// Owns the OxenMQ instance and the handlers for the storage server's LMQ
/// command categories ("sn" for peers, "service" for admin access).
class OxenmqServer {

    std::unique_ptr<OxenMQ> oxenmq_;

    // Has information about current SNs
    ServiceNode* service_node_;

    RequestHandler* request_handler_;

    // Get nodes' address
    std::string peer_lookup(std::string_view pubkey_bin) const;

    // Handle Session data coming from peer SN
    void handle_sn_data(oxenmq::Message& message);

    // Handle Session client requests arrived via proxy
    void handle_sn_proxy_exit(oxenmq::Message& message);

    // v2 indicates whether to use the new (v2) protocol
    void handle_onion_request(oxenmq::Message& message, bool v2);

    // Reply with recent buffered log entries (admin command)
    void handle_get_logs(oxenmq::Message& message);

    // Reply with service node statistics (admin command)
    void handle_get_stats(oxenmq::Message& message);

    uint16_t port_ = 0;

    // Access keys for the 'service' category as binary
    std::vector<std::string> stats_access_keys;

  public:
    OxenmqServer(uint16_t port);
    ~OxenmqServer();

    // Initialize oxenmq
    void init(ServiceNode* sn, RequestHandler* rh,
              const oxend_key_pair_t& keypair,
              const std::vector<std::string>& stats_access_key);

    uint16_t port() { return port_; }

    /// True if OxenMQ instance has been set
    explicit operator bool() const { return (bool)oxenmq_; }
    /// Dereferencing via * or -> accesses the contained OxenMQ instance.
    OxenMQ& operator*() const { return *oxenmq_; }
    OxenMQ* operator->() const { return oxenmq_.get(); }
};
|
||||
|
||||
} // namespace oxen
|
|
@ -1,259 +0,0 @@
|
|||
#include "channel_encryption.hpp"
|
||||
#include "command_line.h"
|
||||
#include "http_connection.h"
|
||||
#include "oxen_logger.h"
|
||||
#include "oxend_key.h"
|
||||
#include "rate_limiter.h"
|
||||
#include "security.h"
|
||||
#include "service_node.h"
|
||||
#include "swarm.h"
|
||||
#include "utils.hpp"
|
||||
#include "version.h"
|
||||
|
||||
#include "lmq_server.h"
|
||||
#include "request_handler.h"
|
||||
|
||||
#include <sodium.h>
|
||||
#include <oxenmq/hex.h>
|
||||
|
||||
#include <cstdlib>
|
||||
#include <filesystem>
|
||||
#include <iostream>
|
||||
#include <vector>
|
||||
|
||||
#ifdef ENABLE_SYSTEMD
|
||||
extern "C" {
|
||||
#include <systemd/sd-daemon.h>
|
||||
}
|
||||
#endif
|
||||
|
||||
namespace fs = std::filesystem;

// Returns the current user's home directory from $HOME, or std::nullopt when
// it cannot be determined (always nullopt on Windows — no default dir yet).
static std::optional<fs::path> get_home_dir() {

    /// TODO: support default dir for Windows
#ifdef WIN32
    return std::nullopt;
#endif

    // Fix: use nullptr and a direct empty-string check instead of
    // NULL + strlen (whose header was never included here).
    const char* home = std::getenv("HOME");
    if (home == nullptr || *home == '\0')
        return std::nullopt;

    return fs::u8path(home);
}
|
||||
|
||||
#ifdef ENABLE_SYSTEMD
|
||||
// Notifies the systemd watchdog that we are alive and publishes the node's
// status line, then re-arms itself to run again in 10 seconds. Because the
// async_wait lambda captures `timer` and `sn` by reference, both must
// outlive the io_context running this chain.
static void systemd_watchdog_tick(boost::asio::steady_timer& timer,
                                  const oxen::ServiceNode& sn) {
    using namespace std::literals;
    sd_notify(0, ("WATCHDOG=1\nSTATUS=" + sn.get_status_line()).c_str());
    timer.expires_after(10s);
    // NOTE(review): the error code is ignored, so the tick re-arms even if
    // the wait was cancelled — confirm that is intended.
    timer.async_wait([&](const boost::system::error_code&) {
        systemd_watchdog_tick(timer, sn);
    });
}
|
||||
#endif
|
||||
|
||||
constexpr int EXIT_INVALID_PORT = 2;
|
||||
|
||||
// Storage-server entry point. Phases: parse CLI options; resolve data dir;
// set up logging; validate network options; initialize libsodium; obtain
// node keys (from oxend, or from CLI in INTEGRATION_TEST builds); construct
// the lmq server, ServiceNode, request handler and security objects; then
// run the blocking HTTP server loop.
int main(int argc, char* argv[]) {

    oxen::command_line_parser parser;

    try {
        parser.parse_args(argc, argv);
    } catch (const std::exception& e) {
        std::cerr << e.what() << std::endl;
        parser.print_usage();
        return EXIT_FAILURE;
    }

    auto options = parser.get_options();

    if (options.print_help) {
        parser.print_usage();
        return EXIT_SUCCESS;
    }

    if (options.print_version) {
        std::cout << STORAGE_SERVER_VERSION_INFO;
        return EXIT_SUCCESS;
    }

    // Default data dir: ~/.oxen/storage (or ~/.oxen/testnet/storage).
    // If no home dir can be determined, data_dir stays empty (relative cwd).
    if (options.data_dir.empty()) {
        if (auto home_dir = get_home_dir()) {
            if (options.testnet) {
                options.data_dir =
                    (*home_dir / ".oxen" / "testnet" / "storage").string();
            } else {
                options.data_dir = (*home_dir / ".oxen" / "storage").string();
            }
        }
    }

    if (!fs::exists(options.data_dir)) {
        fs::create_directories(options.data_dir);
    }

    oxen::LogLevel log_level;
    if (!oxen::parse_log_level(options.log_level, log_level)) {
        std::cerr << "Incorrect log level: " << options.log_level << std::endl;
        oxen::print_log_levels();
        return EXIT_FAILURE;
    }

    oxen::init_logging(options.data_dir, log_level);

    if (options.testnet) {
        oxen::set_testnet();
        OXEN_LOG(warn,
                 "Starting in testnet mode, make sure this is intentional!");
    }

    // Always print version for the logs
    OXEN_LOG(info, "{}", STORAGE_SERVER_VERSION_INFO);

    // Binding to localhost would make the node unreachable by peers.
    if (options.ip == "127.0.0.1") {
        OXEN_LOG(critical,
                 "Tried to bind oxen-storage to localhost, please bind "
                 "to outward facing address");
        return EXIT_FAILURE;
    }

    if (options.port == options.oxend_rpc_port) {
        OXEN_LOG(error, "Storage server port must be different from that of "
                        "Oxend! Terminating.");
        exit(EXIT_INVALID_PORT);
    }

    OXEN_LOG(info, "Setting log level to {}", options.log_level);
    OXEN_LOG(info, "Setting database location to {}", options.data_dir);
    OXEN_LOG(info, "Setting Oxend RPC to {}:{}", options.oxend_rpc_ip,
             options.oxend_rpc_port);
    OXEN_LOG(info, "Https server is listening at {}:{}", options.ip,
             options.port);
    OXEN_LOG(info, "OxenMQ is listening at {}:{}", options.ip,
             options.lmq_port);

    boost::asio::io_context ioc{1};
    boost::asio::io_context worker_ioc{1};

    // libsodium must be initialized before any crypto operations below.
    if (sodium_init() != 0) {
        OXEN_LOG(error, "Could not initialize libsodium");
        return EXIT_FAILURE;
    }

    // Onion requests rely on hardware-accelerated AES-256-GCM.
    if (crypto_aead_aes256gcm_is_available() == 0) {
        OXEN_LOG(error, "AES-256-GCM is not available on this CPU");
        return EXIT_FAILURE;
    }

    {
        const auto fd_limit = util::get_fd_limit();
        if (fd_limit != -1) {
            OXEN_LOG(debug, "Open file descriptor limit: {}", fd_limit);
        } else {
            OXEN_LOG(debug, "Open descriptor limit: N/A");
        }
    }

    try {

        auto oxend_client = oxen::OxendClient(ioc, options.oxend_rpc_ip,
                                              options.oxend_rpc_port);

        // Normally we request the key from daemon, but in integrations/swarm
        // testing we are not able to do that, so we extract the key as a
        // command line option:
        oxen::private_key_t private_key;
        oxen::private_key_ed25519_t private_key_ed25519; // Unused at the moment
        oxen::private_key_t private_key_x25519;
#ifndef INTEGRATION_TEST
        std::tie(private_key, private_key_ed25519, private_key_x25519) =
            oxend_client.wait_for_privkey();
#else
        private_key = oxen::oxendKeyFromHex(options.oxend_key);
        OXEN_LOG(info, "OXEND LEGACY KEY: {}", options.oxend_key);

        private_key_x25519 = oxen::oxendKeyFromHex(options.oxend_x25519_key);
        OXEN_LOG(info, "x25519 SECRET KEY: {}", options.oxend_x25519_key);

        private_key_ed25519 =
            oxen::private_key_ed25519_t::from_hex(options.oxend_ed25519_key);

        OXEN_LOG(info, "ed25519 SECRET KEY: {}", options.oxend_ed25519_key);
#endif

        const auto public_key = oxen::derive_pubkey_legacy(private_key);
        OXEN_LOG(info, "Retrieved keys from Lokid; our SN pubkey is: {}",
                 oxenmq::to_hex(public_key.begin(), public_key.end()));

        // TODO: avoid conversion to vector
        const std::vector<uint8_t> priv(private_key_x25519.begin(),
                                        private_key_x25519.end());
        ChannelEncryption<std::string> channel_encryption(priv);

        oxen::oxend_key_pair_t oxend_key_pair{private_key, public_key};

        const auto public_key_x25519 =
            oxen::derive_pubkey_x25519(private_key_x25519);

        OXEN_LOG(info, "SN x25519 pubkey is: {}", oxenmq::to_hex(
            public_key_x25519.begin(), public_key_x25519.end()));

        const auto public_key_ed25519 =
            oxen::derive_pubkey_ed25519(private_key_ed25519);

        const std::string pubkey_ed25519_hex = oxenmq::to_hex(
            public_key_ed25519.begin(), public_key_ed25519.end());

        OXEN_LOG(info, "SN ed25519 pubkey is: {}", pubkey_ed25519_hex);

        oxen::oxend_key_pair_t oxend_key_pair_x25519{private_key_x25519,
                                                     public_key_x25519};

        for (const auto& key : options.stats_access_keys) {
            OXEN_LOG(info, "Stats access key: {}", key);
        }

        // We pass port early because we want to send it in the first ping to
        // Oxend (in ServiceNode's constructor), but don't want to initialize
        // the rest of lmq server before we have a reference to ServiceNode
        oxen::OxenmqServer oxenmq_server(options.lmq_port);

        // TODO: SN doesn't need oxenmq_server, just the lmq components
        oxen::ServiceNode service_node(ioc, worker_ioc, options.port,
                                       oxenmq_server, oxend_key_pair,
                                       pubkey_ed25519_hex, options.data_dir,
                                       oxend_client, options.force_start);

        oxen::RequestHandler request_handler(ioc, service_node, oxend_client,
                                             channel_encryption);

        oxenmq_server.init(&service_node, &request_handler,
                           oxend_key_pair_x25519, options.stats_access_keys);

        RateLimiter rate_limiter;

        oxen::Security security(oxend_key_pair, options.data_dir);

#ifdef ENABLE_SYSTEMD
        sd_notify(0, "READY=1");
        boost::asio::steady_timer systemd_watchdog_timer(ioc);
        systemd_watchdog_tick(systemd_watchdog_timer, service_node);
#endif

        // Blocks running the HTTP server until shutdown.
        oxen::http_server::run(ioc, options.ip, options.port, options.data_dir,
                               service_node, request_handler, rate_limiter,
                               security);
    } catch (const std::exception& e) {
        // It seems possible for logging to throw its own exception,
        // in which case it will be propagated to libc...
        std::cerr << "Exception caught in main: " << e.what() << std::endl;
        return EXIT_FAILURE;
    } catch (...) {
        std::cerr << "Unknown exception caught in main." << std::endl;
        return EXIT_FAILURE;
    }
}
|
|
@ -1,36 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include "oxen_logger.h"
|
||||
#include <set>
|
||||
|
||||
// Process-wide network connection statistics. The counters are atomic;
// open_fds is NOT synchronized and is only maintained in INTEGRATION_TEST
// builds (see the #ifdef guards below).
struct net_stats_t {

    std::atomic<uint32_t> connections_in{0};
    std::atomic<uint32_t> http_connections_out{0};
    std::atomic<uint32_t> https_connections_out{0};

    // File descriptors currently believed open (integration testing only)
    std::set<int> open_fds;

    // Record sockfd as open, logging a critical message if it was already
    // recorded. No-op outside INTEGRATION_TEST builds.
    void record_socket_open(int sockfd) {
#ifdef INTEGRATION_TEST
        if (open_fds.find(sockfd) != open_fds.end()) {
            OXEN_LOG(critical, "Already recorded as open: {}!", sockfd);
        }
        open_fds.insert(sockfd);
#endif
    }

    // Record sockfd as closed, logging a critical message if it was never
    // recorded as open. No-op outside INTEGRATION_TEST builds.
    void record_socket_close(int sockfd) {
#ifdef INTEGRATION_TEST
        if (open_fds.find(sockfd) == open_fds.end()) {
            OXEN_LOG(critical, "Socket is NOT recorded as open: {}", sockfd);
        }
        open_fds.erase(sockfd);
#endif
    }
};
|
||||
|
||||
// Accessor for the process-wide network statistics singleton
// (function-local static: constructed on first use, thread-safe init).
inline net_stats_t& get_net_stats() {
    static net_stats_t instance;
    return instance;
}
|
|
@ -1,352 +0,0 @@
|
|||
#include "channel_encryption.hpp"
|
||||
#include "oxen_logger.h"
|
||||
#include "request_handler.h"
|
||||
#include "service_node.h"
|
||||
#include <oxenmq/base64.h>
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
/// This is only included because of `parse_combined_payload`,
|
||||
/// in the future it will be moved
|
||||
#include "http_connection.h"
|
||||
|
||||
#include <charconv>
|
||||
#include <variant>
|
||||
|
||||
using nlohmann::json;
|
||||
|
||||
namespace oxen {
|
||||
|
||||
/// The request is to be forwarded to another SS node
struct RelayToNodeInfo {
    /// Inner ciphertext for next node
    std::string ciphertext;
    // Key to be forwarded to next node for decryption
    std::string ephemeral_key;
    // Next node's ed25519 key
    std::string next_node;
};

/// The request is to be forwarded to some non-SS server
/// that supports our protocol (e.g. Session File Server)
struct RelayToServerInfo {
    // Result of decryption (intact)
    std::string payload;
    // Server's address
    std::string host;
    // Request's target
    std::string target;
};

/// We are the final destination for this request
/// NOTE(review): "Desitnation" is a typo for "Destination"; kept as-is
/// because other code in this file refers to this name.
struct FinalDesitnationInfo {
    // Decrypted request body to be processed by this node
    std::string body;
};

/// Failure modes of the process_ciphertext_* helpers below.
enum class ProcessCiphertextError {
    INVALID_CIPHERTEXT,
    INVALID_JSON,
};

/// Result of parsing one onion-request layer: exactly one of the relay /
/// final-destination / error alternatives.
using ParsedInfo = std::variant<RelayToNodeInfo, RelayToServerInfo,
                                FinalDesitnationInfo, ProcessCiphertextError>;
|
||||
|
||||
static auto
|
||||
process_ciphertext_v1(const ChannelEncryption<std::string>& decryptor,
|
||||
const std::string& ciphertext,
|
||||
const std::string& ephem_key) -> ParsedInfo {
|
||||
|
||||
std::string plaintext;
|
||||
|
||||
try {
|
||||
if (!oxenmq::is_base64(ciphertext))
|
||||
throw std::runtime_error{"cipher text is not base64 encoded"};
|
||||
const std::string ciphertext_bin = oxenmq::from_base64(ciphertext);
|
||||
|
||||
plaintext = decryptor.decrypt_gcm(ciphertext_bin, ephem_key);
|
||||
} catch (const std::exception& e) {
|
||||
OXEN_LOG(debug, "Error decrypting an onion request: {}", e.what());
|
||||
return ProcessCiphertextError::INVALID_CIPHERTEXT;
|
||||
}
|
||||
|
||||
OXEN_LOG(debug, "onion request decrypted: (len: {})", plaintext.size());
|
||||
|
||||
try {
|
||||
|
||||
const json inner_json = json::parse(plaintext, nullptr, true);
|
||||
|
||||
if (inner_json.find("body") != inner_json.end()) {
|
||||
|
||||
auto body = inner_json.at("body").get_ref<const std::string&>();
|
||||
|
||||
OXEN_LOG(debug, "Found body: <{}>", body);
|
||||
return FinalDesitnationInfo{body};
|
||||
} else if (inner_json.find("host") != inner_json.end()) {
|
||||
|
||||
const auto& host =
|
||||
inner_json.at("host").get_ref<const std::string&>();
|
||||
const auto& target =
|
||||
inner_json.at("target").get_ref<const std::string&>();
|
||||
return RelayToServerInfo{plaintext, host, target};
|
||||
|
||||
} else {
|
||||
// We fall back to forwarding a request to the next node
|
||||
const auto& ciphertext =
|
||||
inner_json.at("ciphertext").get_ref<const std::string&>();
|
||||
const auto& dest =
|
||||
inner_json.at("destination").get_ref<const std::string&>();
|
||||
const auto& ekey =
|
||||
inner_json.at("ephemeral_key").get_ref<const std::string&>();
|
||||
|
||||
return RelayToNodeInfo{ciphertext, ekey, dest};
|
||||
}
|
||||
|
||||
} catch (std::exception& e) {
|
||||
OXEN_LOG(debug, "Error parsing inner JSON in onion request: {}",
|
||||
e.what());
|
||||
return ProcessCiphertextError::INVALID_JSON;
|
||||
}
|
||||
}
|
||||
|
||||
static auto
|
||||
process_ciphertext_v2(const ChannelEncryption<std::string>& decryptor,
|
||||
const std::string& ciphertext,
|
||||
const std::string& ephem_key) -> ParsedInfo {
|
||||
std::string plaintext;
|
||||
|
||||
try {
|
||||
plaintext = decryptor.decrypt_gcm(ciphertext, ephem_key);
|
||||
} catch (const std::exception& e) {
|
||||
OXEN_LOG(debug, "Error decrypting an onion request: {}", e.what());
|
||||
return ProcessCiphertextError::INVALID_CIPHERTEXT;
|
||||
}
|
||||
|
||||
OXEN_LOG(debug, "onion request decrypted: (len: {})", plaintext.size());
|
||||
|
||||
const auto parsed = parse_combined_payload(plaintext);
|
||||
|
||||
try {
|
||||
|
||||
const json inner_json = json::parse(parsed.json, nullptr, true);
|
||||
|
||||
/// Kind of unfortunate that we use "headers" (which is empty)
|
||||
/// to identify we are the final destination...
|
||||
if (inner_json.find("headers") != inner_json.end()) {
|
||||
|
||||
OXEN_LOG(trace, "Found body: <{}>", parsed.ciphertext);
|
||||
|
||||
/// In v2 the body is parsed.ciphertext
|
||||
return FinalDesitnationInfo{parsed.ciphertext};
|
||||
} else if (inner_json.find("host") != inner_json.end()) {
|
||||
|
||||
const auto& host =
|
||||
inner_json.at("host").get_ref<const std::string&>();
|
||||
const auto& target =
|
||||
inner_json.at("target").get_ref<const std::string&>();
|
||||
return RelayToServerInfo{plaintext, host, target};
|
||||
|
||||
} else {
|
||||
// We fall back to forwarding a request to the next node
|
||||
const auto& dest =
|
||||
inner_json.at("destination").get_ref<const std::string&>();
|
||||
const auto& ekey =
|
||||
inner_json.at("ephemeral_key").get_ref<const std::string&>();
|
||||
|
||||
return RelayToNodeInfo{parsed.ciphertext, ekey, dest};
|
||||
}
|
||||
|
||||
} catch (std::exception& e) {
|
||||
OXEN_LOG(debug, "Error parsing inner JSON in onion request: {}",
|
||||
e.what());
|
||||
return ProcessCiphertextError::INVALID_JSON;
|
||||
}
|
||||
}
|
||||
|
||||
// Canonical 504 response used when an upstream snode does not answer.
static oxen::Response gateway_timeout() {
    return oxen::Response{Status::GATEWAY_TIMEOUT, "Request time out"};
}
|
||||
|
||||
// Map a numeric HTTP status string (e.g. "200") onto our Status enum.
// Unparsable, out-of-range, or unknown codes map to INTERNAL_SERVER_ERROR.
static auto make_status(std::string_view status) -> oxen::Status {

    int code;
    auto res =
        std::from_chars(status.data(), status.data() + status.size(), code);

    // Fix: from_chars signals success with a value-initialized error code;
    // checking `ec != std::errc{}` covers every failure mode rather than
    // enumerating the two known ones.
    if (res.ec != std::errc{}) {
        return Status::INTERNAL_SERVER_ERROR;
    }

    switch (code) {

    case 200:
        return Status::OK;
    case 400:
        return Status::BAD_REQUEST;
    case 403:
        return Status::FORBIDDEN;
    case 406:
        return Status::NOT_ACCEPTABLE;
    case 421:
        return Status::MISDIRECTED_REQUEST;
    case 432:
        return Status::INVALID_POW;
    case 500:
        return Status::INTERNAL_SERVER_ERROR;
    case 502:
        return Status::BAD_GATEWAY;
    case 503:
        return Status::SERVICE_UNAVAILABLE;
    case 504:
        return Status::GATEWAY_TIMEOUT;
    default:
        return Status::INTERNAL_SERVER_ERROR;
    }
}
|
||||
|
||||
static void relay_to_node(const ServiceNode& service_node,
|
||||
const RelayToNodeInfo& info,
|
||||
std::function<void(oxen::Response)> cb, int req_idx,
|
||||
bool v2) {
|
||||
|
||||
const auto& dest = info.next_node;
|
||||
const auto& payload = info.ciphertext;
|
||||
const auto& ekey = info.ephemeral_key;
|
||||
|
||||
auto dest_node = service_node.find_node_by_ed25519_pk(dest);
|
||||
|
||||
if (!dest_node) {
|
||||
auto msg = fmt::format("Next node not found: {}", dest);
|
||||
OXEN_LOG(warn, "{}", msg);
|
||||
auto res = oxen::Response{Status::BAD_GATEWAY, std::move(msg)};
|
||||
cb(std::move(res));
|
||||
return;
|
||||
}
|
||||
|
||||
nlohmann::json req_body;
|
||||
|
||||
req_body["ciphertext"] = payload;
|
||||
req_body["ephemeral_key"] = ekey;
|
||||
|
||||
auto on_response = [cb, &service_node](bool success,
|
||||
std::vector<std::string> data) {
|
||||
// Processing the result we got from upstream
|
||||
|
||||
if (!success) {
|
||||
OXEN_LOG(debug, "[Onion request] Request time out");
|
||||
cb(gateway_timeout());
|
||||
return;
|
||||
}
|
||||
|
||||
// We only expect a two-part message
|
||||
if (data.size() != 2) {
|
||||
OXEN_LOG(debug, "[Onion request] Incorrect number of messages: {}",
|
||||
data.size());
|
||||
cb(oxen::Response{Status::INTERNAL_SERVER_ERROR,
|
||||
"Incorrect number of messages from gateway"});
|
||||
return;
|
||||
}
|
||||
|
||||
/// We use http status codes (for now)
|
||||
if (data[0] != "200") {
|
||||
OXEN_LOG(debug, "Onion request relay failed with: {}", data[1]);
|
||||
}
|
||||
cb(oxen::Response{make_status(data[0]), std::move(data[1])});
|
||||
};
|
||||
|
||||
OXEN_LOG(debug, "send_onion_to_sn, sn: {} reqidx: {}", *dest_node, req_idx);
|
||||
|
||||
if (v2) {
|
||||
service_node.send_onion_to_sn_v2(*dest_node, payload, ekey,
|
||||
on_response);
|
||||
} else {
|
||||
service_node.send_onion_to_sn_v1(*dest_node, payload, ekey,
|
||||
on_response);
|
||||
}
|
||||
}
|
||||
|
||||
// Process a single onion-request layer: decrypt `ciphertext` with
// `ephem_key`, then either handle the request locally (final destination),
// relay it to the next snode, or relay it to an external server. `cb` is
// invoked exactly once with the resulting Response. `v2` selects the v2
// payload format (binary combined payload) over v1 (base64 + JSON body).
void RequestHandler::process_onion_req(const std::string& ciphertext,
                                       const std::string& ephem_key,
                                       std::function<void(oxen::Response)> cb,
                                       bool v2) {
    // Refuse until this snode is an active member of the network.
    if (!service_node_.snode_ready()) {
        auto msg =
            fmt::format("Snode not ready: {}",
                        service_node_.own_address().pubkey_ed25519_hex());
        cb(oxen::Response{Status::SERVICE_UNAVAILABLE, std::move(msg)});
        return;
    }

    OXEN_LOG(debug, "process_onion_req, v2: {}", v2);

    // Monotonic request index, used only for log correlation.
    static int counter = 0;

    ParsedInfo res;

    if (v2) {
        res =
            process_ciphertext_v2(this->channel_cipher_, ciphertext, ephem_key);
    } else {
        res =
            process_ciphertext_v1(this->channel_cipher_, ciphertext, ephem_key);
    }

    if (const auto info = std::get_if<FinalDesitnationInfo>(&res)) {

        OXEN_LOG(debug, "We are the final destination in the onion request!");

        // Handle locally, then wrap the response for the client using the
        // same ephemeral key (AES-GCM).
        this->process_onion_exit(
            ephem_key, info->body,
            [this, ephem_key, cb = std::move(cb)](oxen::Response res) {
                auto wrapped_res = this->wrap_proxy_response(
                    res, ephem_key, true /* use aes gcm */);
                cb(std::move(wrapped_res));
            });

        return;

    } else if (const auto info = std::get_if<RelayToNodeInfo>(&res)) {

        relay_to_node(this->service_node_, *info, std::move(cb), counter++, v2);

    } else if (const auto info = std::get_if<RelayToServerInfo>(&res)) {
        OXEN_LOG(debug, "We are to forward the request to url: {}{}",
                 info->host, info->target);

        const auto& target = info->target;

        // Forward the request to url but only if it ends in `/lsrpc`
        // and carries no query string; anything else is rejected.
        if ((target.rfind("/lsrpc") == target.size() - 6) &&
            (target.find('?') == std::string::npos)) {
            this->process_onion_to_url(info->host, target, info->payload,
                                       std::move(cb));

        } else {

            auto res = oxen::Response{Status::BAD_REQUEST, "Invalid url"};
            auto wrapped_res = this->wrap_proxy_response(res, ephem_key, true);
            cb(std::move(wrapped_res));
        }

    } else if (const auto error = std::get_if<ProcessCiphertextError>(&res)) {
        switch (*error) {
        case ProcessCiphertextError::INVALID_CIPHERTEXT: {
            // Should this error be propagated back to the client? (No, if we
            // couldn't decrypt, we probably won't be able to encrypt either.)
            cb(oxen::Response{Status::BAD_REQUEST, "Invalid ciphertext"});
            break;
        }
        case ProcessCiphertextError::INVALID_JSON: {
            auto res = oxen::Response{Status::BAD_REQUEST, "Invalid json"};

            auto wrapped_res = this->wrap_proxy_response(res, ephem_key, true);

            cb(std::move(wrapped_res));
            break;
        }
        }
    } else {
        // Unreachable: ParsedInfo has exactly the four alternatives above.
        OXEN_LOG(error, "UNKNOWN VARIANT");
    }
}
|
||||
|
||||
} // namespace oxen
|
|
@ -1,124 +0,0 @@
|
|||
#include "rate_limiter.h"
|
||||
|
||||
#include "oxen_common.h"
|
||||
#include "oxen_logger.h"
|
||||
|
||||
#include <algorithm>
|
||||
#include <assert.h>
|
||||
#include <random>
|
||||
|
||||
// Out-of-class definitions: required prior to C++17 whenever these static
// constexpr members are ODR-used.
constexpr uint32_t RateLimiter::BUCKET_SIZE;
constexpr uint32_t RateLimiter::TOKEN_RATE;
constexpr uint32_t RateLimiter::TOKEN_RATE_SN;

using namespace std::chrono_literals;

// Time between two consecutive tokens for clients
constexpr static std::chrono::microseconds TOKEN_PERIOD_US =
    std::chrono::duration_cast<std::chrono::microseconds>(1s) /
    RateLimiter::TOKEN_RATE;

// Time between two consecutive tokens for snodes
constexpr static std::chrono::microseconds TOKEN_PERIOD_SN_US =
    std::chrono::duration_cast<std::chrono::microseconds>(1s) /
    RateLimiter::TOKEN_RATE_SN;

// Time it takes to refill a completely empty bucket (at the client rate)
constexpr static std::chrono::microseconds FILL_EMPTY_BUCKET_US =
    TOKEN_PERIOD_US * RateLimiter::BUCKET_SIZE;
|
||||
|
||||
// Refill `bucket` with the tokens accumulated since its last update;
// `service_node` selects the faster snode refill rate over the client rate.
// Note: does not update bucket.last_time_point — callers set it when they
// consume a token.
void RateLimiter::fill_bucket(TokenBucket& bucket,
                              std::chrono::steady_clock::time_point now,
                              bool service_node) {
    auto elapsed_us = std::chrono::duration_cast<std::chrono::microseconds>(
        now - bucket.last_time_point);
    // clamp elapsed time to how long it takes to fill up the whole bucket
    // (simplifies overflow checking)
    elapsed_us = std::min(elapsed_us, FILL_EMPTY_BUCKET_US);

    const auto token_period =
        service_node ? TOKEN_PERIOD_SN_US : TOKEN_PERIOD_US;

    const uint32_t token_added = elapsed_us.count() / token_period.count();
    // clamp tokens to bucket size
    bucket.num_tokens = std::min(BUCKET_SIZE, bucket.num_tokens + token_added);
}
|
||||
|
||||
bool RateLimiter::should_rate_limit(const std::string& identifier) {
|
||||
return should_rate_limit(identifier, std::chrono::steady_clock::now());
|
||||
}
|
||||
|
||||
// Snode-peer rate limiting: look up (or create) identifier's bucket and try
// to consume one token. Returns true when the caller should be throttled.
bool RateLimiter::should_rate_limit(const std::string& identifier,
                                    std::chrono::steady_clock::time_point now) {
    // Linear scan is fine: buckets_ is a small (128-entry) circular buffer.
    const auto it = std::find_if(
        buckets_.begin(), buckets_.end(),
        [&](const buffer_pair_t& pair) { return pair.first == identifier; });
    if (it != buckets_.end()) {
        auto& bucket = it->second;

        // NOTE(review): called with the default service_node=false, so the
        // faster TOKEN_RATE_SN refill never applies here — confirm whether
        // `fill_bucket(bucket, now, true)` was intended for snode peers.
        fill_bucket(bucket, now);

        if (bucket.num_tokens == 0) {
            return true;
        }

        bucket.num_tokens--;
        bucket.last_time_point = now;
    } else {
        // New peer: start with a full bucket minus the token just consumed.
        // If the circular buffer is full, the oldest entry is evicted.
        const TokenBucket bucket{BUCKET_SIZE - 1, now};
        buckets_.push_back(std::make_pair(identifier, bucket));
    }

    return false;
}
|
||||
|
||||
bool RateLimiter::should_rate_limit_client(const std::string& identifier) {
|
||||
return should_rate_limit_client(identifier,
|
||||
std::chrono::steady_clock::now());
|
||||
}
|
||||
|
||||
// Client rate limiting: like should_rate_limit(), but buckets are kept in
// an unordered_map bounded by MAX_CLIENTS. When full, idle buckets are
// purged; if still full, the new client is throttled outright.
bool RateLimiter::should_rate_limit_client(
    const std::string& identifier, std::chrono::steady_clock::time_point now) {

    const auto it = client_buckets_.find(identifier);
    if (it != client_buckets_.end()) {
        auto& bucket = it->second;

        fill_bucket(bucket, now);

        if (bucket.num_tokens == 0) {
            return true;
        }

        bucket.num_tokens--;
        bucket.last_time_point = now;
    } else {
        if (client_buckets_.size() >= MAX_CLIENTS) {
            clean_client_buckets(now);
        }
        if (client_buckets_.size() >= MAX_CLIENTS) {
            // Still at capacity after purging idle buckets: reject.
            return true;
        }
        // New client: full bucket minus the token consumed by this request.
        const TokenBucket bucket{BUCKET_SIZE - 1, now};
        if (!client_buckets_.insert({identifier, bucket}).second) {
            OXEN_LOG(error, "Failed to insert new client rate limit bucket");
        }
    }

    return false;
}
|
||||
|
||||
void RateLimiter::clean_client_buckets(
|
||||
std::chrono::steady_clock::time_point now) {
|
||||
|
||||
auto it = client_buckets_.begin();
|
||||
|
||||
while (it != client_buckets_.end()) {
|
||||
auto& bucket = it->second;
|
||||
fill_bucket(bucket, now);
|
||||
if (bucket.num_tokens == BUCKET_SIZE) {
|
||||
it = client_buckets_.erase(it);
|
||||
} else {
|
||||
++it;
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,47 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include <boost/circular_buffer.hpp>
|
||||
|
||||
#include <chrono>
|
||||
#include <stdint.h>
|
||||
#include <string>
|
||||
#include <unordered_map>
|
||||
#include <utility> // for std::pair
|
||||
|
||||
/// https://en.wikipedia.org/wiki/Token_bucket
|
||||
|
||||
/// Token-bucket rate limiter (https://en.wikipedia.org/wiki/Token_bucket)
/// with two independent pools: a small circular buffer for snode peers and
/// a bounded map for clients. Not internally synchronized.
class RateLimiter {
  public:
    // TODO: make those two constants command line parameters?
    constexpr static uint32_t BUCKET_SIZE = 600;

    // Tokens (requests) per second
    constexpr static uint32_t TOKEN_RATE = 300; // Too much for a client??
    constexpr static uint32_t TOKEN_RATE_SN = 600;
    // Upper bound on tracked client buckets; past this, idle buckets are
    // purged and further new clients may be rejected.
    constexpr static uint32_t MAX_CLIENTS = 10000;

    // Returns true when the (snode) identifier should be throttled;
    // otherwise consumes one token. The 1-arg form uses the current time.
    bool should_rate_limit(const std::string& identifier,
                           std::chrono::steady_clock::time_point now);
    bool should_rate_limit(const std::string& identifier);
    // Client variants: same semantics, tracked in client_buckets_.
    bool should_rate_limit_client(const std::string& identifier);
    bool should_rate_limit_client(const std::string& identifier,
                                  std::chrono::steady_clock::time_point now);

  private:
    // A classic token bucket: remaining tokens plus last refill timestamp.
    struct TokenBucket {
        uint32_t num_tokens;
        std::chrono::steady_clock::time_point last_time_point;
    };
    using buffer_pair_t = std::pair<std::string, TokenBucket>;

    // Fixed capacity: oldest snode entries are evicted automatically.
    boost::circular_buffer<buffer_pair_t> buckets_{128};

    std::unordered_map<std::string, TokenBucket> client_buckets_;

    // Drop client buckets that have fully refilled (idle clients).
    void clean_client_buckets(std::chrono::steady_clock::time_point now);

    // Add tokens based on the amount of time elapsed
    void fill_bucket(TokenBucket& bucket,
                     std::chrono::steady_clock::time_point now,
                     bool service_node = false);
};
|
|
@ -1,256 +0,0 @@
|
|||
|
||||
#include "reachability_testing.h"
|
||||
#include "oxen_logger.h"
|
||||
|
||||
using std::chrono::steady_clock;
|
||||
using namespace std::chrono_literals;
|
||||
|
||||
namespace oxen {
|
||||
|
||||
namespace detail {
|
||||
|
||||
// A fresh record marks both the first and the most recent failure as "now".
reach_record_t::reach_record_t() {
    const auto now = steady_clock::now();
    first_failure = now;
    last_failure = now;
}
|
||||
|
||||
} // namespace detail
|
||||
|
||||
/// How long to wait until reporting unreachable nodes to Oxend
|
||||
constexpr std::chrono::minutes UNREACH_GRACE_PERIOD = 120min;
|
||||
|
||||
// Decide whether node `sn` should be included in the next report of the
// given `type` to oxend (GOOD = report as reachable; otherwise report as
// unreachable, but only after a grace period).
bool reachability_records_t::should_report_as(const sn_pub_key_t& sn,
                                              ReportType type) {

    OXEN_LOG(trace, "should_report_as");

    using std::chrono::duration_cast;
    using std::chrono::minutes;

    const auto it = offline_nodes_.find(sn);

    if (it == offline_nodes_.end()) {
        // no record, we must have recorded this node as reachable already
        return false;
    }

    const auto& record = it->second;

    // A node counts as reachable only when both its ports responded.
    const bool reachable = record.http_ok && record.zmq_ok;

    if (type == ReportType::GOOD) {
        // Only report as reachable if both ports are reachable
        return reachable;
    } else {

        if (reachable) {
            // Not sure if this happens, but check just in case
            return false;
        }

        // Only report as unreachable if it has been unreachable for a long time

        const auto elapsed = record.last_failure - record.first_failure;
        const auto elapsed_min = duration_cast<minutes>(elapsed).count();
        OXEN_LOG(debug, "[reach] First time failed {} minutes ago",
                 elapsed_min);

        if (it->second.reported) {
            OXEN_LOG(debug, "[reach] Already reported node: {}", sn);
            // TODO: Might still want to report as unreachable since this status
            // gets reset to `true` on Oxend restart
            return false;
        } else if (elapsed > UNREACH_GRACE_PERIOD) {
            OXEN_LOG(debug, "[reach] Will REPORT {} to Oxend!", sn);
            return true;
        } else {
            // No need to report yet
            return false;
        }
    }
}
|
||||
|
||||
// Re-evaluate whether this node is receiving incoming ping tests on its
// HTTP and LMQ ports. `reset_time` is the last time the tracking was reset,
// so silence before that moment is ignored. Updates this->http_ok /
// this->lmq_ok, and rate-limits the warning spam to once per two minutes
// (shared across both ports via a function-local static).
void reachability_records_t::check_incoming_tests(time_point_t reset_time) {

    using std::chrono::duration_cast;

    // Tolerate missing up to 18 consecutive ping intervals before
    // declaring a port unreachable.
    constexpr auto MAX_TIME_WITHOUT_PING = PING_PEERS_INTERVAL * 18;

    const auto now = std::chrono::steady_clock::now();

    const auto last_http = std::max(reset_time, latest_incoming_http_);
    const auto http_elapsed =
        duration_cast<std::chrono::seconds>(now - last_http);

    // We don't want to print this once every 10 seconds:
    static time_point_t last_warning_tp{};
    const auto last_warning_elapsed = now - last_warning_tp;
    const bool would_warn = static_cast<bool>(last_warning_elapsed > 120s);

    OXEN_LOG(debug, "Last reset or pinged via http: {}s", http_elapsed.count());

    if (http_elapsed > MAX_TIME_WITHOUT_PING) {

        if (would_warn) {
            // A default-constructed time_point means we never got a ping.
            if (latest_incoming_http_.time_since_epoch() == 0s) {
                OXEN_LOG(warn, "Have NEVER received http pings!");
            } else {
                OXEN_LOG(warn,
                         "Have not received http pings for a long time! Last "
                         "time was: "
                         "{} mins ago.",
                         std::chrono::duration_cast<std::chrono::minutes>(
                             http_elapsed)
                             .count());
            }

            OXEN_LOG(warn, "Please check your http port. Not being reachable "
                           "over http may result in a deregistration!");
            last_warning_tp = now;
        }

        this->http_ok = false;
    } else if (!this->http_ok) {
        this->http_ok = true;
        OXEN_LOG(info, "Http port is back to OK");
    }

    // Same check for the LMQ (OxenMQ) port.
    const auto last_lmq = std::max(reset_time, latest_incoming_lmq_);
    const auto lmq_elapsed =
        duration_cast<std::chrono::seconds>(now - last_lmq);

    OXEN_LOG(debug, "Last reset or pinged via lmq: {}s", lmq_elapsed.count());

    if (lmq_elapsed > MAX_TIME_WITHOUT_PING) {

        if (would_warn) {

            if (latest_incoming_lmq_.time_since_epoch() == 0s) {
                OXEN_LOG(warn, "Have NEVER received lmq pings!");
            } else {
                OXEN_LOG(
                    warn,
                    "Have not received lmq pings for a long time! Last time "
                    "was: {} mins ago",
                    duration_cast<std::chrono::minutes>(lmq_elapsed).count());
            }

            OXEN_LOG(warn, "Please check your lmq port. Not being reachable "
                           "over lmq may result in a deregistration!");
            last_warning_tp = now;
        }

        this->lmq_ok = false;

    } else if (!this->lmq_ok) {
        this->lmq_ok = true;
        OXEN_LOG(info, "Lmq port is back to OK");
    }
}
|
||||
|
||||
void reachability_records_t::record_reachable(const sn_pub_key_t& sn,
|
||||
ReachType type, bool val) {
|
||||
|
||||
OXEN_LOG(trace, "record_reachable");
|
||||
|
||||
const auto it = offline_nodes_.find(sn);
|
||||
|
||||
const bool no_record = it == offline_nodes_.end();
|
||||
|
||||
if (no_record) {
|
||||
|
||||
if (val) {
|
||||
// The node is good and there is no record, so do nothing
|
||||
OXEN_LOG(debug, "[reach] Node is reachable via {} (no record) {}",
|
||||
type == ReachType::HTTP ? "HTTP" : "ZMQ", sn);
|
||||
} else {
|
||||
|
||||
detail::reach_record_t record;
|
||||
|
||||
if (type == ReachType::HTTP) {
|
||||
OXEN_LOG(
|
||||
debug,
|
||||
"[reach] Adding a new node to UNREACHABLE via HTTP: {}",
|
||||
sn);
|
||||
record.http_ok = false;
|
||||
} else if (type == ReachType::ZMQ) {
|
||||
OXEN_LOG(debug,
|
||||
"[reach] Adding a new node to UNREACHABLE via ZMQ: {}",
|
||||
sn);
|
||||
record.zmq_ok = false;
|
||||
}
|
||||
|
||||
offline_nodes_.insert({sn, record});
|
||||
}
|
||||
} else {
|
||||
|
||||
auto& record = it->second;
|
||||
|
||||
const bool reachable_before = record.http_ok && record.zmq_ok;
|
||||
// Sometimes we might still have this entry even if the node has become
|
||||
// reachable again
|
||||
|
||||
if (type == ReachType::HTTP) {
|
||||
OXEN_LOG(debug, "[reach] node {} is {} via HTTP", sn,
|
||||
val ? "OK" : "UNREACHABLE");
|
||||
record.http_ok = val;
|
||||
} else if (type == ReachType::ZMQ) {
|
||||
OXEN_LOG(debug, "[reach] node {} is {} via ZMQ", sn,
|
||||
val ? "OK" : "UNREACHABLE");
|
||||
record.zmq_ok = val;
|
||||
}
|
||||
|
||||
if (!val) {
|
||||
OXEN_LOG(debug,
|
||||
"[reach] Node is ALREADY known to be UNREACHABLE: {}, "
|
||||
"http_ok: {}, "
|
||||
"zmq_ok: {}",
|
||||
sn, record.http_ok, record.zmq_ok);
|
||||
|
||||
const auto now = steady_clock::now();
|
||||
|
||||
if (reachable_before) {
|
||||
record.first_failure = now;
|
||||
}
|
||||
|
||||
record.last_failure = now;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool reachability_records_t::expire(const sn_pub_key_t& sn) {
|
||||
|
||||
bool erased = offline_nodes_.erase(sn);
|
||||
if (erased)
|
||||
OXEN_LOG(debug, "[reach] Removed entry for {}", sn);
|
||||
|
||||
return erased;
|
||||
}
|
||||
|
||||
void reachability_records_t::set_reported(const sn_pub_key_t& sn) {
|
||||
|
||||
const auto it = offline_nodes_.find(sn);
|
||||
if (it != offline_nodes_.end()) {
|
||||
it->second.reported = true;
|
||||
}
|
||||
}
|
||||
|
||||
std::optional<sn_pub_key_t> reachability_records_t::next_to_test() {
|
||||
|
||||
const auto it = std::min_element(
|
||||
offline_nodes_.begin(), offline_nodes_.end(),
|
||||
[&](const auto& lhs, const auto& rhs) {
|
||||
return lhs.second.last_failure < rhs.second.last_failure;
|
||||
});
|
||||
|
||||
if (it == offline_nodes_.end()) {
|
||||
return std::nullopt;
|
||||
} else {
|
||||
|
||||
OXEN_LOG(debug, "Selecting to be re-tested: {}", it->first);
|
||||
|
||||
return it->first;
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace oxen
|
|
@ -1,79 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include "oxen_common.h"
|
||||
#include <chrono>
|
||||
#include <unordered_map>
|
||||
|
||||
using namespace std::chrono_literals;
|
||||
|
||||
constexpr std::chrono::seconds PING_PEERS_INTERVAL = 10s;
|
||||
|
||||
namespace oxen {
|
||||
|
||||
namespace detail {
|
||||
|
||||
/// TODO: make this class "private"?
/// Per-node record of reachability-test failures, kept while a node is
/// considered offline.
class reach_record_t {

  public:
    // The time the node failed for the first time
    // (and hasn't come back online)
    time_point_t first_failure;
    // The time of the most recent failed test.
    time_point_t last_failure;
    // whether it's been reported to Oxend
    bool reported = false;

    // whether reachable over http
    bool http_ok = true;
    // whether reachable over zmq
    bool zmq_ok = true;

    // Constructor defined out of line.
    reach_record_t();
};
|
||||
} // namespace detail
|
||||
|
||||
// Transport over which a reachability test was performed.
enum class ReachType { HTTP, ZMQ };

// Whether a node is to be reported to Oxend as reachable (GOOD) or as
// unreachable (BAD).
enum class ReportType { GOOD, BAD };
|
||||
|
||||
// Tracks the reachability of other service nodes (from our outgoing tests)
// as well as incoming test pings from peers testing us.
class reachability_records_t {

    // TODO: sn_records are heavy (3 strings), so how about we only store the
    // pubkey?

    // Nodes that failed the reachability test
    // Note: I don't expect this list to be large, so
    // `std::vector` is probably faster than `std::set` here
    std::unordered_map<sn_pub_key_t, detail::reach_record_t> offline_nodes_;

  public:
    // The time we were last tested and reached by some other node over lmq
    time_point_t latest_incoming_lmq_;
    // The time we were last tested and reached by some other node over http
    time_point_t latest_incoming_http_;

    // These will be set to `false` if we stop receiving lmq/http pings
    bool lmq_ok = true;
    bool http_ok = true;

    // Check whether we received incoming pings recently
    void check_incoming_tests(time_point_t reset_time);

    // Records node as reachable/unreachable according to `val`
    void record_reachable(const sn_pub_key_t& sn, ReachType type, bool val);

    // return `true` if the node should be reported to Oxend as being
    // reachable or unreachable for a long time depending on `type`
    bool should_report_as(const sn_pub_key_t& sn, ReportType type);

    // Expires a node, removing it from offline nodes. Returns true if found
    // and removed, false if it didn't exist.
    bool expire(const sn_pub_key_t& sn);

    // Mark the node's record (if any) as already reported to Oxend.
    void set_reported(const sn_pub_key_t& sn);

    // Return the least recently tested node
    std::optional<sn_pub_key_t> next_to_test();
};
|
||||
|
||||
} // namespace oxen
|
|
@ -1,537 +0,0 @@
|
|||
#include "request_handler.h"
|
||||
#include "channel_encryption.hpp"
|
||||
#include "http_connection.h"
|
||||
#include "oxen_logger.h"
|
||||
#include "service_node.h"
|
||||
#include "utils.hpp"
|
||||
|
||||
#include "https_client.h"
|
||||
|
||||
#include <oxenmq/base64.h>
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
using nlohmann::json;
|
||||
|
||||
namespace oxen {
|
||||
|
||||
constexpr size_t MAX_MESSAGE_BODY = 102400; // 100 KB limit
|
||||
|
||||
// Render a Response as a human-readable string (for logging/debugging).
std::string to_string(const Response& res) {

    std::stringstream out;

    out << "Status: " << static_cast<int>(res.status()) << ", "
        << "ContentType: "
        << (res.content_type() == ContentType::plaintext ? "plaintext"
                                                         : "json")
        << ", "
        << "Body: <" << res.message() << ">";

    return out.str();
}
|
||||
|
||||
// Wires the handler to its collaborators.  All references are stored, so
// every argument must outlive this object.
RequestHandler::RequestHandler(boost::asio::io_context& ioc, ServiceNode& sn,
                               const OxendClient& oxend_client,
                               const ChannelEncryption<std::string>& ce)
    : ioc_(ioc), service_node_(sn), oxend_client_(oxend_client),
      channel_cipher_(ce) {}
|
||||
|
||||
// Serialize a list of service nodes into the {"snodes": [...]} JSON shape
// returned to clients.
static json snodes_to_json(const std::vector<sn_record_t>& snodes) {

    json snode_list = json::array();

    for (const auto& record : snodes) {
        snode_list.push_back(json{
            {"address", record.sn_address()},
            {"pubkey_x25519", record.pubkey_x25519_hex()},
            {"pubkey_ed25519", record.pubkey_ed25519_hex()},
            {"port", std::to_string(record.port())},
            {"ip", record.ip()},
        });
    }

    json result;
    result["snodes"] = snode_list;

    return result;
}
|
||||
|
||||
// Obfuscate a pubkey for logging: keep the first two and the last three
// characters, replacing the middle with "...".  Keys too short to
// obfuscate are returned unchanged (the original threw std::out_of_range
// for keys shorter than 3 characters).
static std::string obfuscate_pubkey(const std::string& pk) {
    if (pk.length() <= 5)
        return pk;
    std::string res = pk.substr(0, 2);
    res += "...";
    // substr's second argument is a count, not an end index; omit it to
    // take everything from length-3 to the end.  (The original passed
    // `pk.length() - 1` as the count, which only worked because substr
    // clamps the count to the available characters.)
    res += pk.substr(pk.length() - 3);
    return res;
}
|
||||
|
||||
/// TODO: this probably shouldn't return Response...
|
||||
Response RequestHandler::handle_wrong_swarm(const user_pubkey_t& pubKey) {
|
||||
|
||||
const std::vector<sn_record_t> nodes =
|
||||
service_node_.get_snodes_by_pk(pubKey);
|
||||
const json res_body = snodes_to_json(nodes);
|
||||
|
||||
OXEN_LOG(trace, "Got client request to a wrong swarm");
|
||||
|
||||
return Response{Status::MISDIRECTED_REQUEST, res_body.dump(),
|
||||
ContentType::json};
|
||||
}
|
||||
|
||||
// Validate and store a client message.
//
// `params` must contain the string fields pubKey, ttl, nonce, timestamp and
// data.  On success, returns a JSON body with the current PoW difficulty;
// otherwise an error Response with an appropriate status.
Response RequestHandler::process_store(const json& params) {

    constexpr const char* fields[] = {"pubKey", "ttl", "nonce", "timestamp",
                                      "data"};

    // All fields are mandatory.
    for (const auto& field : fields) {
        if (!params.contains(field)) {

            OXEN_LOG(debug, "Bad client request: no `{}` field", field);
            return Response{
                Status::BAD_REQUEST,
                fmt::format("invalid json: no `{}` field\n", field)};
        }
    }

    const auto& ttl = params.at("ttl").get_ref<const std::string&>();
    const auto& nonce = params.at("nonce").get_ref<const std::string&>();
    const auto& timestamp =
        params.at("timestamp").get_ref<const std::string&>();
    const auto& data = params.at("data").get_ref<const std::string&>();

    OXEN_LOG(trace, "Storing message: {}", data);

    bool created;
    auto pk =
        user_pubkey_t::create(params.at("pubKey").get<std::string>(), created);

    if (!created) {
        auto msg = fmt::format("Pubkey must be {} characters long\n",
                               get_user_pubkey_size());
        OXEN_LOG(debug, "{}", msg);
        return Response{Status::BAD_REQUEST, std::move(msg)};
    }

    if (data.size() > MAX_MESSAGE_BODY) {
        OXEN_LOG(debug, "Message body too long: {}", data.size());

        auto msg =
            fmt::format("Message body exceeds maximum allowed length of {}\n",
                        MAX_MESSAGE_BODY);
        return Response{Status::BAD_REQUEST, std::move(msg)};
    }

    // Redirect the client if this snode does not serve this pubkey's swarm.
    if (!service_node_.is_pubkey_for_us(pk)) {
        return this->handle_wrong_swarm(pk);
    }

    uint64_t ttlInt;
    if (!util::parseTTL(ttl, ttlInt)) {
        OXEN_LOG(debug, "Forbidden. Invalid TTL: {}", ttl);
        return Response{Status::FORBIDDEN, "Provided TTL is not valid.\n"};
    }

    uint64_t timestampInt;
    if (!util::parseTimestamp(timestamp, ttlInt, timestampInt)) {
        OXEN_LOG(debug, "Forbidden. Invalid Timestamp: {}", timestamp);
        return Response{Status::NOT_ACCEPTABLE,
                        "Timestamp error: check your clock\n"};
    }

    // Do not store message if the PoW provided is invalid
    std::string messageHash;

    const bool valid_pow =
        checkPoW(nonce, timestamp, ttl, pk.str(), data, messageHash,
                 service_node_.get_curr_pow_difficulty());
#ifndef DISABLE_POW
    if (!valid_pow) {
        OXEN_LOG(debug, "Forbidden. Invalid PoW nonce: {}", nonce);

        json res_body;
        res_body["difficulty"] = service_node_.get_curr_pow_difficulty();

        return Response{Status::INVALID_POW, res_body.dump(),
                        ContentType::json};
    }
#endif

    bool success;

    try {
        const auto msg =
            message_t{pk.str(), data, messageHash, ttlInt, timestampInt, nonce};
        success = service_node_.process_store(msg);
    } catch (const std::exception& e) {
        // Fixed: catch by const reference -- catching `std::exception` by
        // value slices derived exceptions and copies unnecessarily.
        OXEN_LOG(critical,
                 "Internal Server Error. Could not store message for {}",
                 obfuscate_pubkey(pk.str()));
        return Response{Status::INTERNAL_SERVER_ERROR, e.what()};
    }

    if (!success) {

        OXEN_LOG(warn, "Service node is initializing");
        return Response{Status::SERVICE_UNAVAILABLE,
                        "Service node is initializing\n"};
    }

    OXEN_LOG(trace, "Successfully stored message for {}",
             obfuscate_pubkey(pk.str()));

    json res_body;
    res_body["difficulty"] = service_node_.get_curr_pow_difficulty();

    return Response{Status::OK, res_body.dump(), ContentType::json};
}
|
||||
|
||||
// Test only: dump every stored message (data + owner pubkey) as JSON.
Response RequestHandler::process_retrieve_all() {

    std::vector<storage::Item> entries;

    if (!service_node_.get_all_messages(entries)) {
        return Response{Status::INTERNAL_SERVER_ERROR,
                        "could not retrieve all entries\n"};
    }

    json messages = json::array();

    for (const auto& entry : entries) {
        messages.push_back(json{{"data", entry.data}, {"pk", entry.pub_key}});
    }

    json res_body;
    res_body["messages"] = messages;

    return Response{Status::OK, res_body.dump(), ContentType::json};
}
|
||||
|
||||
// Explicitly look up the swarm members that serve the given client pubkey.
Response RequestHandler::process_snodes_by_pk(const json& params) const {

    if (!params.contains("pubKey")) {
        OXEN_LOG(debug, "Bad client request: no `pubKey` field");
        return Response{Status::BAD_REQUEST,
                        "invalid json: no `pubKey` field\n"};
    }

    bool parsed_ok;
    const auto pk = user_pubkey_t::create(
        params.at("pubKey").get<std::string>(), parsed_ok);
    if (!parsed_ok) {

        auto msg = fmt::format("Pubkey must be {} characters long\n",
                               get_user_pubkey_size());
        OXEN_LOG(debug, "{}", msg);
        return Response{Status::BAD_REQUEST, std::move(msg)};
    }

    const auto nodes = service_node_.get_snodes_by_pk(pk);
    OXEN_LOG(debug, "Snodes by pk size: {}", nodes.size());

    const json res_body = snodes_to_json(nodes);
    OXEN_LOG(debug, "Snodes by pk: {}", res_body.dump());

    return Response{Status::OK, res_body.dump(), ContentType::json};
}
|
||||
|
||||
// Retrieve the stored messages for a client pubkey that are newer than the
// client-provided `lastHash`.
Response RequestHandler::process_retrieve(const json& params) {

    for (const char* field : {"pubKey", "lastHash"}) {
        if (!params.contains(field)) {
            auto msg = fmt::format("invalid json: no `{}` field", field);
            OXEN_LOG(debug, "{}", msg);
            return Response{Status::BAD_REQUEST, std::move(msg)};
        }
    }

    bool parsed_ok;
    const auto pk =
        user_pubkey_t::create(params["pubKey"].get<std::string>(), parsed_ok);

    if (!parsed_ok) {

        auto msg = fmt::format("Pubkey must be {} characters long\n",
                               get_user_pubkey_size());
        OXEN_LOG(debug, "{}", msg);
        return Response{Status::BAD_REQUEST, std::move(msg)};
    }

    if (!service_node_.is_pubkey_for_us(pk))
        return this->handle_wrong_swarm(pk);

    const auto& last_hash =
        params.at("lastHash").get_ref<const std::string&>();

    // Note: We removed long-polling

    std::vector<storage::Item> found;

    if (!service_node_.retrieve(pk.str(), last_hash, found)) {

        auto msg = fmt::format(
            "Internal Server Error. Could not retrieve messages for {}",
            obfuscate_pubkey(pk.str()));
        OXEN_LOG(critical, "{}", msg);

        return Response{Status::INTERNAL_SERVER_ERROR, std::move(msg)};
    }

    if (!found.empty()) {
        OXEN_LOG(trace, "Successfully retrieved messages for {}",
                 obfuscate_pubkey(pk.str()));
    }

    json messages = json::array();

    for (const auto& item : found) {
        /// TODO: calculate expiration time once only?
        messages.push_back(json{{"hash", item.hash},
                                {"expiration", item.timestamp + item.ttl},
                                {"data", item.data}});
    }

    json res_body;
    res_body["messages"] = messages;

    return Response{Status::OK, res_body.dump(), ContentType::json};
}
|
||||
|
||||
void RequestHandler::process_client_req(
|
||||
const std::string& req_json, std::function<void(oxen::Response)> cb) {
|
||||
|
||||
OXEN_LOG(trace, "process_client_req str <{}>", req_json);
|
||||
|
||||
const json body = json::parse(req_json, nullptr, false);
|
||||
if (body == nlohmann::detail::value_t::discarded) {
|
||||
OXEN_LOG(debug, "Bad client request: invalid json");
|
||||
cb(Response{Status::BAD_REQUEST, "invalid json\n"});
|
||||
}
|
||||
|
||||
OXEN_LOG(trace, "process_client_req json <{}>", body.dump(2));
|
||||
|
||||
const auto method_it = body.find("method");
|
||||
if (method_it == body.end() || !method_it->is_string()) {
|
||||
OXEN_LOG(debug, "Bad client request: no method field");
|
||||
cb(Response{Status::BAD_REQUEST, "invalid json: no `method` field\n"});
|
||||
}
|
||||
|
||||
const auto& method_name = method_it->get_ref<const std::string&>();
|
||||
|
||||
OXEN_LOG(trace, " - method name: {}", method_name);
|
||||
|
||||
const auto params_it = body.find("params");
|
||||
if (params_it == body.end() || !params_it->is_object()) {
|
||||
OXEN_LOG(debug, "Bad client request: no params field");
|
||||
cb(Response{Status::BAD_REQUEST, "invalid json: no `params` field\n"});
|
||||
}
|
||||
|
||||
if (method_name == "store") {
|
||||
OXEN_LOG(debug, "Process client request: store");
|
||||
cb(this->process_store(*params_it));
|
||||
|
||||
} else if (method_name == "retrieve") {
|
||||
OXEN_LOG(debug, "Process client request: retrieve");
|
||||
cb(this->process_retrieve(*params_it));
|
||||
// TODO: maybe we should check if (some old) clients requests
|
||||
// long-polling and then wait before responding to prevent spam
|
||||
|
||||
} else if (method_name == "get_snodes_for_pubkey") {
|
||||
OXEN_LOG(debug, "Process client request: snodes for pubkey");
|
||||
cb(this->process_snodes_by_pk(*params_it));
|
||||
} else if (method_name == "get_lns_mapping") {
|
||||
|
||||
const auto name_it = params_it->find("name_hash");
|
||||
if (name_it == params_it->end()) {
|
||||
cb(Response{Status::BAD_REQUEST, "Field <name_hash> is missing"});
|
||||
} else {
|
||||
this->process_lns_request(*name_it, std::move(cb));
|
||||
}
|
||||
|
||||
} else {
|
||||
OXEN_LOG(debug, "Bad client request: unknown method '{}'", method_name);
|
||||
cb(Response{Status::BAD_REQUEST,
|
||||
fmt::format("no method {}", method_name)});
|
||||
}
|
||||
}
|
||||
|
||||
// Wrap a Response destined for an intermediate node: serialize status and
// body as JSON, then encrypt for `client_key` (AES-GCM when `use_gcm`,
// otherwise AES-CBC) and base64-encode.
Response RequestHandler::wrap_proxy_response(const Response& res,
                                             const std::string& client_key,
                                             bool use_gcm) const {

    nlohmann::json json_res;
    json_res["status"] = res.status();
    json_res["body"] = res.message();

    const std::string plaintext = json_res.dump();

    std::string ciphertext = oxenmq::to_base64(
        use_gcm ? channel_cipher_.encrypt_gcm(plaintext, client_key)
                : channel_cipher_.encrypt_cbc(plaintext, client_key));

    // why does this have to be json???
    return Response{Status::OK, std::move(ciphertext), ContentType::json};
}
|
||||
|
||||
void RequestHandler::process_lns_request(
|
||||
std::string name_hash, std::function<void(oxen::Response)> cb) {
|
||||
|
||||
json params;
|
||||
json array = json::array();
|
||||
json entry;
|
||||
|
||||
entry["name_hash"] = std::move(name_hash);
|
||||
|
||||
json types = json::array();
|
||||
types.push_back(0);
|
||||
entry["types"] = types;
|
||||
|
||||
array.push_back(entry);
|
||||
params["entries"] = array;
|
||||
|
||||
// this should not be called "sn response"
|
||||
auto on_oxend_res = [cb = std::move(cb)](sn_response_t sn) {
|
||||
if (sn.error_code == SNodeError::NO_ERROR && sn.body) {
|
||||
cb({Status::OK, *sn.body});
|
||||
} else {
|
||||
cb({Status::BAD_REQUEST, "unknown oxend error"});
|
||||
}
|
||||
};
|
||||
|
||||
#ifdef INTEGRATION_TEST
|
||||
// use mainnet seed
|
||||
oxend_client_.make_custom_oxend_request("public.loki.foundation", 22023,
|
||||
"lns_names_to_owners", params,
|
||||
std::move(on_oxend_res));
|
||||
#else
|
||||
oxend_client_.make_oxend_request("lns_names_to_owners", params,
|
||||
std::move(on_oxend_res));
|
||||
#endif
|
||||
}
|
||||
|
||||
void RequestHandler::process_onion_exit(
|
||||
const std::string& eph_key, const std::string& body,
|
||||
std::function<void(oxen::Response)> cb) {
|
||||
|
||||
OXEN_LOG(debug, "Processing onion exit!");
|
||||
|
||||
if (!service_node_.snode_ready()) {
|
||||
cb({Status::SERVICE_UNAVAILABLE, "Snode not ready"});
|
||||
return;
|
||||
}
|
||||
|
||||
this->process_client_req(body, std::move(cb));
|
||||
}
|
||||
|
||||
void RequestHandler::process_proxy_exit(
|
||||
const std::string& client_key, const std::string& payload,
|
||||
std::function<void(oxen::Response)> cb) {
|
||||
|
||||
if (!service_node_.snode_ready()) {
|
||||
auto res = Response{Status::SERVICE_UNAVAILABLE, "Snode not ready"};
|
||||
cb(wrap_proxy_response(res, client_key, false));
|
||||
return;
|
||||
}
|
||||
|
||||
static int proxy_idx = 0;
|
||||
|
||||
int idx = proxy_idx++;
|
||||
|
||||
OXEN_LOG(debug, "[{}] Process proxy exit", idx);
|
||||
|
||||
std::string plaintext;
|
||||
|
||||
try {
|
||||
plaintext = channel_cipher_.decrypt_cbc(payload, client_key);
|
||||
} catch (const std::exception& e) {
|
||||
auto msg = fmt::format("Invalid ciphertext: {}", e.what());
|
||||
OXEN_LOG(debug, "{}", msg);
|
||||
auto res = Response{Status::BAD_REQUEST, std::move(msg)};
|
||||
|
||||
// TODO: since we always seem to encrypt the response, we should
|
||||
// do it once one level above instead
|
||||
cb(wrap_proxy_response(res, client_key, false));
|
||||
return;
|
||||
}
|
||||
|
||||
std::string body;
|
||||
|
||||
bool lp_used = false;
|
||||
|
||||
try {
|
||||
const json req = json::parse(plaintext, nullptr, true);
|
||||
body = req.at("body").get<std::string>();
|
||||
|
||||
if (req.find("headers") != req.end()) {
|
||||
if (req.at("headers").find(OXEN_LONG_POLL_HEADER) !=
|
||||
req.at("headers").end()) {
|
||||
lp_used =
|
||||
req.at("headers").at(OXEN_LONG_POLL_HEADER).get<bool>();
|
||||
}
|
||||
}
|
||||
|
||||
} catch (std::exception& e) {
|
||||
auto msg = fmt::format("JSON parsing error: {}", e.what());
|
||||
OXEN_LOG(debug, "[{}] {}", idx, msg);
|
||||
auto res = Response{Status::BAD_REQUEST, msg};
|
||||
cb(wrap_proxy_response(res, client_key, false /* use cbc */));
|
||||
return;
|
||||
}
|
||||
|
||||
if (lp_used) {
|
||||
OXEN_LOG(debug, "Long polling requested over a proxy request");
|
||||
}
|
||||
|
||||
this->process_client_req(
|
||||
body, [this, cb = std::move(cb), client_key, idx](oxen::Response res) {
|
||||
OXEN_LOG(debug, "[{}] proxy about to respond with: {}", idx,
|
||||
res.status());
|
||||
|
||||
cb(wrap_proxy_response(res, client_key, false /* use cbc */));
|
||||
});
|
||||
}
|
||||
|
||||
void RequestHandler::process_onion_to_url(
|
||||
const std::string& host, const std::string& target,
|
||||
const std::string& payload, std::function<void(oxen::Response)> cb) {
|
||||
|
||||
// TODO: investigate if the use of a shared pointer is necessary
|
||||
auto req = std::make_shared<request_t>();
|
||||
|
||||
req->body() = payload;
|
||||
req->set(http::field::host, host);
|
||||
req->method(http::verb::post);
|
||||
req->target(target);
|
||||
|
||||
req->prepare_payload();
|
||||
|
||||
// `cb` needs to be adapted for http request
|
||||
auto http_cb = [cb = std::move(cb)](sn_response_t res) {
|
||||
if (res.error_code == SNodeError::NO_ERROR) {
|
||||
cb(oxen::Response{Status::OK, *res.body});
|
||||
} else {
|
||||
OXEN_LOG(debug, "Oxen server error: {}", res.error_code);
|
||||
cb(oxen::Response{Status::BAD_REQUEST, "Oxen Server error"});
|
||||
}
|
||||
};
|
||||
|
||||
make_https_request(ioc_, host, req, http_cb);
|
||||
}
|
||||
|
||||
} // namespace oxen
|
|
@ -1,139 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include "oxen_common.h"
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
|
||||
#include <boost/asio.hpp>
|
||||
|
||||
#include <nlohmann/json_fwd.hpp>
|
||||
|
||||
// TODO: move ChannelEncryption to ::oxen
|
||||
template <typename T>
|
||||
class ChannelEncryption;
|
||||
|
||||
namespace oxen {
|
||||
|
||||
class ServiceNode;
|
||||
class OxendClient;
|
||||
|
||||
// HTTP-style status codes used in client-facing responses.
enum class Status {
    OK = 200,
    BAD_REQUEST = 400,
    FORBIDDEN = 403,
    NOT_ACCEPTABLE = 406,
    MISDIRECTED_REQUEST = 421,
    INVALID_POW = 432, // unassigned http code
    SERVICE_UNAVAILABLE = 503,
    INTERNAL_SERVER_ERROR = 500,
    BAD_GATEWAY = 502,
    GATEWAY_TIMEOUT = 504,
};

// Encoding of a Response body.
enum class ContentType {
    plaintext,
    json,
};
|
||||
|
||||
namespace ss_client {

// Kinds of requests one storage server can make to another.
enum class ReqMethod {
    DATA,       // Database entries
    PROXY_EXIT, // A session client request coming through a proxy
    ONION_REQUEST,
};

// A request between storage servers: a raw body plus optional headers.
class Request {

  public:
    std::string body;
    // Might change this to a vector later
    std::map<std::string, std::string> headers;
};

} // namespace ss_client
// Fixed: removed the stray `;` after the namespace's closing brace.
|
||||
|
||||
// Value type carrying a status code, a message body, and the body's
// content type.
class Response {

    Status status_;
    std::string message_;
    ContentType content_type_;

  public:
    // Intentionally not `explicit`: callers construct responses via brace
    // lists, e.g. cb({Status::OK, body}).
    Response(Status s, std::string m, ContentType ct = ContentType::plaintext)
        : status_(s), message_(std::move(m)), content_type_(ct) {}

    // The response body (plaintext or serialized json, per content_type()).
    const std::string& message() const { return message_; }
    Status status() const { return status_; }
    ContentType content_type() const { return content_type_; }
};
|
||||
|
||||
std::string to_string(const Response& res);
|
||||
|
||||
// Dispatches Session client requests (store/retrieve/lookup) as well as the
// proxy and onion request flavours, wrapping/encrypting responses where the
// transport requires it.
class RequestHandler {

    ServiceNode& service_node_;
    const OxendClient& oxend_client_;
    const ChannelEncryption<std::string>& channel_cipher_;

    boost::asio::io_context& ioc_;

    // Wrap response `res` to an intermediate node
    Response wrap_proxy_response(const Response& res,
                                 const std::string& client_key,
                                 bool use_gcm) const;

    // Return the correct swarm for `pubKey`
    Response handle_wrong_swarm(const user_pubkey_t& pubKey);

    // ===== Session Client Requests =====

    // Similar to `handle_wrong_swarm`; but used when the swarm is requested
    // explicitly
    Response process_snodes_by_pk(const nlohmann::json& params) const;

    // Save the message and relay the swarm
    Response process_store(const nlohmann::json& params);

    // Query the database and return requested messages
    Response process_retrieve(const nlohmann::json& params);

    // Final hop of an onion request: process the decrypted body.
    void process_onion_exit(const std::string& eph_key,
                            const std::string& payload,
                            std::function<void(oxen::Response)> cb);

    // Forward an LNS name-hash lookup to oxend.
    void process_lns_request(std::string name_hash,
                             std::function<void(oxen::Response)> cb);

    // ===================================

  public:
    RequestHandler(boost::asio::io_context& ioc, ServiceNode& sn,
                   const OxendClient& oxend_client,
                   const ChannelEncryption<std::string>& ce);

    // Process all Session client requests
    void process_client_req(const std::string& req_json,
                            std::function<void(oxen::Response)> cb);

    // Test only: retrieve all db entries
    Response process_retrieve_all();

    // Handle a Session client request sent via SN proxy
    void process_proxy_exit(const std::string& client_key,
                            const std::string& payload,
                            std::function<void(oxen::Response)> cb);

    // Forward an onion request's payload to an external URL over HTTPS.
    void process_onion_to_url(const std::string& host,
                              const std::string& target,
                              const std::string& payload,
                              std::function<void(oxen::Response)> cb);

    // The result will arrive asynchronously, so it needs a callback handler
    void process_onion_req(const std::string& ciphertext,
                           const std::string& ephem_key,
                           std::function<void(oxen::Response)> cb,
                           // Whether to use the new v2 protocol
                           bool v2 = false);
};
|
||||
} // namespace oxen
|
|
@ -1,43 +0,0 @@
|
|||
#include "security.h"
|
||||
#include "oxend_key.h"
|
||||
#include "signature.h"
|
||||
|
||||
#include <oxenmq/base64.h>
|
||||
|
||||
#include <filesystem>
|
||||
#include <fstream>
|
||||
|
||||
namespace oxen {
|
||||
// `key_pair` is stored by reference and must outlive this object;
// `base_path` is the directory expected to contain cert.pem.
Security::Security(const oxend_key_pair_t& key_pair,
                   const std::filesystem::path& base_path)
    : key_pair_(key_pair), base_path_(base_path) {}
|
||||
|
||||
std::string Security::base64_sign(const std::string& body) {
|
||||
const auto hash = hash_data(body);
|
||||
const auto sig = generate_signature(hash, key_pair_);
|
||||
std::string raw_sig;
|
||||
raw_sig.reserve(sig.c.size() + sig.r.size());
|
||||
raw_sig.insert(raw_sig.begin(), sig.c.begin(), sig.c.end());
|
||||
raw_sig.insert(raw_sig.end(), sig.r.begin(), sig.r.end());
|
||||
return oxenmq::to_base64(raw_sig);
|
||||
}
|
||||
|
||||
void Security::generate_cert_signature() {
|
||||
std::ifstream file((base_path_ / "cert.pem").string());
|
||||
if (!file.is_open()) {
|
||||
throw std::runtime_error("Could not find cert.pem");
|
||||
}
|
||||
std::string cert_pem((std::istreambuf_iterator<char>(file)),
|
||||
std::istreambuf_iterator<char>());
|
||||
const auto hash = hash_data(cert_pem);
|
||||
const auto sig = generate_signature(hash, key_pair_);
|
||||
std::string raw_sig;
|
||||
raw_sig.reserve(sig.c.size() + sig.r.size());
|
||||
raw_sig.insert(raw_sig.begin(), sig.c.begin(), sig.c.end());
|
||||
raw_sig.insert(raw_sig.end(), sig.r.begin(), sig.r.end());
|
||||
|
||||
cert_signature_ = oxenmq::to_base64(raw_sig);
|
||||
}
|
||||
|
||||
// Return the signature cached by generate_cert_signature() (empty string
// if it has not been generated yet).
std::string Security::get_cert_signature() const { return cert_signature_; }
|
||||
} // namespace oxen
|
|
@ -1,25 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include <string>
|
||||
|
||||
#include <filesystem>
|
||||
|
||||
namespace oxen {
|
||||
|
||||
struct oxend_key_pair_t;

// Signs data with the node's key pair and caches the signature of the
// node's TLS certificate (cert.pem under `base_path`).
class Security {
  public:
    // `key_pair` is stored by reference and must outlive this object.
    Security(const oxend_key_pair_t& key_pair,
             const std::filesystem::path& base_path);

    // Sign the hash of `body`, returning the signature as base64.
    std::string base64_sign(const std::string& body);
    // Read <base_path>/cert.pem, sign it, and cache the base64 signature.
    void generate_cert_signature();
    // Return the cached certificate signature (empty until generated).
    std::string get_cert_signature() const;

  private:
    const oxend_key_pair_t& key_pair_;
    std::string cert_signature_;
    std::filesystem::path base_path_;
};
|
||||
} // namespace oxen
|
|
@ -1,195 +0,0 @@
|
|||
#include "serialization.h"
|
||||
|
||||
/// TODO: should only be aware of messages
|
||||
#include "Item.hpp"
|
||||
#include "oxen_logger.h"
|
||||
#include "service_node.h"
|
||||
|
||||
#include <boost/endian/conversion.hpp>
|
||||
#include <boost/format.hpp>
|
||||
|
||||
using oxen::storage::Item;
|
||||
|
||||
namespace oxen {
|
||||
|
||||
/// Read a little-endian integer of type T from `it` and advance the
/// iterator past it. The previous implementation read the bytes via
/// `reinterpret_cast<const T&>(*it)`, which is an unaligned-access /
/// strict-aliasing type pun; assembling the value byte by byte produces
/// the same result for the unsigned types used here (size_t, uint64_t)
/// on every platform, without the cast or the boost::endian call.
template <typename T>
static T deserialize_integer(std::string::const_iterator& it) {
    // Assumes T is an unsigned integral type (all current instantiations).
    T value = 0;
    for (unsigned i = 0; i < sizeof(T); ++i)
        value |= static_cast<T>(static_cast<unsigned char>(*it++)) << (8 * i);
    return value;
}
|
||||
|
||||
/// Append integer `a` to `buf` in little-endian byte order — the same
/// wire format boost::endian produced — writing the bytes explicitly so
/// no pointer reinterpretation of &a is needed. Assumes T is an
/// unsigned integral type (all current instantiations are).
template <typename T>
static void serialize_integer(std::string& buf, T a) {
    for (unsigned i = 0; i < sizeof(T); ++i) {
        buf += static_cast<char>(a & 0xFF);
        a >>= 8;
    }
}
|
||||
|
||||
/// Append `str` to `buf` prefixed with its length as a little-endian
/// size_t. The previous reserve added only 4 bytes for the length
/// prefix, but serialize_integer writes sizeof(size_t) bytes (8 on the
/// platforms we build for), so it systematically under-reserved.
static void serialize(std::string& buf, const std::string& str) {

    buf.reserve(buf.size() + str.size() + sizeof(size_t));
    serialize_integer(buf, str.size());
    buf += str;
}
|
||||
|
||||
/// Append one message to `res` in the relay wire format: raw pubkey
/// bytes, length-prefixed hash and data, little-endian ttl and
/// timestamp, then length-prefixed nonce. The field order here must
/// mirror deserialize_messages() exactly.
template <typename T>
void serialize_message(std::string& res, const T& msg) {

    /// TODO: use binary / base64 representation for pk
    res += msg.pub_key;
    serialize(res, msg.hash);
    serialize(res, msg.data);
    serialize_integer(res, msg.ttl);
    serialize_integer(res, msg.timestamp);
    serialize(res, msg.nonce);

    OXEN_LOG(trace, "serialized message: {}", msg.data);
}

// Explicit instantiations for the two message-like types we relay.
template void serialize_message(std::string& res, const message_t& msg);
template void serialize_message(std::string& res, const Item& msg);
|
||||
|
||||
template <typename T>
|
||||
std::vector<std::string> serialize_messages(const std::vector<T>& msgs) {
|
||||
|
||||
std::vector<std::string> res;
|
||||
|
||||
std::string buf;
|
||||
|
||||
constexpr size_t BATCH_SIZE = 500000;
|
||||
|
||||
for (const auto& msg : msgs) {
|
||||
serialize_message(buf, msg);
|
||||
if (buf.size() > BATCH_SIZE) {
|
||||
res.push_back(std::move(buf));
|
||||
buf.clear();
|
||||
}
|
||||
}
|
||||
|
||||
if (!buf.empty()) {
|
||||
res.push_back(std::move(buf));
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
template std::vector<std::string>
|
||||
serialize_messages(const std::vector<message_t>& msgs);
|
||||
|
||||
template std::vector<std::string>
|
||||
serialize_messages(const std::vector<Item>& msgs);
|
||||
|
||||
/// Minimal consuming cursor over a std::string used while
/// deserializing. (File-local helper predating the use of
/// std::string_view here; iterator-pair based and unrelated to it.)
/// Fix: size()/empty() are observers and are now const.
struct string_view {

    std::string::const_iterator it;
    const std::string::const_iterator it_end;

    string_view(const std::string& data)
        : it(data.begin()), it_end(data.end()) {}

    /// Number of bytes left to consume.
    size_t size() const { return it_end - it; }

    /// True once the cursor has reached (or passed) the end.
    bool empty() const { return it_end <= it; }
};
|
||||
|
||||
/// Consume exactly `len` bytes from `slice` and return them as a
/// string; returns std::nullopt when fewer than `len` bytes remain.
static std::optional<std::string> deserialize_string(string_view& slice,
                                                     size_t len) {

    if (slice.size() < len)
        return std::nullopt;

    std::string value(slice.it, slice.it + len);
    slice.it += len;
    return value;
}
|
||||
|
||||
/// Consume a length-prefixed string: a little-endian size_t length
/// followed by that many bytes. Returns std::nullopt on truncated input.
static std::optional<std::string> deserialize_string(string_view& slice) {

    if (slice.size() < sizeof(size_t))
        return std::nullopt;

    // deserialize_integer advances slice.it past the length prefix.
    const auto len = deserialize_integer<size_t>(slice.it);

    return deserialize_string(slice, len);
}
|
||||
|
||||
/// Consume a little-endian uint64_t from `slice`, or std::nullopt when
/// fewer than 8 bytes remain.
static std::optional<uint64_t> deserialize_uint64(string_view& slice) {

    if (slice.size() < sizeof(uint64_t))
        return std::nullopt;

    return deserialize_integer<uint64_t>(slice.it);
}
|
||||
|
||||
/// Parse a blob produced by serialize_messages() back into messages.
/// Fields are consumed in the exact order serialize_message() wrote
/// them: pubkey, hash, data, ttl, timestamp, nonce. If any field is
/// truncated or invalid, the whole batch is rejected and an empty
/// vector is returned.
std::vector<message_t> deserialize_messages(const std::string& blob) {

    OXEN_LOG(trace, "=== Deserializing ===");

    std::vector<message_t> result;

    string_view slice{blob};

    while (!slice.empty()) {

        /// Deserialize PK
        auto pk = deserialize_string(slice, oxen::get_user_pubkey_size());
        if (!pk) {
            OXEN_LOG(debug, "Could not deserialize pk");
            return {};
        }

        /// Deserialize Hash
        auto hash = deserialize_string(slice);
        if (!hash) {
            OXEN_LOG(debug, "Could not deserialize hash");
            return {};
        }

        /// Deserialize Data
        auto data = deserialize_string(slice);
        if (!data) {
            OXEN_LOG(debug, "Could not deserialize data");
            return {};
        }

        /// Deserialize TTL
        auto ttl = deserialize_uint64(slice);
        if (!ttl) {
            OXEN_LOG(debug, "Could not deserialize ttl");
            return {};
        }

        /// Deserialize Timestamp
        auto timestamp = deserialize_uint64(slice);
        if (!timestamp) {
            OXEN_LOG(debug, "Could not deserialize timestamp");
            return {};
        }

        /// Deserialize Nonce
        auto nonce = deserialize_string(slice);
        if (!nonce) {
            OXEN_LOG(debug, "Could not deserialize nonce");
            return {};
        }

        OXEN_LOG(trace, "Deserialized data: {}", *data);

        OXEN_LOG(trace, "pk: {}, msg: {}", *pk, *data);

        // Note: message_t's field order is (pk, data, hash, ...), which
        // differs from the wire order above.
        result.push_back({*pk, *data, *hash, *ttl, *timestamp, *nonce});
    }

    OXEN_LOG(trace, "=== END ===");

    return result;
}
|
||||
|
||||
} // namespace oxen
|
|
@ -1,22 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
namespace oxen {
|
||||
|
||||
namespace storage {
|
||||
struct Item;
|
||||
}
|
||||
|
||||
struct message_t;

/// Append `msg` to `buf` in the relay wire format
/// (pubkey, hash, data, ttl, timestamp, nonce).
template <typename T>
void serialize_message(std::string& buf, const T& msg);

/// Serialize `msgs` into one or more blobs, each capped (approximately)
/// at a fixed batch size.
template <typename T>
std::vector<std::string> serialize_messages(const std::vector<T>& msgs);

/// Parse a blob produced by serialize_messages(); returns an empty
/// vector if the blob is malformed.
std::vector<message_t> deserialize_messages(const std::string& blob);
|
||||
|
||||
} // namespace oxen
|
|
@ -1,232 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include "oxen_logger.h"
|
||||
#include <boost/asio/buffer.hpp>
|
||||
#include <boost/asio/ssl/context.hpp>
|
||||
|
||||
#include <openssl/conf.h>
|
||||
#include <openssl/crypto.h>
|
||||
#include <openssl/dh.h>
|
||||
#include <openssl/evp.h>
|
||||
#include <openssl/pem.h>
|
||||
#include <openssl/x509v3.h>
|
||||
|
||||
#include <cstddef>
|
||||
#include <fstream>
|
||||
#include <filesystem>
|
||||
#include <memory>
|
||||
|
||||
void generate_dh_pem(const std::filesystem::path& dh_path) {
|
||||
const int prime_len = 2048;
|
||||
const int generator = DH_GENERATOR_2;
|
||||
DH* dh = DH_new();
|
||||
if (dh == NULL) {
|
||||
OXEN_LOG(error, "Alloc for dh failed");
|
||||
ERR_print_errors_fp(stderr);
|
||||
abort();
|
||||
}
|
||||
OXEN_LOG(info, "Generating DH parameter, this might take a while...");
|
||||
|
||||
const int res =
|
||||
DH_generate_parameters_ex(dh, prime_len, generator, nullptr);
|
||||
|
||||
if (!res) {
|
||||
OXEN_LOG(error, "Alloc for dh failed");
|
||||
ERR_print_errors_fp(stderr);
|
||||
abort();
|
||||
}
|
||||
|
||||
OXEN_LOG(info, "DH parameter done!");
|
||||
FILE* pFile = NULL;
|
||||
pFile = fopen(dh_path.u8string().c_str(), "wt");
|
||||
PEM_write_DHparams(pFile, dh);
|
||||
fclose(pFile);
|
||||
}
|
||||
|
||||
/* Add extension using V3 code: we can set the config file as NULL
|
||||
* because we wont reference any other sections.
|
||||
*/
|
||||
|
||||
/* Add an X509v3 extension identified by `nid` with value `value` to the
 * self-signed `cert`. The config database is NULL because we never
 * reference any other sections. Returns 1 on success, 0 on failure. */
int add_ext(X509* cert, int nid, char* value) {
    X509V3_CTX ctx;
    /* This sets the 'context' of the extensions. */
    /* No configuration database */
    X509V3_set_ctx_nodb(&ctx);
    /* Issuer and subject certs are both the target, since it is
     * self-signed; no request and no CRL. */
    X509V3_set_ctx(&ctx, cert, cert, NULL, NULL, 0);

    X509_EXTENSION* ext = X509V3_EXT_conf_nid(NULL, &ctx, nid, value);
    if (!ext)
        return 0;

    X509_add_ext(cert, ext, -1);
    X509_EXTENSION_free(ext);
    return 1;
}
|
||||
|
||||
/// Create a self-signed X509 certificate with a freshly generated RSA
/// key. On success returns 1 and stores the certificate in *x509p and
/// the key in *pkeyp (freed by the caller); returns 0 on failure.
/// @param bits   RSA modulus size in bits
/// @param serial certificate serial number
/// @param days   validity period from now, in days
int mkcert(X509** x509p, EVP_PKEY** pkeyp, int bits, int serial, int days) {
    X509* x;
    EVP_PKEY* pk;
    RSA* rsa;
    X509_NAME* name = NULL;
    BIGNUM* bne = NULL;
    int res = 0;

    // Reuse a caller-supplied key/cert if one was passed in; otherwise
    // allocate our own.
    if ((pkeyp == NULL) || (*pkeyp == NULL)) {
        if ((pk = EVP_PKEY_new()) == NULL) {
            abort();
            return (0);
        }
    } else
        pk = *pkeyp;

    if ((x509p == NULL) || (*x509p == NULL)) {
        if ((x = X509_new()) == NULL)
            goto err;
    } else
        x = *x509p;

    bne = BN_new();
    rsa = RSA_new();

    if (BN_set_word(bne, RSA_F4) != 1) {
        goto err;
    }

    if (!RSA_generate_key_ex(rsa, bits, bne, NULL)) {
        goto err;
    }

    // https://www.openssl.org/docs/man1.0.2/man3/EVP_PKEY_assign_RSA.html
    // "[rsa] will be freed when the parent pkey is freed."
    if (!EVP_PKEY_assign_RSA(pk, rsa)) {
        goto err;
    }

    // Version 2 here means an X509 v3 certificate (zero-based).
    X509_set_version(x, 2);
    ASN1_INTEGER_set(X509_get_serialNumber(x), serial);
    X509_gmtime_adj(X509_get_notBefore(x), 0);
    X509_gmtime_adj(X509_get_notAfter(x), (long)60 * 60 * 24 * days);
    X509_set_pubkey(x, pk);

    name = X509_get_subject_name(x);

    /* This function creates and adds the entry, working out the
     * correct string type and performing checks on its length.
     * Normally we'd check the return value for errors...
     */
    X509_NAME_add_entry_by_txt(name, "C", MBSTRING_ASC,
                               (const unsigned char*)"AU", -1, -1, 0);
    X509_NAME_add_entry_by_txt(name, "CN", MBSTRING_ASC,
                               (const unsigned char*)"localhost", -1, -1, 0);
    X509_NAME_add_entry_by_txt(name, "O", MBSTRING_ASC,
                               (const unsigned char*)"Oxen", -1, -1, 0);

    /* Its self signed so set the issuer name to be the same as the
     * subject.
     */
    X509_set_issuer_name(x, name);

    /* Add various extensions: standard extensions */
    // add_ext(x, NID_basic_constraints, "critical,CA:FALSE");
    // add_ext(x, NID_key_usage, "critical,keyCertSign,cRLSign");

    add_ext(x, NID_subject_key_identifier, (char*)"hash");

    /* Some Netscape specific extensions */
    // add_ext(x, NID_netscape_cert_type, "sslCA");

    // add_ext(x, NID_netscape_comment, "example comment extension");

#ifdef CUSTOM_EXT
    /* Maybe even add our own extension based on existing */
    {
        int nid;
        nid = OBJ_create("1.2.3.4", "MyAlias", "My Test Alias Extension");
        X509V3_EXT_add_alias(nid, NID_netscape_comment);
        add_ext(x, nid, "example comment alias");
    }
#endif

    if (!X509_sign(x, pk, EVP_sha256()))
        goto err;

    *x509p = x;
    *pkeyp = pk;
    res = 1;
err:
    BN_free(bne);
    // rsa will be freed automatically when pk is freed by the caller
    // NOTE(review): on the failure paths a locally-allocated `pk` or `x`
    // is leaked, and `rsa` leaks if EVP_PKEY_assign_RSA was never
    // reached — confirm before tightening the cleanup.
    return (res);
}
|
||||
|
||||
void generate_cert(const std::filesystem::path& cert_path, const std::filesystem::path& key_path) {
|
||||
BIO* bio_err;
|
||||
X509* x509 = NULL;
|
||||
EVP_PKEY* pkey = NULL;
|
||||
FILE* key_f = NULL;
|
||||
FILE* cert_f = NULL;
|
||||
|
||||
OpenSSL_add_all_digests();
|
||||
|
||||
CRYPTO_mem_ctrl(CRYPTO_MEM_CHECK_ON);
|
||||
|
||||
bio_err = BIO_new_fp(stderr, BIO_NOCLOSE);
|
||||
|
||||
if (!mkcert(&x509, &pkey, 2048, 1, 10000))
|
||||
goto err;
|
||||
// X509_print_fp(stdout, x509);
|
||||
|
||||
key_f = fopen(key_path.u8string().c_str(), "wt");
|
||||
if (!PEM_write_PrivateKey(key_f, pkey, NULL, NULL, 0, NULL, NULL))
|
||||
goto err;
|
||||
cert_f = fopen(cert_path.u8string().c_str(), "wt");
|
||||
PEM_write_X509(cert_f, x509);
|
||||
|
||||
err:
|
||||
fclose(cert_f);
|
||||
fclose(key_f);
|
||||
X509_free(x509);
|
||||
EVP_PKEY_free(pkey);
|
||||
|
||||
CRYPTO_cleanup_all_ex_data();
|
||||
|
||||
// CRYPTO_mem_leaks(bio_err);
|
||||
BIO_free(bio_err);
|
||||
}
|
||||
|
||||
inline void load_server_certificate(const std::filesystem::path& base_path,
|
||||
boost::asio::ssl::context& ctx) {
|
||||
/*
|
||||
The certificate was generated from CMD.EXE on Windows 10 using:
|
||||
|
||||
winpty openssl dhparam -out dh.pem 2048
|
||||
winpty openssl req -newkey rsa:2048 -nodes -keyout key.pem -x509 -days
|
||||
10000 -out cert.pem -subj "//C=US\ST=CA\L=Los
|
||||
Angeles\O=Beast\CN=www.example.com"
|
||||
*/
|
||||
const auto cert_path = base_path / "cert.pem";
|
||||
const auto key_path = base_path / "key.pem";
|
||||
const auto dh_path = base_path / "dh.pem";
|
||||
|
||||
if (!std::filesystem::exists(cert_path) ||
|
||||
!std::filesystem::exists(key_path)) {
|
||||
generate_cert(cert_path, key_path);
|
||||
}
|
||||
if (!std::filesystem::exists(dh_path)) {
|
||||
generate_dh_pem(dh_path);
|
||||
}
|
||||
|
||||
ctx.set_options(boost::asio::ssl::context::default_workarounds |
|
||||
boost::asio::ssl::context::no_sslv2 |
|
||||
boost::asio::ssl::context::single_dh_use);
|
||||
|
||||
ctx.use_certificate_chain_file(cert_path.u8string());
|
||||
|
||||
ctx.use_private_key_file(key_path.u8string(),
|
||||
boost::asio::ssl::context::file_format::pem);
|
||||
|
||||
ctx.use_tmp_dh_file(dh_path.u8string());
|
||||
}
|
File diff suppressed because it is too large
Load diff
|
@ -1,349 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include <Database.hpp>
|
||||
#include <chrono>
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <thread>
|
||||
#include <unordered_map>
|
||||
|
||||
#include <boost/asio.hpp>
|
||||
#include <boost/beast/http.hpp>
|
||||
#include <boost/circular_buffer.hpp>
|
||||
|
||||
#include "oxen_common.h"
|
||||
#include "oxend_key.h"
|
||||
#include "pow.hpp"
|
||||
#include "reachability_testing.h"
|
||||
#include "stats.h"
|
||||
#include "swarm.h"
|
||||
|
||||
static constexpr size_t BLOCK_HASH_CACHE_SIZE = 30;
|
||||
static constexpr int STORAGE_SERVER_HARDFORK = 12;
|
||||
static constexpr int ENFORCED_REACHABILITY_HARDFORK = 13;
|
||||
static constexpr int OXENMQ_ONION_HARDFORK = 15;
|
||||
|
||||
class Database;
|
||||
|
||||
namespace http = boost::beast::http;
|
||||
using request_t = http::request<http::string_body>;
|
||||
|
||||
namespace oxenmq {
|
||||
struct ConnectionID;
|
||||
}
|
||||
|
||||
namespace oxen {
|
||||
|
||||
namespace storage {
|
||||
struct Item;
|
||||
} // namespace storage
|
||||
|
||||
struct sn_response_t;
|
||||
struct blockchain_test_answer_t;
|
||||
struct bc_test_params_t;
|
||||
|
||||
class OxendClient;
|
||||
class OxenmqServer;
|
||||
|
||||
namespace ss_client {
|
||||
class Request;
|
||||
enum class ReqMethod;
|
||||
using Callback = std::function<void(bool success, std::vector<std::string>)>;
|
||||
|
||||
} // namespace ss_client
|
||||
|
||||
namespace http_server {
|
||||
class connection_t;
|
||||
}
|
||||
|
||||
struct oxend_key_pair_t;
|
||||
|
||||
using connection_ptr = std::shared_ptr<http_server::connection_t>;
|
||||
|
||||
class Swarm;
|
||||
|
||||
struct signature;
|
||||
|
||||
using pow_dns_callback_t =
|
||||
std::function<void(const std::vector<pow_difficulty_t>&)>;
|
||||
|
||||
/// Represents failed attempt at communicating with a SNode
/// (currently only for single messages)
class FailedRequestHandler
    : public std::enable_shared_from_this<FailedRequestHandler> {
    boost::asio::io_context& ioc_;
    boost::asio::steady_timer retry_timer_;
    sn_record_t sn_;
    const std::shared_ptr<request_t> request_;

    // Number of retransmission attempts made so far.
    uint32_t attempt_count_ = 0;

    /// Call this if we give up re-transmitting
    std::function<void()> give_up_callback_;

    // Re-send the request; takes a shared_ptr to itself so the handler
    // stays alive across the async retry.
    void retry(std::shared_ptr<FailedRequestHandler>&& self);

  public:
    FailedRequestHandler(boost::asio::io_context& ioc, const sn_record_t& sn,
                         std::shared_ptr<request_t> req,
                         std::function<void()> give_up_cb = nullptr);

    ~FailedRequestHandler();
    /// Initiates the timer for retrying (which cannot be done directly in
    /// the constructor as it is not possible to create a shared ptr
    /// to itself before the construction is done)
    void init_timer();
};
|
||||
|
||||
/// Outcome of handling a storage test request.
/// WRONG_REQ - request was ignored as not valid (e.g. incorrect tester)
enum class MessageTestStatus { SUCCESS, RETRY, ERROR, WRONG_REQ };

/// Registration/staking state of a service node as seen via oxend.
enum class SnodeStatus { UNKNOWN, UNSTAKED, DECOMMISSIONED, ACTIVE };
|
||||
|
||||
/// All service node logic that is not network-specific
class ServiceNode {
    using pub_key_t = std::string;
    using listeners_t = std::vector<connection_ptr>;

    boost::asio::io_context& ioc_;
    // Separate io_context serviced by worker_thread_ for background work.
    boost::asio::io_context& worker_ioc_;
    std::thread worker_thread_;

    // We set the default difficulty to some low value, so that we don't reject
    // clients unnecessarily before we get the DNS record
    pow_difficulty_t curr_pow_difficulty_{std::chrono::milliseconds(0), 1};
    std::vector<pow_difficulty_t> pow_history_{curr_pow_difficulty_};

    bool force_start_ = false;
    bool syncing_ = true;
    int hardfork_ = 0;
    uint64_t block_height_ = 0;
    uint64_t target_height_ = 0;
    const OxendClient& oxend_client_;
    std::string block_hash_;
    std::unique_ptr<Swarm> swarm_;
    std::unique_ptr<Database> db_;

    SnodeStatus status_ = SnodeStatus::UNKNOWN;

    sn_record_t our_address_;

    /// Cache for block_height/block_hash mapping
    boost::circular_buffer<std::pair<uint64_t, std::string>>
        block_hashes_cache_{BLOCK_HASH_CACHE_SIZE};

    boost::asio::steady_timer pow_update_timer_;

    boost::asio::steady_timer check_version_timer_;

    boost::asio::steady_timer swarm_update_timer_;

    boost::asio::steady_timer oxend_ping_timer_;

    boost::asio::steady_timer stats_cleanup_timer_;

    boost::asio::steady_timer peer_ping_timer_;

    /// Used to periodially send messages from relay_buffer_
    boost::asio::steady_timer relay_timer_;

    oxen::oxend_key_pair_t oxend_key_pair_;

    // Need to make sure we only use this to get lmq() object and
    // not call any method that would in turn call a method in SN
    // causing a deadlock
    OxenmqServer& lmq_server_;

    reachability_records_t reach_records_;

    /// Container for recently received messages directly from
    /// clients;
    std::vector<message_t> relay_buffer_;

    mutable all_stats_t all_stats_;

    // Recursive: several public entry points lock it and call private
    // helpers that lock it again.
    mutable std::recursive_mutex sn_mutex_;

    void save_if_new(const message_t& msg);

    // Save items to the database, notifying listeners as necessary
    void save_bulk(const std::vector<storage::Item>& items);

    void on_bootstrap_update(block_update_t&& bu);

    void on_swarm_update(block_update_t&& bu);

    void bootstrap_data();

    void bootstrap_peers(
        const std::vector<sn_record_t>& peers) const; // mutex not needed

    void bootstrap_swarms(const std::vector<swarm_id_t>& swarms) const;

    /// Distribute all our data to where it belongs
    /// (called when our old node got dissolved)
    void salvage_data() const; // mutex not needed

    void attach_signature(std::shared_ptr<request_t>& request,
                          const signature& sig) const; // mutex not needed

    /// Reliably push message/batch to a service node
    void
    relay_data_reliable(const std::string& blob,
                        const sn_record_t& address) const; // mutex not needed

    template <typename Message>
    void relay_messages(
        const std::vector<Message>& messages,
        const std::vector<sn_record_t>& snodes) const; // mutex not needed

    /// Request swarm structure from the deamon and reset the timer
    void swarm_timer_tick();

    void cleanup_timer_tick();

    void ping_peers_tick();

    void relay_buffered_messages();

    /// Check the latest version from DNS text record
    void check_version_timer_tick(); // mutex not needed
    /// Update PoW difficulty from DNS text record
    void
    pow_difficulty_timer_tick(const pow_dns_callback_t cb); // mutex not needed

    /// Ping the storage server periodically as required for uptime proofs
    void oxend_ping_timer_tick();

    /// Return tester/testee pair based on block_height
    bool derive_tester_testee(uint64_t block_height, sn_record_t& tester,
                              sn_record_t& testee);

    /// Send a request to a SN under test
    void send_storage_test_req(const sn_record_t& testee, uint64_t test_height,
                               const storage::Item& item);

    void send_blockchain_test_req(const sn_record_t& testee,
                                  bc_test_params_t params, uint64_t test_height,
                                  blockchain_test_answer_t answer);

    /// Report `sn` to Oxend as unreachable
    void report_node_reachability(const sn_pub_key_t& sn, bool reachable);

    void process_storage_test_response(const sn_record_t& testee,
                                       const storage::Item& item,
                                       uint64_t test_height,
                                       sn_response_t&& res);

    void process_reach_test_result(const sn_pub_key_t& pk, ReachType type,
                                   bool success);

    /// From a peer
    void process_blockchain_test_response(sn_response_t&& res,
                                          blockchain_test_answer_t our_answer,
                                          sn_record_t testee,
                                          uint64_t bc_height);

    /// Check if it is our turn to test and initiate peer test if so
    void initiate_peer_test();

    // Select a random message from our database, return false on error
    bool select_random_message(storage::Item& item); // mutex not needed

    // Ping some node and record its reachability
    void test_reachability(const sn_record_t& sn); // mutex not needed

    void sign_request(std::shared_ptr<request_t>& req) const;

  public:
    ServiceNode(boost::asio::io_context& ioc,
                boost::asio::io_context& worker_ioc, uint16_t port,
                OxenmqServer& lmq_server,
                const oxen::oxend_key_pair_t& key_pair,
                const std::string& ed25519hex, const std::string& db_location,
                OxendClient& oxend_client, const bool force_start);

    ~ServiceNode();

    // Return info about this node as it is advertised to other nodes
    const sn_record_t& own_address() { return our_address_; }

    // Record the time of our last being tested over lmq/http
    void update_last_ping(ReachType type);

    // These two are only needed because we store stats in Service Node,
    // might move it out later
    void record_proxy_request();
    void record_onion_request();

    // This is new, so it does not need to support http, thus new (if temp)
    // method
    void send_onion_to_sn_v1(const sn_record_t& sn, const std::string& payload,
                             const std::string& eph_key,
                             ss_client::Callback cb) const;

    /// Same as v1, but using the new protocol (ciphertext as binary)
    void send_onion_to_sn_v2(const sn_record_t& sn, const std::string& payload,
                             const std::string& eph_key,
                             ss_client::Callback cb) const;

    // TODO: move this eventually out of SN
    // Send by either http or lmq
    void send_to_sn(const sn_record_t& sn, ss_client::ReqMethod method,
                    ss_client::Request req, ss_client::Callback cb) const;

    // Return true if the service node is ready to start running
    bool snode_ready(std::string* reason = nullptr);

    /// Process message received from a client, return false if not in a swarm
    bool process_store(const message_t& msg);

    /// Process incoming blob of messages: add to DB if new
    void process_push_batch(const std::string& blob);

    /// request blockchain test from a peer
    void perform_blockchain_test(
        bc_test_params_t params,
        std::function<void(blockchain_test_answer_t)>&& cb) const;

    // Attempt to find an answer (message body) to the storage test
    MessageTestStatus process_storage_test_req(uint64_t blk_height,
                                               const std::string& tester_addr,
                                               const std::string& msg_hash,
                                               std::string& answer);

    bool is_pubkey_for_us(const user_pubkey_t& pk) const;

    std::vector<sn_record_t> get_snodes_by_pk(const user_pubkey_t& pk);

    bool is_snode_address_known(const std::string&);

    /// return all messages for a particular PK (in JSON)
    bool get_all_messages(std::vector<storage::Item>& all_entries) const;

    // Return the current PoW difficulty
    int get_curr_pow_difficulty() const;

    bool retrieve(const std::string& pubKey, const std::string& last_hash,
                  std::vector<storage::Item>& items);

    void
    set_difficulty_history(const std::vector<pow_difficulty_t>& new_history);

    // Stats for session clients that want to know the version number
    std::string get_stats_for_session_client() const;

    std::string get_stats() const;

    std::string get_status_line() const;

    std::optional<sn_record_t>
    find_node_by_x25519_bin(const sn_pub_key_t& address) const;

    std::optional<sn_record_t>
    find_node_by_ed25519_pk(const std::string& pk) const;
};
|
||||
|
||||
} // namespace oxen
|
|
@ -1,41 +0,0 @@
|
|||
#include "stats.h"
|
||||
#include <algorithm>
|
||||
#include <chrono>
|
||||
#include <iostream>
|
||||
|
||||
using namespace std::chrono_literals;
|
||||
|
||||
namespace oxen {
|
||||
|
||||
static void cleanup_old(std::deque<test_result_t>& tests, time_t cutoff_time) {
|
||||
|
||||
const auto it = std::find_if(tests.begin(), tests.end(),
|
||||
[cutoff_time](const test_result_t& res) {
|
||||
return res.timestamp > cutoff_time;
|
||||
});
|
||||
|
||||
tests.erase(tests.begin(), it);
|
||||
}
|
||||
|
||||
static constexpr std::chrono::seconds ROLLING_WINDOW_SIZE = 120min;
|
||||
|
||||
void all_stats_t::cleanup() {
|
||||
|
||||
using std::chrono::duration_cast;
|
||||
using std::chrono::seconds;
|
||||
|
||||
const auto cutoff = time(nullptr) - ROLLING_WINDOW_SIZE.count();
|
||||
|
||||
for (auto& kv : peer_report_) {
|
||||
|
||||
const sn_record_t& sn = kv.first;
|
||||
|
||||
cleanup_old(peer_report_[sn].storage_tests, cutoff);
|
||||
cleanup_old(peer_report_[sn].blockchain_tests, cutoff);
|
||||
}
|
||||
|
||||
/// updated stats for "previous period"
|
||||
this->next_period();
|
||||
}
|
||||
|
||||
} // namespace oxen
|
|
@ -1,165 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include "oxen_common.h"
|
||||
#include <atomic>
|
||||
#include <deque>
|
||||
#include <unordered_map>
|
||||
|
||||
namespace oxen {
|
||||
|
||||
struct time_entry_t {
    time_t timestamp;
};

/// Outcome categories for storage/blockchain tests.
enum class ResultType { OK, MISMATCH, OTHER, REJECTED };

struct test_result_t {

    // seconds since Epoch when entry was recorded
    time_t timestamp;
    ResultType result;
};

/// Human-readable name for a test result; anything unrecognized maps
/// to "OTHER".
inline const char* to_str(ResultType result) {
    if (result == ResultType::OK)
        return "OK";
    if (result == ResultType::MISMATCH)
        return "MISMATCH";
    if (result == ResultType::REJECTED)
        return "REJECTED";
    return "OTHER";
}
|
||||
|
||||
// Stats per peer
struct peer_stats_t {

    // how many times a single request failed
    uint64_t requests_failed = 0;
    // how many times a series of push requests failed
    // causing this node to give up re-transmitting
    uint64_t pushes_failed = 0;

    // Rolling windows of recent test outcomes, oldest first; pruned by
    // all_stats_t::cleanup().
    std::deque<test_result_t> storage_tests;

    std::deque<test_result_t> blockchain_tests;
};
|
||||
|
||||
class all_stats_t {
|
||||
|
||||
// ===== This node's stats =====
|
||||
uint64_t total_client_store_requests = 0;
|
||||
// Number of requests in the latest x min interval
|
||||
uint64_t previous_period_store_requests = 0;
|
||||
// Number of requests after the latest x min interval
|
||||
uint64_t recent_store_requests = 0;
|
||||
|
||||
uint64_t total_client_retrieve_requests = 0;
|
||||
// Number of requests in the latest x min interval
|
||||
uint64_t previous_period_retrieve_requests = 0;
|
||||
// Number of requests after the latest x min interval
|
||||
uint64_t recent_retrieve_requests = 0;
|
||||
|
||||
uint64_t previous_period_proxy_requests = 0;
|
||||
std::atomic<uint64_t> recent_proxy_requests{0};
|
||||
|
||||
uint64_t previous_period_onion_requests = 0;
|
||||
std::atomic<uint64_t> recent_onion_requests{0};
|
||||
|
||||
time_point_t reset_time_ = std::chrono::steady_clock::now();
|
||||
// =============================
|
||||
|
||||
/// update period moving recent request counters to
|
||||
/// the `previous period`
|
||||
void next_period() {
|
||||
previous_period_store_requests = recent_store_requests;
|
||||
previous_period_retrieve_requests = recent_retrieve_requests;
|
||||
previous_period_proxy_requests = recent_proxy_requests.load();
|
||||
previous_period_onion_requests = recent_onion_requests.load();
|
||||
recent_store_requests = 0;
|
||||
recent_retrieve_requests = 0;
|
||||
recent_proxy_requests = 0;
|
||||
recent_onion_requests = 0;
|
||||
}
|
||||
|
||||
public:
|
||||
// stats per every peer in our swarm (including former peers)
|
||||
std::unordered_map<sn_record_t, peer_stats_t> peer_report_;
|
||||
|
||||
void record_request_failed(const sn_record_t& sn) {
|
||||
peer_report_[sn].requests_failed++;
|
||||
}
|
||||
|
||||
void record_push_failed(const sn_record_t& sn) {
|
||||
peer_report_[sn].pushes_failed++;
|
||||
}
|
||||
|
||||
void record_storage_test_result(const sn_record_t& sn, ResultType result) {
|
||||
test_result_t res = {std::time(nullptr), result};
|
||||
peer_report_[sn].storage_tests.push_back(res);
|
||||
}
|
||||
|
||||
void record_blockchain_test_result(const sn_record_t& sn,
|
||||
ResultType result) {
|
||||
test_result_t t = {std::time(nullptr), result};
|
||||
peer_report_[sn].blockchain_tests.push_back(t);
|
||||
}
|
||||
|
||||
// remove old test entries and reset counters, update reset time
|
||||
void cleanup();
|
||||
|
||||
void bump_proxy_requests() { recent_proxy_requests++; }
|
||||
|
||||
void bump_onion_requests() { recent_proxy_requests++; }
|
||||
|
||||
void bump_store_requests() {
|
||||
total_client_store_requests++;
|
||||
recent_store_requests++;
|
||||
}
|
||||
|
||||
void bump_retrieve_requests() {
|
||||
total_client_retrieve_requests++;
|
||||
recent_retrieve_requests++;
|
||||
}
|
||||
|
||||
uint64_t get_total_store_requests() const {
|
||||
return total_client_store_requests;
|
||||
}
|
||||
|
||||
uint64_t get_recent_store_requests() const { return recent_store_requests; }
|
||||
|
||||
uint64_t get_recent_proxy_requests() const { return recent_proxy_requests; }
|
||||
|
||||
uint64_t get_previous_period_proxy_requests() const {
|
||||
return previous_period_proxy_requests;
|
||||
}
|
||||
|
||||
uint64_t get_recent_onion_requests() const { return recent_onion_requests; }
|
||||
|
||||
uint64_t get_previous_period_onion_requests() const {
|
||||
return previous_period_onion_requests;
|
||||
}
|
||||
|
||||
uint64_t get_previous_period_store_requests() const {
|
||||
return previous_period_store_requests;
|
||||
}
|
||||
|
||||
uint64_t get_total_retrieve_requests() const {
|
||||
return total_client_retrieve_requests;
|
||||
}
|
||||
|
||||
uint64_t get_recent_retrieve_requests() const {
|
||||
return recent_retrieve_requests;
|
||||
}
|
||||
|
||||
uint64_t get_previous_period_retrieve_requests() const {
|
||||
return previous_period_retrieve_requests;
|
||||
}
|
||||
|
||||
time_point_t get_reset_time() const { return reset_time_; }
|
||||
};
|
||||
|
||||
} // namespace oxen
|
|
@ -1,390 +0,0 @@
|
|||
#include "swarm.h"
|
||||
#include "http_connection.h"
|
||||
#include "oxen_logger.h"
|
||||
|
||||
#include "service_node.h"
|
||||
|
||||
#include <ostream>
|
||||
#include <stdlib.h>
|
||||
#include <unordered_map>
|
||||
|
||||
#include "utils.hpp"
|
||||
|
||||
namespace oxen {
|
||||
|
||||
static bool swarm_exists(const all_swarms_t& all_swarms,
|
||||
const swarm_id_t& swarm) {
|
||||
|
||||
const auto it = std::find_if(
|
||||
all_swarms.begin(), all_swarms.end(),
|
||||
[&swarm](const SwarmInfo& si) { return si.swarm_id == swarm; });
|
||||
|
||||
return it != all_swarms.end();
|
||||
}
|
||||
|
||||
void debug_print(std::ostream& os, const block_update_t& bu) {
|
||||
|
||||
os << "Block update: {\n";
|
||||
os << " height: " << bu.height << '\n';
|
||||
os << " block hash: " << bu.block_hash << '\n';
|
||||
os << " hardfork: " << bu.hardfork << '\n';
|
||||
os << " swarms: [\n";
|
||||
|
||||
for (const SwarmInfo& swarm : bu.swarms) {
|
||||
os << " {\n";
|
||||
os << " id: " << swarm.swarm_id << '\n';
|
||||
os << " }\n";
|
||||
}
|
||||
|
||||
os << " ]\n";
|
||||
os << "}\n";
|
||||
}
|
||||
|
||||
Swarm::~Swarm() = default;
|
||||
|
||||
/// Check whether `sid` is one of the currently known (active) swarms.
bool Swarm::is_existing_swarm(swarm_id_t sid) const {

    for (const SwarmInfo& info : all_valid_swarms_) {
        if (info.swarm_id == sid)
            return true;
    }
    return false;
}
|
||||
|
||||
/// Compare the incoming swarm composition against our current state and
/// summarise what changed from this node's point of view (new swarm id,
/// dissolution of our old swarm, newly joined peers, brand-new swarms).
SwarmEvents Swarm::derive_swarm_events(const all_swarms_t& swarms) const {

    SwarmEvents events = {};

    // Locate the swarm that contains our own address.
    const auto ours = std::find_if(
        swarms.begin(), swarms.end(), [this](const SwarmInfo& info) {
            const auto& members = info.snodes;
            return std::find(members.begin(), members.end(), our_address_) !=
                   members.end();
        });

    if (ours == swarms.end()) {
        // We are not in any swarm, nothing to do
        events.our_swarm_id = INVALID_SWARM_ID;
        return events;
    }

    events.our_swarm_id = ours->swarm_id;
    events.our_swarm_members = ours->snodes;

    if (cur_swarm_id_ == INVALID_SWARM_ID) {
        // Only started in a swarm, nothing to do at this stage
        return events;
    }

    if (cur_swarm_id_ != events.our_swarm_id) {
        // Got moved to a new swarm; if the old swarm no longer exists it was
        // dissolved and we need to push all our data to the new swarms.
        if (!swarm_exists(swarms, cur_swarm_id_)) {
            events.dissolved = true;
        }

        // If our old swarm is still alive, there is nothing for us to do
        return events;
    }

    /// --- WE are still in the same swarm if we reach here ---

    /// See if anyone joined our swarm
    for (const auto& member : ours->snodes) {

        const bool already_known =
            std::find(swarm_peers_.begin(), swarm_peers_.end(), member) !=
            swarm_peers_.end();

        if (!already_known && member != our_address_) {
            events.new_snodes.push_back(member);
        }
    }

    /// See if there are any new swarms
    for (const auto& info : swarms) {
        if (!this->is_existing_swarm(info.swarm_id)) {
            events.new_swarms.push_back(info.swarm_id);
        }
    }

    /// NOTE: need to be careful and make sure we don't miss any
    /// swarm update (e.g. if we don't update frequently enough)

    return events;
}
|
||||
|
||||
/// Set our current swarm id, logging the transition (started in a swarm,
/// moved to a different swarm, or no longer an active service node).
void Swarm::set_swarm_id(swarm_id_t sid) {

    if (sid == INVALID_SWARM_ID) {
        OXEN_LOG(warn, "We are not currently an active Service Node");
    } else if (cur_swarm_id_ == INVALID_SWARM_ID) {
        OXEN_LOG(info, "EVENT: started SN in swarm: {}", sid);
    } else if (cur_swarm_id_ != sid) {
        OXEN_LOG(info, "EVENT: got moved into a new swarm: {}", sid);
    }

    cur_swarm_id_ = sid;
}
|
||||
|
||||
static std::unordered_map<std::string, sn_record_t>
|
||||
get_snode_map_from_swarms(const all_swarms_t& swarms) {
|
||||
|
||||
std::unordered_map<std::string, sn_record_t> snode_map;
|
||||
for (const auto& swarm : swarms) {
|
||||
for (const auto& snode : swarm.snodes) {
|
||||
snode_map.insert({snode.sn_address(), snode});
|
||||
}
|
||||
}
|
||||
return snode_map;
|
||||
}
|
||||
|
||||
/// Return a copy of `swarms_to_keep` in which any snode still carrying the
/// placeholder address "0.0.0.0" gets its IP filled in from the matching
/// snode (by sn_address) found in `other_swarms`.
static all_swarms_t apply_ips(const all_swarms_t& swarms_to_keep,
                              const all_swarms_t& other_swarms) {

    all_swarms_t result = swarms_to_keep;
    const auto known_nodes = get_snode_map_from_swarms(other_swarms);

    int updated = 0;
    for (auto& swarm : result) {
        for (auto& node : swarm.snodes) {
            // Keep swarms_to_keep but don't overwrite with default IPs
            if (node.ip() != "0.0.0.0")
                continue;
            const auto it = known_nodes.find(node.sn_address());
            if (it != known_nodes.end()) {
                node.set_ip(it->second.ip());
                updated++;
            }
        }
    }

    OXEN_LOG(debug, "Updated {} entries from oxend", updated);
    return result;
}
|
||||
|
||||
/// Replace our swarm view with `new_swarms`, carrying over previously known
/// IPs for any node that arrives with a placeholder address.
void Swarm::apply_swarm_changes(const all_swarms_t& new_swarms) {
    OXEN_LOG(trace, "Applying swarm changes");
    all_valid_swarms_ = apply_ips(new_swarms, all_valid_swarms_);
}
|
||||
|
||||
void Swarm::update_state(const all_swarms_t& swarms,
|
||||
const std::vector<sn_record_t>& decommissioned,
|
||||
const SwarmEvents& events, bool active) {
|
||||
|
||||
if (active) {
|
||||
|
||||
// The following only makes sense for active nodes in a swarm
|
||||
|
||||
if (events.dissolved) {
|
||||
OXEN_LOG(info, "EVENT: our old swarm got DISSOLVED!");
|
||||
}
|
||||
|
||||
for (const sn_record_t& sn : events.new_snodes) {
|
||||
OXEN_LOG(info, "EVENT: detected new SN: {}", sn);
|
||||
}
|
||||
|
||||
for (swarm_id_t swarm : events.new_swarms) {
|
||||
OXEN_LOG(info, "EVENT: detected a new swarm: {}", swarm);
|
||||
}
|
||||
|
||||
apply_swarm_changes(swarms);
|
||||
|
||||
const auto& members = events.our_swarm_members;
|
||||
|
||||
/// sanity check
|
||||
if (members.empty())
|
||||
return;
|
||||
|
||||
swarm_peers_.clear();
|
||||
swarm_peers_.reserve(members.size() - 1);
|
||||
|
||||
std::copy_if(members.begin(), members.end(),
|
||||
std::back_inserter(swarm_peers_),
|
||||
[this](const sn_record_t& record) {
|
||||
return record != our_address_;
|
||||
});
|
||||
}
|
||||
|
||||
// Store a copy of every node in a separate data structure
|
||||
all_funded_nodes_.clear();
|
||||
|
||||
for (const auto& si : swarms) {
|
||||
for (const auto& sn : si.snodes) {
|
||||
all_funded_nodes_.push_back(sn);
|
||||
}
|
||||
}
|
||||
|
||||
for (const auto& sn : decommissioned) {
|
||||
all_funded_nodes_.push_back(sn);
|
||||
}
|
||||
}
|
||||
|
||||
std::optional<sn_record_t> Swarm::choose_funded_node() const {
|
||||
|
||||
if (all_funded_nodes_.empty())
|
||||
return std::nullopt;
|
||||
|
||||
const auto idx =
|
||||
util::uniform_distribution_portable(all_funded_nodes_.size());
|
||||
|
||||
// Note: this can return our own node which should be fine
|
||||
return all_funded_nodes_[idx];
|
||||
}
|
||||
|
||||
std::optional<sn_record_t> Swarm::find_node_by_port(uint16_t port) const {
|
||||
|
||||
for (const auto& sn : all_funded_nodes_) {
|
||||
if (sn.port() == port) {
|
||||
return sn;
|
||||
}
|
||||
}
|
||||
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
std::optional<sn_record_t>
|
||||
Swarm::find_node_by_ed25519_pk(const std::string& pk) const {
|
||||
|
||||
for (const auto& sn : all_funded_nodes_) {
|
||||
if (sn.pubkey_ed25519_hex() == pk) {
|
||||
return sn;
|
||||
}
|
||||
}
|
||||
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
std::optional<sn_record_t>
|
||||
Swarm::find_node_by_x25519_bin(const std::string& pk) const {
|
||||
|
||||
for (const auto& sn : all_funded_nodes_) {
|
||||
if (sn.pubkey_x25519_bin() == pk) {
|
||||
return sn;
|
||||
}
|
||||
}
|
||||
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
std::optional<sn_record_t> Swarm::get_node_by_pk(const sn_pub_key_t& pk) const {
|
||||
|
||||
for (const auto& sn : all_funded_nodes_) {
|
||||
if (sn.pub_key_base32z() == pk) {
|
||||
return sn;
|
||||
}
|
||||
}
|
||||
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
/// Fold a user pubkey's hex string into a uint64 by xor-ing 16-hex-char
/// chunks parsed with strtoull.
///
/// Note: pk is expected to contain two leading characters
/// (05 for the messenger) that do not participate in mapping.
///
/// Note: if conversion is not possible, we will still
/// get a value in res (possibly 0 or UINT64_MAX), which
/// we are not handling at the moment.
///
/// Fix: the previous version memcpy'd 16 bytes unconditionally, reading past
/// the end of the string whenever (length - 2) was not a multiple of 16; a
/// short trailing chunk is now copied with its actual length.
static uint64_t hex_to_u64(const user_pubkey_t& pk) {

    const std::string& str = pk.str();

    uint64_t res = 0;
    for (size_t i = 2; i < str.size(); i += 16) {
        /// Buffer for up to 16 characters, null terminated
        char buf[17] = {};
        size_t chunk = str.size() - i;
        if (chunk > 16)
            chunk = 16;
        memcpy(buf, str.data() + i, chunk);
        res ^= strtoull(buf, nullptr, 16);
    }

    return res;
}
|
||||
|
||||
bool Swarm::is_pubkey_for_us(const user_pubkey_t& pk) const {
|
||||
|
||||
/// TODO: Make sure no exceptions bubble up from here!
|
||||
return cur_swarm_id_ == get_swarm_by_pk(all_valid_swarms_, pk);
|
||||
}
|
||||
|
||||
bool Swarm::is_fully_funded_node(const std::string& sn_address) const {
|
||||
|
||||
return std::any_of(all_funded_nodes_.begin(), all_funded_nodes_.end(),
|
||||
[&sn_address](const sn_record_t& sn) {
|
||||
return sn.sn_address() == sn_address;
|
||||
});
|
||||
}
|
||||
|
||||
/// Map a user pubkey to the swarm responsible for it: the swarm whose id is
/// numerically closest to the 64-bit fold of the pubkey, treating the id
/// space as a ring (distances wrap around past the extremes).
swarm_id_t get_swarm_by_pk(const std::vector<SwarmInfo>& all_swarms,
                           const user_pubkey_t& pk) {

    const uint64_t target = hex_to_u64(pk);

    /// We reserve UINT64_MAX as a sentinel swarm id for unassigned snodes
    constexpr swarm_id_t MAX_ID = INVALID_SWARM_ID - 1;

    swarm_id_t best = INVALID_SWARM_ID;
    uint64_t best_dist = INVALID_SWARM_ID;

    /// We don't require that all_swarms is sorted, so we find
    /// the smallest/largest elements in the same loop
    swarm_id_t leftmost = INVALID_SWARM_ID;
    swarm_id_t rightmost = 0;

    for (const auto& si : all_swarms) {

        /// Just to be sure we check again that no decommissioned
        /// node is exposed to clients
        if (si.swarm_id == INVALID_SWARM_ID)
            continue;

        const uint64_t dist = (si.swarm_id > target) ? (si.swarm_id - target)
                                                     : (target - si.swarm_id);
        if (dist < best_dist) {
            best = si.swarm_id;
            best_dist = dist;
        }

        /// Track the leftmost and rightmost ids for the wraparound check
        if (si.swarm_id < leftmost)
            leftmost = si.swarm_id;
        if (si.swarm_id > rightmost)
            rightmost = si.swarm_id;
    }

    // Handle the wraparound special cases: a target past the rightmost id
    // may be closer (around the ring) to the leftmost id, and vice versa.
    if (target > rightmost) {
        // since rightmost is at least as large as leftmost,
        // target >= leftmost in this branch, so the value will
        // not overflow; the same logic applies to the else branch
        const uint64_t dist = (MAX_ID - target) + leftmost;
        if (dist < best_dist)
            best = leftmost;
    } else if (target < leftmost) {
        const uint64_t dist = target + (MAX_ID - rightmost);
        if (dist < best_dist)
            best = rightmost;
    }

    return best;
}
|
||||
|
||||
const std::vector<sn_record_t>& Swarm::other_nodes() const {
|
||||
return swarm_peers_;
|
||||
}
|
||||
|
||||
} // namespace oxen
|
|
@ -1,121 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include <iostream>
|
||||
#include <oxenmq/auth.h>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "oxen_common.h"
|
||||
|
||||
namespace boost {
|
||||
namespace asio {
|
||||
class io_context;
|
||||
}
|
||||
} // namespace boost
|
||||
|
||||
namespace oxen {
|
||||
|
||||
class ServiceNode;
|
||||
|
||||
struct SwarmInfo {
|
||||
swarm_id_t swarm_id;
|
||||
std::vector<sn_record_t> snodes;
|
||||
};
|
||||
|
||||
using all_swarms_t = std::vector<SwarmInfo>;
|
||||
|
||||
struct block_update_t {
|
||||
all_swarms_t swarms;
|
||||
std::vector<sn_record_t> decommissioned_nodes;
|
||||
oxenmq::pubkey_set active_x25519_pubkeys;
|
||||
uint64_t height;
|
||||
std::string block_hash;
|
||||
int hardfork;
|
||||
bool unchanged = false;
|
||||
};
|
||||
|
||||
void debug_print(std::ostream& os, const block_update_t& bu);
|
||||
|
||||
swarm_id_t get_swarm_by_pk(const std::vector<SwarmInfo>& all_swarms,
|
||||
const user_pubkey_t& pk);
|
||||
|
||||
struct SwarmEvents {
|
||||
|
||||
/// our (potentially new) swarm id
|
||||
swarm_id_t our_swarm_id;
|
||||
/// whether our swarm got dissolved and we
|
||||
/// need to salvage our stale data
|
||||
bool dissolved = false;
|
||||
/// detected new swarms that need to be bootstrapped
|
||||
std::vector<swarm_id_t> new_swarms;
|
||||
/// detected new snodes in our swarm
|
||||
std::vector<sn_record_t> new_snodes;
|
||||
/// our swarm members
|
||||
std::vector<sn_record_t> our_swarm_members;
|
||||
};
|
||||
|
||||
class Swarm {
|
||||
|
||||
swarm_id_t cur_swarm_id_ = INVALID_SWARM_ID;
|
||||
/// Note: this excludes the "dummy" swarm
|
||||
std::vector<SwarmInfo> all_valid_swarms_;
|
||||
sn_record_t our_address_;
|
||||
std::vector<sn_record_t> swarm_peers_;
|
||||
/// This includes decommissioned nodes
|
||||
std::vector<sn_record_t> all_funded_nodes_;
|
||||
|
||||
/// Check if `sid` is an existing (active) swarm
|
||||
bool is_existing_swarm(swarm_id_t sid) const;
|
||||
|
||||
public:
|
||||
Swarm(sn_record_t address) : our_address_(address) {}
|
||||
|
||||
~Swarm();
|
||||
|
||||
/// Extract relevant information from incoming swarm composition
|
||||
SwarmEvents derive_swarm_events(const all_swarms_t& swarms) const;
|
||||
|
||||
/// Update swarm state according to `events`. If not `is_active`
|
||||
/// only update the list of all nodes
|
||||
void update_state(const all_swarms_t& swarms,
|
||||
const std::vector<sn_record_t>& decommissioned,
|
||||
const SwarmEvents& events, bool is_active);
|
||||
|
||||
void apply_swarm_changes(const all_swarms_t& new_swarms);
|
||||
|
||||
bool is_pubkey_for_us(const user_pubkey_t& pk) const;
|
||||
|
||||
/// Whether `sn_address` is found in any of the swarms, including the
|
||||
/// dummy swarm with decommissioned nodes
|
||||
bool is_fully_funded_node(const std::string& sn_address) const;
|
||||
|
||||
const std::vector<sn_record_t>& other_nodes() const;
|
||||
|
||||
const std::vector<SwarmInfo>& all_valid_swarms() const {
|
||||
return all_valid_swarms_;
|
||||
}
|
||||
|
||||
swarm_id_t our_swarm_id() const { return cur_swarm_id_; }
|
||||
|
||||
bool is_valid() const { return cur_swarm_id_ != INVALID_SWARM_ID; }
|
||||
|
||||
void set_swarm_id(swarm_id_t sid);
|
||||
|
||||
// Select a node from all existing nodes (excluding us); throws if there is
|
||||
// no other nodes
|
||||
std::optional<sn_record_t> choose_funded_node() const;
|
||||
|
||||
// TEMPORARY (TODO: change to finding by x25519 PK)
|
||||
std::optional<sn_record_t> find_node_by_port(uint16_t port) const;
|
||||
|
||||
// Get the node with public key `pk` if exists
|
||||
std::optional<sn_record_t> get_node_by_pk(const sn_pub_key_t& pk) const;
|
||||
|
||||
std::optional<sn_record_t>
|
||||
find_node_by_ed25519_pk(const sn_pub_key_t& address) const;
|
||||
|
||||
std::optional<sn_record_t>
|
||||
find_node_by_x25519_bin(const sn_pub_key_t& address) const;
|
||||
};
|
||||
|
||||
} // namespace oxen
|
|
@ -1,10 +0,0 @@
|
|||
#include "version.h"
|
||||
|
||||
using namespace std::literals;
|
||||
|
||||
const std::array<uint16_t, 3> STORAGE_SERVER_VERSION = {@PROJECT_VERSION_MAJOR@, @PROJECT_VERSION_MINOR@, @PROJECT_VERSION_PATCH@};
|
||||
|
||||
const std::string_view STORAGE_SERVER_VERSION_STRING = "@PROJECT_VERSION@"sv;
|
||||
const std::string_view STORAGE_SERVER_GIT_HASH_STRING = "@SHORT_HASH@"sv;
|
||||
const std::string_view STORAGE_SERVER_BUILD_TIME = "@BUILD_TIME@"sv;
|
||||
const std::string_view STORAGE_SERVER_VERSION_INFO = "Oxen Storage Server v@PROJECT_VERSION@\n git commit hash: @SHORT_HASH@\n build time: @BUILD_TIME@\n"sv;
|
13
network-tests/README.md
Normal file
13
network-tests/README.md
Normal file
|
@ -0,0 +1,13 @@
|
|||
# Storage server testnet test suite
|
||||
|
||||
This directory contains a Python/pytest-based test repository to perform tests against the live Oxen
|
||||
testnet.
|
||||
|
||||
Usage:
|
||||
|
||||
- install the [oxenmq Python module](https://ci.oxen.rocks/oxen-io/oxen-pyoxenmq). You can build it
|
||||
from source, or alternatively grab the python3-oxenmq deb package from our deb repo
|
||||
(https://deb.oxen.io).
|
||||
|
||||
- Run `py.test-3` to run the test suite. (You likely need to install python3-pytest and
|
||||
python3-nacl, if not already installed).
|
44
network-tests/conftest.py
Normal file
44
network-tests/conftest.py
Normal file
|
@ -0,0 +1,44 @@
|
|||
|
||||
import pytest
|
||||
from oxenmq import OxenMQ, Address
|
||||
import json
|
||||
import random
|
||||
|
||||
def pytest_addoption(parser):
|
||||
parser.addoption("--exclude", action="store", default="")
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def omq():
|
||||
omq = OxenMQ()
|
||||
omq.max_message_size = 10*1024*1024
|
||||
omq.start()
|
||||
return omq
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def sns(omq):
|
||||
remote = omq.connect_remote(Address("curve://public.loki.foundation:38161/80adaead94db3b0402a6057869bdbe63204a28e93589fd95a035480ed6c03b45"))
|
||||
x = omq.request_future(remote, "rpc.get_service_nodes", b'{"active_only": true}').get()
|
||||
assert(len(x) == 2 and x[0] == b'200')
|
||||
return json.loads(x[1])
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def random_sn(omq, sns):
|
||||
sn = random.choice(sns['service_node_states'])
|
||||
addr = Address(sn['public_ip'], sn['storage_lmq_port'], bytes.fromhex(sn['pubkey_x25519']))
|
||||
conn = omq.connect_remote(addr)
|
||||
return conn
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def sk():
|
||||
from nacl.signing import SigningKey
|
||||
return SigningKey.generate()
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
def exclude(pytestconfig):
    """Set of snode ed25519 pubkeys to skip, taken from the --exclude option.

    Fix: previously returned an empty *dict* (``{}``) when the option was
    unset — membership tests happened to work, but the type was inconsistent
    with the non-empty case (a set).  Also drops the redundant ``len(s)``
    check (a non-empty string is already truthy).
    """
    s = pytestconfig.getoption("exclude")
    return {s} if s else set()
|
2
network-tests/pytest.ini
Normal file
2
network-tests/pytest.ini
Normal file
|
@ -0,0 +1,2 @@
|
|||
[pytest]
|
||||
norecursedirs=*
|
90
network-tests/ss.py
Normal file
90
network-tests/ss.py
Normal file
|
@ -0,0 +1,90 @@
|
|||
import time
|
||||
import json
|
||||
import base64
|
||||
from nacl.encoding import HexEncoder, Base64Encoder
|
||||
from nacl.signing import SigningKey
|
||||
from nacl.hash import blake2b
|
||||
import random
|
||||
|
||||
def expire_all(sk, *, delta=120, timestamp=None):
|
||||
ts = timestamp if timestamp else int((time.time() + delta) * 1000)
|
||||
return json.dumps({
|
||||
"pubkey": sk.verify_key.encode().hex(),
|
||||
"expiry": ts,
|
||||
"signature": base64.b64encode(sk.sign(b"expire_all" + str(ts).encode()).signature).decode()},
|
||||
separators=(',',':'))
|
||||
|
||||
def expire_msgs(sk, messages, *, delta=120, timestamp=None):
|
||||
ts = timestamp if timestamp else int((time.time() + delta) * 1000)
|
||||
return json.dumps({
|
||||
"pubkey": sk.verify_key.encode().hex(),
|
||||
"expiry": ts,
|
||||
"messages": messages,
|
||||
"signature": base64.b64encode(sk.sign(b"expire" + ("".join(messages) + str(ts)).encode()).signature).decode()},
|
||||
separators=(',',':'))
|
||||
|
||||
def delete_all(sk):
|
||||
ts = int(time.time() * 1000)
|
||||
return json.dumps({
|
||||
"pubkey": sk.verify_key.encode().hex(),
|
||||
"expiry": ts,
|
||||
"signature": base64.b64encode(sk.sign(b"delete_all" + str(ts).encode()).signature).decode()},
|
||||
separators=(',',':'))
|
||||
|
||||
def delete_msgs(sk, messages):
    """Build a signed JSON 'delete' request for the given message hashes.

    The signature covers b"delete" followed by the concatenated hashes.
    """
    signed = sk.sign(b"delete" + "".join(messages).encode())
    payload = {
        "pubkey": sk.verify_key.encode().hex(),
        "messages": messages,
        "signature": base64.b64encode(signed.signature).decode(),
    }
    return json.dumps(payload, separators=(',', ':'))
|
||||
|
||||
def delete_before(sk, *, ago=120, timestamp=None):
|
||||
before = timestamp if timestamp else int((time.time() - ago) * 1000)
|
||||
return json.dumps({
|
||||
"pubkey": sk.verify_key.encode().hex(),
|
||||
"before": before,
|
||||
"signature": base64.b64encode(sk.sign(b"delete_before" + str(before).encode()).signature).decode()},
|
||||
separators=(',',':'))
|
||||
|
||||
|
||||
def get_swarm(omq, conn, sk, netid=5):
|
||||
pubkey = "{:02x}".format(netid) + (sk.verify_key if isinstance(sk, SigningKey) else sk.public_key).encode().hex()
|
||||
r = omq.request_future(conn, "storage.get_swarm", [json.dumps({"pubkey": pubkey}).encode()]).get()
|
||||
assert(len(r) == 1)
|
||||
return json.loads(r[0])
|
||||
|
||||
|
||||
def random_swarm_members(swarm, n, exclude=None):
    """Return `n` randomly chosen snodes from `swarm`, skipping any whose
    ed25519 pubkey is in `exclude`.

    Fix: the default for `exclude` was a mutable dict literal (``{}``) shared
    across calls and inconsistent with the set the `exclude` fixture produces;
    it is now None, normalized to an empty set per call.
    """
    excluded = exclude if exclude is not None else set()
    candidates = [s for s in swarm['snodes'] if s['pubkey_ed25519'] not in excluded]
    return random.sample(candidates, n)
|
||||
|
||||
|
||||
def store_n(omq, conn, sk, basemsg, n, *, offset=0, netid=5, now=None, ttl=30):
    """Store `n` distinct messages (basemsg + counter suffix) for `sk` via
    `conn`, verify every store response, and return the message records
    (each with its request, expected hash, and parsed store response).

    Fix: `now` previously defaulted to ``time.time()`` evaluated once at
    module import, so every later call used a stale timestamp; it now
    defaults to None and is taken at call time.
    """
    if now is None:
        now = time.time()
    msgs = []
    # Session-style pubkey: one netid byte followed by the 32-byte key.
    pubkey = chr(netid).encode() + (sk.verify_key if isinstance(sk, SigningKey) else sk.public_key).encode()
    for i in range(n):
        data = basemsg + f"{i}".encode()
        ts = int((now - i) * 1000)
        exp = int((now - i + ttl) * 1000)
        msgs.append({
            "data": data,
            "req": {
                "pubkey": pubkey.hex(),
                "timestamp": ts,
                "expiry": exp,
                "data": base64.b64encode(data).decode()}
            })
        msgs[-1]['future'] = omq.request_future(conn, "storage.store", [json.dumps(msgs[-1]['req']).encode()])
        msgs[-1]['hash'] = blake2b(pubkey + msgs[-1]['data'], encoder=Base64Encoder).decode().rstrip('=')

    # All hashes must be unique or the per-message checks below are meaningless
    assert len({m['hash'] for m in msgs}) == len(msgs)

    for m in msgs:
        resp = m['future'].get()
        assert len(resp) == 1
        m['store'] = json.loads(resp[0].decode())

        # Each store must be confirmed (with the expected hash) by the swarm
        assert len(m['store']['swarm']) >= 5
        assert not any('failed' in v for v in m['store']['swarm'].values())
        assert all(v['hash'] == m['hash'] for v in m['store']['swarm'].values())

    return msgs
59
network-tests/subkey.py
Normal file
59
network-tests/subkey.py
Normal file
|
@ -0,0 +1,59 @@
|
|||
from nacl.encoding import RawEncoder
|
||||
from nacl.hash import blake2b
|
||||
from hashlib import sha512
|
||||
from nacl.signing import SigningKey, VerifyKey
|
||||
import nacl.bindings as sodium
|
||||
from typing import Union
|
||||
|
||||
|
||||
def make_subkey(sk, subuser_or_raw: Union[VerifyKey, bytes]):
|
||||
"""
|
||||
For a subkey signature, given subkey value c, we sign with d=a(c+H(c‖A)), which has
|
||||
verification key D = (c+H(c‖A))A.
|
||||
|
||||
You can pass in either a raw subkey (32 bytes) or a VerifyKey to make one from it.
|
||||
|
||||
Returns length-32 bytes of each of: subkey, privkey, pubkey
|
||||
"""
|
||||
if isinstance(subuser_or_raw, bytes):
|
||||
assert(len(subuser_or_raw) == 32)
|
||||
c = subuser_or_raw
|
||||
else:
|
||||
c = blake2b(sk.verify_key.encode() + subuser_or_raw.encode(), digest_size=32, encoder=RawEncoder)
|
||||
|
||||
a = sodium.crypto_sign_ed25519_sk_to_curve25519(sk.encode() + sk.verify_key.encode())
|
||||
d = sodium.crypto_core_ed25519_scalar_mul(
|
||||
a,
|
||||
sodium.crypto_core_ed25519_scalar_add(
|
||||
c,
|
||||
blake2b(
|
||||
c + sk.verify_key.encode(), key=b'OxenSSSubkey', digest_size=32, encoder=RawEncoder
|
||||
),
|
||||
),
|
||||
)
|
||||
D = sodium.crypto_scalarmult_ed25519_base_noclamp(d)
|
||||
return c, d, D
|
||||
|
||||
|
||||
def sha512_multipart(*message_parts):
    """Return the SHA512 of all arguments concatenated together, flattening
    any argument that is itself a list or tuple by one level."""
    digest = sha512()
    for part in message_parts:
        if isinstance(part, (list, tuple)):
            for piece in part:
                digest.update(piece)
        else:
            digest.update(part)
    return digest.digest()
|
||||
|
||||
|
||||
# Mostly copied from SOGS auth example; the signature math here is identical (just using d/D instead
|
||||
# of ka/kA):
|
||||
def sign(message_parts, s: SigningKey, d: bytes, D: bytes):
|
||||
H_rh = sha512(s.encode()).digest()[32:]
|
||||
r = sodium.crypto_core_ed25519_scalar_reduce(sha512_multipart(H_rh, D, message_parts))
|
||||
sig_R = sodium.crypto_scalarmult_ed25519_base_noclamp(r)
|
||||
HRAM = sodium.crypto_core_ed25519_scalar_reduce(sha512_multipart(sig_R, D, message_parts))
|
||||
sig_s = sodium.crypto_core_ed25519_scalar_add(r, sodium.crypto_core_ed25519_scalar_mul(HRAM, d))
|
||||
return sig_R + sig_s
|
249
network-tests/test_batch.py
Normal file
249
network-tests/test_batch.py
Normal file
|
@ -0,0 +1,249 @@
|
|||
from util import sn_address
|
||||
import ss
|
||||
import time
|
||||
import base64
|
||||
import json
|
||||
from nacl.encoding import HexEncoder, Base64Encoder, RawEncoder
|
||||
from nacl.hash import blake2b
|
||||
from hashlib import sha512
|
||||
from nacl.signing import SigningKey, VerifyKey
|
||||
import nacl.bindings as sodium
|
||||
from oxenc import bt_serialize, bt_deserialize
|
||||
|
||||
|
||||
def test_batch_json(omq, random_sn, sk, exclude):
|
||||
swarm = ss.get_swarm(omq, random_sn, sk, 3)
|
||||
|
||||
sn = ss.random_swarm_members(swarm, 1, exclude)[0]
|
||||
conn = omq.connect_remote(sn_address(sn))
|
||||
|
||||
ts = int(time.time() * 1000)
|
||||
ttl = 86400000
|
||||
exp = ts + ttl
|
||||
|
||||
# Store two messages for myself
|
||||
s = omq.request_future(conn, 'storage.batch', [json.dumps({
|
||||
"requests": [
|
||||
{
|
||||
"method": "store",
|
||||
"params": {
|
||||
"pubkey": '03' + sk.verify_key.encode().hex(),
|
||||
'namespace': 42,
|
||||
"timestamp": ts,
|
||||
"ttl": ttl,
|
||||
"data": base64.b64encode(b"abc 123").decode(),
|
||||
"signature": sk.sign(f"store42{ts}".encode(), encoder=Base64Encoder).signature.decode(),
|
||||
},
|
||||
},
|
||||
{
|
||||
"method": "store",
|
||||
"params": {
|
||||
"pubkey": '03' + sk.verify_key.encode().hex(),
|
||||
'namespace': 42,
|
||||
"timestamp": ts,
|
||||
"ttl": ttl,
|
||||
"data": base64.b64encode(b"xyz 123").decode(),
|
||||
"signature": sk.sign(f"store42{ts}".encode(), encoder=Base64Encoder).signature.decode(),
|
||||
},
|
||||
},
|
||||
],
|
||||
}).encode()]).get()
|
||||
assert len(s) == 1
|
||||
s = json.loads(s[0])
|
||||
assert "results" in s
|
||||
assert len(s["results"]) == 2
|
||||
assert s["results"][0]["code"] == 200
|
||||
assert s["results"][1]["code"] == 200
|
||||
|
||||
hash0 = blake2b(b'\x03' + sk.verify_key.encode() + b'42' + b'abc 123',
|
||||
encoder=Base64Encoder).decode().rstrip('=')
|
||||
hash1 = blake2b(b'\x03' + sk.verify_key.encode() + b'42' + b'xyz 123',
|
||||
encoder=Base64Encoder).decode().rstrip('=')
|
||||
assert s["results"][0]["body"]["hash"] == hash0
|
||||
assert s["results"][1]["body"]["hash"] == hash1
|
||||
|
||||
|
||||
def test_batch_bt(omq, random_sn, sk, exclude):
|
||||
swarm = ss.get_swarm(omq, random_sn, sk, 3)
|
||||
|
||||
sn = ss.random_swarm_members(swarm, 1, exclude)[0]
|
||||
conn = omq.connect_remote(sn_address(sn))
|
||||
|
||||
ts = int(time.time() * 1000)
|
||||
ttl = 86400000
|
||||
exp = ts + ttl
|
||||
|
||||
# Store two messages for myself
|
||||
s = omq.request_future(conn, 'storage.batch', [bt_serialize({
|
||||
"requests": [
|
||||
{
|
||||
"method": "store",
|
||||
"params": {
|
||||
"pubkey": '03' + sk.verify_key.encode().hex(),
|
||||
'namespace': 42,
|
||||
"timestamp": ts,
|
||||
"ttl": ttl,
|
||||
"data": b"abc 123",
|
||||
"signature": sk.sign(f"store42{ts}".encode()).signature,
|
||||
},
|
||||
},
|
||||
{
|
||||
"method": "store",
|
||||
"params": {
|
||||
"pubkey": '03' + sk.verify_key.encode().hex(),
|
||||
'namespace': 42,
|
||||
"timestamp": ts,
|
||||
"ttl": ttl,
|
||||
"data": b"xyz 123",
|
||||
"signature": sk.sign(f"store42{ts}".encode()).signature,
|
||||
},
|
||||
},
|
||||
],
|
||||
})]).get()
|
||||
assert len(s) == 1
|
||||
s = bt_deserialize(s[0])
|
||||
assert b"results" in s
|
||||
assert len(s[b"results"]) == 2
|
||||
assert s[b"results"][0][b"code"] == 200
|
||||
assert s[b"results"][1][b"code"] == 200
|
||||
|
||||
hash0 = blake2b(b'\x03' + sk.verify_key.encode() + b'42' + b'abc 123',
|
||||
encoder=Base64Encoder).rstrip(b'=')
|
||||
hash1 = blake2b(b'\x03' + sk.verify_key.encode() + b'42' + b'xyz 123',
|
||||
encoder=Base64Encoder).rstrip(b'=')
|
||||
assert s[b"results"][0][b"body"][b"hash"] == hash0
|
||||
assert s[b"results"][1][b"body"][b"hash"] == hash1
|
||||
|
||||
|
||||
def test_sequence(omq, random_sn, sk, exclude):
|
||||
swarm = ss.get_swarm(omq, random_sn, sk, 3)
|
||||
|
||||
sn = ss.random_swarm_members(swarm, 1, exclude)[0]
|
||||
conn = omq.connect_remote(sn_address(sn))
|
||||
|
||||
ts = int(time.time() * 1000)
|
||||
ttl = 86400000
|
||||
exp = ts + ttl
|
||||
|
||||
# Sequence some commands:
|
||||
s = omq.request_future(conn, 'storage.sequence', [json.dumps({
|
||||
"requests": [
|
||||
{
|
||||
"method": "store",
|
||||
"params": {
|
||||
"pubkey": '05' + sk.verify_key.encode().hex(),
|
||||
"timestamp": ts,
|
||||
"ttl": ttl,
|
||||
"data": base64.b64encode(b"abc 123").decode(),
|
||||
},
|
||||
},
|
||||
{
|
||||
"method": "retrieve",
|
||||
"params": {
|
||||
"pubkey": '05' + sk.verify_key.encode().hex(),
|
||||
"timestamp": ts,
|
||||
"signature": sk.sign(f"retrieve{ts}".encode(), encoder=Base64Encoder).signature.decode(),
|
||||
},
|
||||
},
|
||||
{
|
||||
"method": "store",
|
||||
"params": {
|
||||
"pubkey": '05' + sk.verify_key.encode().hex(),
|
||||
"timestamp": ts,
|
||||
"ttl": ttl,
|
||||
"data": base64.b64encode(b"xyz 123").decode(),
|
||||
},
|
||||
},
|
||||
{
|
||||
"method": "delete_all",
|
||||
"params": {
|
||||
"pubkey": '05' + sk.verify_key.encode().hex(),
|
||||
"timestamp": ts,
|
||||
"signature": sk.sign(f"delete_all{ts}".encode(), encoder=Base64Encoder).signature.decode(),
|
||||
},
|
||||
},
|
||||
{
|
||||
"method": "retrieve",
|
||||
"params": {
|
||||
"pubkey": '05' + sk.verify_key.encode().hex(),
|
||||
"timestamp": ts,
|
||||
"signature": sk.sign(f"retrieve{ts}".encode(), encoder=Base64Encoder).signature.decode(),
|
||||
},
|
||||
},
|
||||
],
|
||||
}).encode()]).get()
|
||||
|
||||
assert len(s) == 1
|
||||
s = json.loads(s[0])
|
||||
assert "results" in s
|
||||
assert len(s["results"]) == 5
|
||||
h0 = blake2b(b'\x05' + sk.verify_key.encode() + b'abc 123',
|
||||
encoder=Base64Encoder).decode().rstrip('=')
|
||||
h1 = blake2b(b'\x05' + sk.verify_key.encode() + b'xyz 123',
|
||||
encoder=Base64Encoder).decode().rstrip('=')
|
||||
assert s["results"][0]["body"]["hash"] == h0
|
||||
assert s["results"][1]["body"]["messages"] == [{"data": "YWJjIDEyMw==", "expiration": ts + ttl, "hash": h0, "timestamp": ts}]
|
||||
assert s["results"][2]["body"]["hash"] == h1
|
||||
assert len(s["results"][3]["body"]["swarm"]) > 0
|
||||
for sw in s["results"][3]["body"]["swarm"].values():
|
||||
assert set(sw["deleted"]) == {h0, h1}
|
||||
assert s["results"][4]["body"]["messages"] == []
|
||||
|
||||
|
||||
def test_failing_sequence(omq, random_sn, sk, exclude):
    """Verify error handling of storage.sequence vs storage.batch.

    Both endpoints get the same two-request payload whose first request (a
    store into namespace 33 with no signature) must fail with 401:
    - `sequence` must stop at the failed store and never run the retrieve;
    - `batch` must run both, returning the 401 alongside a 200 retrieve.
    (Removed the unused `exp = ts + ttl` local from the original.)
    """
    swarm = ss.get_swarm(omq, random_sn, sk, 3)

    sn = ss.random_swarm_members(swarm, 1, exclude)[0]
    conn = omq.connect_remote(sn_address(sn))

    ts = int(time.time() * 1000)
    ttl = 86400000

    commands = {
        "requests": [
            {
                "method": "store",
                "params": {
                    "pubkey": '05' + sk.verify_key.encode().hex(),
                    "timestamp": ts,
                    "namespace": 33,  # will fail because no auth
                    "ttl": ttl,
                    "data": base64.b64encode(b"abc 123").decode(),
                },
            },
            {
                "method": "retrieve",
                "params": {
                    "pubkey": '05' + sk.verify_key.encode().hex(),
                    "timestamp": ts,
                    "signature": sk.sign(f"retrieve{ts}".encode(), encoder=Base64Encoder).signature.decode(),
                },
            },
        ],
    }

    # Sequence some commands:
    s_s = omq.request_future(conn, 'storage.sequence', [json.dumps(commands).encode()])
    s_b = omq.request_future(conn, 'storage.batch', [json.dumps(commands).encode()])

    s_s = s_s.get()
    s_b = s_b.get()

    # The sequence should fail the store, and thus not attempt the retrieve:
    assert len(s_s) == 1
    s_s = json.loads(s_s[0])
    assert "results" in s_s
    assert len(s_s["results"]) == 1
    assert s_s["results"][0]["code"] == 401
    assert s_s["results"][0]["body"] == "store: signature required to store to namespace 33"

    # The same thing as a batch should fail but also do the retrieve:
    assert len(s_b) == 1
    s_b = json.loads(s_b[0])
    assert "results" in s_b
    assert len(s_b["results"]) == 2
    assert s_b["results"][0]["code"] == 401
    assert s_b["results"][0]["body"] == "store: signature required to store to namespace 33"
    assert s_b["results"][1]["code"] == 200
    assert s_b["results"][1]["body"]["messages"] == []
|
# ===== network-tests/test_big_retrieve.py (new file, 315 lines) =====
|
|||
from util import sn_address
|
||||
import ss
|
||||
import time
|
||||
import base64
|
||||
import json
|
||||
from nacl.encoding import Base64Encoder
|
||||
from nacl.signing import SigningKey
|
||||
import nacl.utils
|
||||
import pytest
|
||||
|
||||
# This test runs in a few seconds, a short ttl is fine.
|
||||
ttl = 600_000
|
||||
|
||||
# Size of the msg we store for the tests
|
||||
msg_size = 70000
|
||||
|
||||
@pytest.fixture(scope='module')
def big_store(omq, random_sn, exclude):
    """Module-scoped fixture: stores 120 random messages of `msg_size` bytes
    (12 rounds of 10 concurrent stores) under a fresh pubkey.

    Returns a dict with the open connection (`conn`), signing key (`sk`),
    session-style pubkey (`pk`), and the list of stored message `hashes` in
    submission order.  (Removed the unused `exp = ts + ttl` local.)
    """
    sk = SigningKey.generate()
    swarm = ss.get_swarm(omq, random_sn, sk)

    sn = ss.random_swarm_members(swarm, 1, exclude)[0]
    conn = omq.connect_remote(sn_address(sn))

    pk = '03' + sk.verify_key.encode().hex()

    hashes = []
    for _ in range(12):
        # Fire off 10 stores in parallel, then collect them in submission
        # order so `hashes` stays ordered.
        pending = []
        for _ in range(10):
            ts = int(time.time() * 1000)
            msg = nacl.utils.random(msg_size)
            pending.append(omq.request_future(conn, 'storage.store', [json.dumps({
                "pubkey": pk,
                "timestamp": ts,
                "ttl": ttl,
                "data": base64.b64encode(msg).decode()}).encode()]))
        for fut in pending:
            resp = fut.get()
            assert len(resp) == 1
            resp = json.loads(resp[0])
            assert 'hash' in resp
            hashes.append(resp['hash'])

    return {
        'conn': conn,
        'sk': sk,
        'pk': pk,
        'hashes': hashes
    }
|
||||
|
||||
|
||||
def test_retrieve_count(omq, big_store):
    """Paging by max_count: verifies count limits, last_hash offsets, the
    implicit per-response size cap, and the 'more' flag at the end."""
    conn = big_store['conn']
    sk = big_store['sk']
    pk = big_store['pk']
    hashes = big_store['hashes']

    ts = int(time.time() * 1000)
    sig = sk.sign("retrieve{}".format(ts).encode(), encoder=Base64Encoder).signature.decode()

    def retrieve_future(**extra):
        # Build a retrieve request reusing the common signed fields.
        req = {"pubkey": pk, "timestamp": ts, "signature": sig}
        req.update(extra)
        return omq.request_future(conn, 'storage.retrieve', [json.dumps(req)])

    def unwrap(fut):
        raw = fut.get()
        assert len(raw) == 1
        return json.loads(raw[0])

    s5 = retrieve_future(max_count=5)
    s8 = retrieve_future(max_count=8, last_hash=hashes[-3])
    s20 = retrieve_future(max_count=20, last_hash=hashes[110])
    # This one is a little tricky: our last one is the limit, but we should still get more: false
    s10 = retrieve_future(max_count=10, last_hash=hashes[-11])

    r = unwrap(s5)
    assert [m['hash'] for m in r['messages']] == hashes[0:5]
    assert r['more']

    r = unwrap(s8)
    assert [m['hash'] for m in r['messages']] == hashes[-2:]
    assert not r['more']

    r = unwrap(s20)
    assert [m['hash'] for m in r['messages']] == hashes[111:]
    assert not r['more']

    # We request 100, but should hit the implicit max size at 83
    s100 = retrieve_future(max_count=100, last_hash=hashes[10])

    r = unwrap(s100)
    assert [m['hash'] for m in r['messages']] == hashes[11:94]
    assert r['more']

    r = unwrap(s10)
    assert [m['hash'] for m in r['messages']] == hashes[-10:]
    assert not r['more']
|
||||
|
||||
|
||||
|
||||
|
||||
def test_retrieve_size(omq, big_store):
    """Paging by max_size: absolute byte budgets, -1 (network max), -N
    (max/N), and the default (max/5) all cap the result correctly and set
    the 'more' flag appropriately."""
    conn = big_store['conn']
    sk = big_store['sk']
    pk = big_store['pk']
    hashes = big_store['hashes']

    ts = int(time.time() * 1000)
    sig = sk.sign("retrieve{}".format(ts).encode(), encoder=Base64Encoder).signature.decode()

    def retrieve_future(**extra):
        # Build a retrieve request reusing the common signed fields.
        req = {"pubkey": pk, "timestamp": ts, "signature": sig}
        req.update(extra)
        return omq.request_future(conn, 'storage.retrieve', [json.dumps(req)])

    def unwrap(fut):
        raw = fut.get()
        assert len(raw) == 1
        return json.loads(raw[0])

    s500k = retrieve_future(max_size=500_000, last_hash=hashes[2])
    s600k = retrieve_future(max_size=600_000, last_hash=hashes[-8])
    smax = retrieve_future(max_size=-1, last_hash=hashes[2])
    smax_nomore = retrieve_future(max_size=-1, last_hash=hashes[110])
    sthird = retrieve_future(max_size=-3, last_hash=hashes[49])
    sdefault = retrieve_future(last_hash=hashes[89])
    sdefault_nomore = retrieve_future(last_hash=hashes[103])

    r = unwrap(s500k)
    assert [m['hash'] for m in r['messages']] == hashes[3:8]
    assert r['more']

    r = unwrap(s600k)
    assert [m['hash'] for m in r['messages']] == hashes[-7:-1]
    assert r['more']

    # Max retrieve size (-1) is 7.8MB, so with 70000*4/3=93333 bytes (plus a bit) per message
    # retrieved, we should get 83 messages back.
    r = unwrap(smax)
    assert [m['hash'] for m in r['messages']] == hashes[3:86]
    assert r['more']

    # 1/3 max retrieve should fit 27:
    r = unwrap(sthird)
    assert [m['hash'] for m in r['messages']] == hashes[50:77]
    assert r['more']

    # Default is 1/5, should fit 16:
    r = unwrap(sdefault)
    assert [m['hash'] for m in r['messages']] == hashes[90:106]
    assert r['more']

    r = unwrap(smax_nomore)
    assert [m['hash'] for m in r['messages']] == hashes[111:]
    assert not r['more']

    r = unwrap(sdefault_nomore)
    assert [m['hash'] for m in r['messages']] == hashes[-16:]
    assert not r['more']
|
||||
|
||||
|
||||
def test_retrieve_size_and_count(omq, big_store):
    """When both max_count and max_size are supplied, whichever limit is hit
    first caps the result (5-by-count in the first case, 7-by-size in the
    second)."""
    conn = big_store['conn']
    sk = big_store['sk']
    pk = big_store['pk']
    hashes = big_store['hashes']

    ts = int(time.time() * 1000)
    sig = sk.sign("retrieve{}".format(ts).encode(), encoder=Base64Encoder).signature.decode()

    def retrieve_future(**extra):
        req = {"pubkey": pk, "timestamp": ts, "signature": sig}
        req.update(extra)
        return omq.request_future(conn, 'storage.retrieve', [json.dumps(req)])

    s5_or_1M = retrieve_future(max_count=5, max_size=1_000_000, last_hash=hashes[19])
    s10_or_700k = retrieve_future(max_count=10, max_size=700_000, last_hash=hashes[29])

    r = s5_or_1M.get()
    assert len(r) == 1
    r = json.loads(r[0])
    assert [m['hash'] for m in r['messages']] == hashes[20:25]
    assert r['more']

    r = s10_or_700k.get()
    assert len(r) == 1
    r = json.loads(r[0])
    assert [m['hash'] for m in r['messages']] == hashes[30:37]
    assert r['more']
|
# ===== network-tests/test_deletes.py (new file, 354 lines) =====
|
|||
from util import sn_address
|
||||
import ss
|
||||
import time
|
||||
import base64
|
||||
import json
|
||||
from nacl.encoding import HexEncoder, Base64Encoder
|
||||
from nacl.hash import blake2b
|
||||
from nacl.signing import VerifyKey
|
||||
import nacl.exceptions
|
||||
|
||||
def test_delete_all(omq, random_sn, sk, exclude):
    """delete_all removes every stored message for the pubkey and returns a
    per-snode signed confirmation covering the deleted hashes."""
    swarm = ss.get_swarm(omq, random_sn, sk)
    sns = ss.random_swarm_members(swarm, 2, exclude)
    conns = [omq.connect_remote(sn_address(sn)) for sn in sns]

    msgs = ss.store_n(omq, conns[0], sk, b"omg123", 5)

    my_ss_id = '05' + sk.verify_key.encode().hex()

    ts = int(time.time() * 1000)
    sig = sk.sign("delete_all{}".format(ts).encode(), encoder=Base64Encoder).signature.decode()
    payload = json.dumps({
        "pubkey": my_ss_id,
        "timestamp": ts,
        "signature": sig
    }).encode()

    resp = omq.request_future(conns[1], 'storage.delete_all', [payload]).get()

    assert len(resp) == 1
    r = json.loads(resp[0])

    assert set(r['swarm'].keys()) == {x['pubkey_ed25519'] for x in swarm['snodes']}

    msg_hashes = sorted(m['hash'] for m in msgs)

    # signature of ( PUBKEY_HEX || TIMESTAMP || DELETEDHASH[0] || ... || DELETEDHASH[N] )
    expected_signed = "".join((my_ss_id, str(ts), *msg_hashes)).encode()
    for snode_pk, res in r['swarm'].items():
        assert res['deleted'] == msg_hashes
        VerifyKey(snode_pk, encoder=HexEncoder).verify(
            expected_signed, base64.b64decode(res['signature']))

    # Nothing should be left to retrieve.
    fetched = omq.request_future(conns[0], 'storage.retrieve',
                                 [json.dumps({
                                     "pubkey": my_ss_id,
                                     "timestamp": ts,
                                     "signature": sk.sign(f"retrieve{ts}".encode(), encoder=Base64Encoder).signature.decode()
                                 }).encode()]
                                 ).get()
    assert len(fetched) == 1
    assert not json.loads(fetched[0])['messages']
|
||||
|
||||
|
||||
def test_stale_delete_all(omq, random_sn, sk, exclude):
    """delete_all with a timestamp too far in the past or future must be
    rejected with a 406.

    BUG FIX: the original only replaced ``params["signature"]`` for the
    "too new" case and left the stale timestamp in ``params``, so the
    second request still carried the 2-minutes-old timestamp (with a
    signature that wouldn't even match it) and the 406 passed for the
    wrong reason.  Both fields are now updated together.
    """
    swarm = ss.get_swarm(omq, random_sn, sk)
    sn = ss.random_swarm_members(swarm, 2, exclude)[0]
    conn = omq.connect_remote(sn_address(sn))

    ss.store_n(omq, conn, sk, b"omg123", 5)

    my_ss_id = '05' + sk.verify_key.encode().hex()

    # 2 minutes in the past: too stale.
    ts = int((time.time() - 120) * 1000)
    to_sign = "delete_all{}".format(ts).encode()
    sig = sk.sign(to_sign, encoder=Base64Encoder).signature.decode()
    params = {
        "pubkey": my_ss_id,
        "timestamp": ts,
        "signature": sig
    }

    resp_too_old = omq.request_future(conn, 'storage.delete_all', [json.dumps(params).encode()])

    # 2 minutes in the future: too new.
    ts = int((time.time() + 120) * 1000)
    to_sign = "delete_all{}".format(ts).encode()
    sig = sk.sign(to_sign, encoder=Base64Encoder).signature.decode()
    params["timestamp"] = ts
    params["signature"] = sig

    resp_too_new = omq.request_future(conn, 'storage.delete_all', [json.dumps(params).encode()])

    assert resp_too_old.get() == [b'406', b'delete_all timestamp too far from current time']
    assert resp_too_new.get() == [b'406', b'delete_all timestamp too far from current time']
|
||||
|
||||
|
||||
def test_delete(omq, random_sn, sk, exclude):
    """Deleting an explicit hash list (including one bogus hash) removes only
    the matching messages; each snode signs over the requested hashes plus
    the hashes actually deleted."""
    swarm = ss.get_swarm(omq, random_sn, sk, netid=2)
    sns = ss.random_swarm_members(swarm, 2, exclude)
    conns = [omq.connect_remote(sn_address(sn)) for sn in sns]

    msgs = ss.store_n(omq, conns[0], sk, b"omg123", 5, netid=2)

    my_ss_id = '02' + sk.verify_key.encode().hex()

    ts = int(time.time() * 1000)
    actual_del_msgs = sorted(msgs[i]['hash'] for i in (1, 4))
    # Deliberately mis-sort the requested hashes to verify that the return is sorted as expected
    del_msgs = sorted(actual_del_msgs + ['garbageYrzcuCXO3fZkmk/h3xkMQ3vCh94i5HzLmj3I'], reverse=True)
    sig = sk.sign(("delete" + "".join(del_msgs)).encode(), encoder=Base64Encoder).signature.decode()
    payload = json.dumps({
        "pubkey": my_ss_id,
        "messages": del_msgs,
        "signature": sig
    }).encode()

    resp = omq.request_future(conns[1], 'storage.delete', [payload]).get()

    assert len(resp) == 1
    r = json.loads(resp[0])

    assert set(r['swarm'].keys()) == {x['pubkey_ed25519'] for x in swarm['snodes']}

    # ( PUBKEY_HEX || RMSG[0] || ... || RMSG[N] || DMSG[0] || ... || DMSG[M] )
    expected_signed = "".join(
        (my_ss_id, *del_msgs, *actual_del_msgs)).encode()
    for snode_pk, res in r['swarm'].items():
        assert res['deleted'] == actual_del_msgs
        edpk = VerifyKey(snode_pk, encoder=HexEncoder)
        try:
            edpk.verify(expected_signed, base64.b64decode(res['signature']))
        except nacl.exceptions.BadSignatureError as e:
            print("Bad signature from swarm member {}".format(snode_pk))
            raise e

    # Three of the five stored messages should survive.
    fetched = omq.request_future(conns[0], 'storage.retrieve',
                                 [json.dumps({
                                     "pubkey": my_ss_id,
                                     "timestamp": ts,
                                     "signature": sk.sign(f"retrieve{ts}".encode(), encoder=Base64Encoder).signature.decode()
                                 }).encode()]
                                 ).get()
    assert len(fetched) == 1
    assert len(json.loads(fetched[0])['messages']) == 3
|
||||
|
||||
|
||||
def test_delete_required(omq, random_sn, sk, exclude):
    """The `required` flag makes delete return a 404 when nothing was
    actually deleted; without it a no-op delete still returns a normal
    single-part response.

    Consistency fix: the payload is now `.encode()`d to bytes like every
    other request in this file (the original passed a `str`); also removed
    the unused `ts` local.
    """
    swarm = ss.get_swarm(omq, random_sn, sk, netid=2)
    sns = ss.random_swarm_members(swarm, 2, exclude)
    conns = [omq.connect_remote(sn_address(sn)) for sn in sns]

    msgs = ss.store_n(omq, conns[0], sk, b"omg123", 2, netid=2)

    my_ss_id = '02' + sk.verify_key.encode().hex()

    actual_del_msgs = sorted(m['hash'] for m in msgs)
    del_msgs = actual_del_msgs + ['garbageYrzcuCXO3fZkmk/h3xkMQ3vCh94i5HzLmj3I']
    to_sign = ("delete" + "".join(del_msgs)).encode()
    sig = sk.sign(to_sign, encoder=Base64Encoder).signature.decode()
    params = {
        "pubkey": my_ss_id,
        "messages": del_msgs,
        "required": True,
        "signature": sig
    }

    resp = omq.request_future(conns[1], 'storage.delete', [json.dumps(params).encode()]).get()

    assert len(resp) == 1
    r = json.loads(resp[0])

    # Submit again; since they are already deleted, this should give back a 404
    resp = omq.request_future(conns[1], 'storage.delete', [json.dumps(params).encode()]).get()

    assert len(resp) == 2
    assert resp[0] == b'404'

    # Make sure we don't get a 404 without required specified, even when nothing found:
    del params["required"]
    resp = omq.request_future(conns[1], 'storage.delete', [json.dumps(params).encode()]).get()

    assert len(resp) == 1
|
||||
|
||||
def test_delete_before(omq, random_sn, sk, exclude):
    """delete_before removes all messages with timestamps < the given cutoff,
    exercised in four phases: the newest two, a no-match cutoff, most of the
    rest, and finally the last message."""
    swarm = ss.get_swarm(omq, random_sn, sk)
    sns = ss.random_swarm_members(swarm, 2, exclude)
    conns = [omq.connect_remote(sn_address(sn)) for sn in sns]

    msgs = ss.store_n(omq, conns[0], sk, b"omg123", 10)

    # store_n submits msgs with decreasing timestamps:
    assert all(msgs[i]['req']['timestamp'] > msgs[i+1]['req']['timestamp'] for i in range(len(msgs)-1))

    my_ss_id = '05' + sk.verify_key.encode().hex()

    def delete_before(conn, ts, expected_del):
        # Issue a delete_before and verify each swarm member's signed response.
        sig = sk.sign(("delete_before" + str(ts)).encode(), encoder=Base64Encoder).signature.decode()
        payload = json.dumps({
            "pubkey": my_ss_id,
            "before": ts,
            "signature": sig
        }).encode()
        resp = omq.request_future(conn, 'storage.delete_before', [payload]).get()
        assert len(resp) == 1
        r = json.loads(resp[0])
        assert set(r['swarm'].keys()) == {x['pubkey_ed25519'] for x in swarm['snodes']}
        # ( PUBKEY_HEX || BEFORE || DELETEDHASH[0] || ... || DELETEDHASH[N] )
        expected_signed = "".join((my_ss_id, str(ts), *expected_del)).encode()
        for k, v in r['swarm'].items():
            assert v['deleted'] == expected_del
            edpk = VerifyKey(k, encoder=HexEncoder)
            try:
                edpk.verify(expected_signed, base64.b64decode(v['signature']))
            except nacl.exceptions.BadSignatureError as e:
                print("Bad signature from swarm member {}".format(k))
                raise e

    def remaining(conn, ts):
        # Retrieve what is left for the pubkey.
        resp = omq.request_future(conn, 'storage.retrieve',
                                  [json.dumps({
                                      "pubkey": my_ss_id,
                                      "timestamp": ts,
                                      "signature": sk.sign(f"retrieve{ts}".encode(), encoder=Base64Encoder).signature.decode()
                                  }).encode()]
                                  ).get()
        assert len(resp) == 1
        return json.loads(resp[0])['messages']

    # Delete the last couple messages:
    ts = msgs[8]['req']['timestamp']
    delete_before(conns[1], ts, sorted(msgs[i]['hash'] for i in range(8, len(msgs))))
    assert len(remaining(conns[0], ts)) == 8

    # Delete with no matches:
    ts = msgs[7]['req']['timestamp'] - 1
    delete_before(conns[0], ts, [])
    assert len(remaining(conns[0], ts)) == 8

    # Delete most of the remaining:
    ts = msgs[1]['req']['timestamp']
    delete_before(conns[0], ts, sorted(msgs[i]['hash'] for i in range(1, 8)))
    assert len(remaining(conns[0], ts)) == 1

    # Delete the last one
    ts = msgs[0]['req']['timestamp'] + 1
    delete_before(conns[1], ts, [msgs[0]['hash']])
    assert not remaining(conns[1], ts)
|
# ===== network-tests/test_expire.py (new file, 553 lines) =====
|
|||
import ss
|
||||
from util import sn_address
|
||||
import time
|
||||
import base64
|
||||
import json
|
||||
from nacl.encoding import HexEncoder, Base64Encoder
|
||||
from nacl.signing import VerifyKey
|
||||
import nacl.exceptions
|
||||
|
||||
|
||||
def test_expire_all(omq, random_sn, sk, exclude):
    """expire_all shortens the expiry of every message newer than the given
    expiry; messages already at or below it are left untouched."""
    swarm = ss.get_swarm(omq, random_sn, sk)
    sns = ss.random_swarm_members(swarm, 2, exclude)
    conns = [omq.connect_remote(sn_address(sn)) for sn in sns]

    msgs = ss.store_n(omq, conns[0], sk, b"omg123", 5)

    my_ss_id = '05' + sk.verify_key.encode().hex()

    ts = msgs[2]['req']['expiry']
    sig = sk.sign("expire_all{}".format(ts).encode(), encoder=Base64Encoder).signature.decode()
    payload = json.dumps({"pubkey": my_ss_id, "expiry": ts, "signature": sig}).encode()

    resp = omq.request_future(conns[1], 'storage.expire_all', [payload]).get()

    assert len(resp) == 1
    r = json.loads(resp[0])

    assert set(r['swarm'].keys()) == {x['pubkey_ed25519'] for x in swarm['snodes']}

    # 0 and 1 have later expiries than 2, so they should get updated; 2's expiry is already the
    # given value, and 3/4 are <= so shouldn't get updated.
    msg_hashes = sorted(msgs[i]['hash'] for i in (0, 1))

    # signature of ( PUBKEY_HEX || EXPIRY || UPDATED[0] || ... || UPDATED[N] )
    expected_signed = "".join((my_ss_id, str(ts), *msg_hashes)).encode()
    for snode_pk, res in r['swarm'].items():
        assert res['updated'] == msg_hashes
        VerifyKey(snode_pk, encoder=HexEncoder).verify(
            expected_signed, base64.b64decode(res['signature']))

    fetched = omq.request_future(conns[0], 'storage.retrieve', [json.dumps({
        "pubkey": my_ss_id,
        "timestamp": ts,
        "signature": sk.sign(
            f"retrieve{ts}".encode(), encoder=Base64Encoder
        ).signature.decode(),
    }).encode()]).get()
    assert len(fetched) == 1
    messages = json.loads(fetched[0])['messages']
    assert len(messages) == 5

    assert messages[0]['expiration'] == ts
    assert messages[1]['expiration'] == ts
    assert messages[2]['expiration'] == ts
    assert messages[3]['expiration'] == msgs[3]['req']['expiry']
    assert messages[4]['expiration'] == msgs[4]['req']['expiry']
|
||||
|
||||
|
||||
def test_stale_expire_all(omq, random_sn, sk, exclude):
    """expire_all with a timestamp in the past must be rejected with a 406."""
    swarm = ss.get_swarm(omq, random_sn, sk)
    sn = ss.random_swarm_members(swarm, 2, exclude)[0]
    conn = omq.connect_remote(sn_address(sn))

    ss.store_n(omq, conn, sk, b"omg123", 5)

    my_ss_id = '05' + sk.verify_key.encode().hex()

    # 2 minutes in the past — the server requires expiry >= now.
    ts = int((time.time() - 120) * 1000)
    sig = sk.sign("expire_all{}".format(ts).encode(), encoder=Base64Encoder).signature.decode()
    params = {"pubkey": my_ss_id, "expiry": ts, "signature": sig}

    resp = omq.request_future(conn, 'storage.expire_all', [json.dumps(params).encode()]).get()
    assert resp == [b'406', b'expire_all timestamp should be >= current time']
|
||||
|
||||
|
||||
def test_expire(omq, random_sn, sk, exclude):
    """storage.expire on an explicit hash list (including one bogus hash)
    sets the expiry of every matching message to `ts` and returns per-snode
    signatures over the requested hashes plus the actually-updated hashes.

    BUG FIX: the final loop's original assertion,
    ``assert a == b if i in (...) else c``, parses as
    ``(a == b) if ... else c`` — so for the non-listed indices it merely
    asserted a truthy expiry value instead of comparing anything.  It also
    listed only (0, 1, 5, 6), but this very test asserts that all six
    requested hashes (indices 0, 1, 5, 6, 7, 9) appear in every snode's
    'updated' list, i.e. were all set to `ts`.
    """
    swarm = ss.get_swarm(omq, random_sn, sk)
    sns = ss.random_swarm_members(swarm, 2, exclude)
    conns = [omq.connect_remote(sn_address(sn)) for sn in sns]

    msgs = ss.store_n(omq, conns[0], sk, b"omg123", 10)

    my_ss_id = '05' + sk.verify_key.encode().hex()

    ts = msgs[6]['req']['expiry']
    hashes = [msgs[i]['hash'] for i in (0, 1, 5, 6, 7, 9)] + [
        'bepQtTaYrzcuCXO3fZkmk/h3xkMQ3vCh94i5HzLmj3I'
    ]
    # Make sure `hashes` input isn't provided in sorted order:
    if hashes[0] < hashes[1]:
        hashes[0], hashes[1] = hashes[1], hashes[0]
    actual_update_msgs = sorted(msgs[i]['hash'] for i in (0, 1, 5, 6, 7, 9))
    assert hashes[0:2] != actual_update_msgs[0:2]

    hashes = sorted(hashes, reverse=True)
    to_sign = ("expire" + str(ts) + "".join(hashes)).encode()
    sig = sk.sign(to_sign, encoder=Base64Encoder).signature.decode()
    params = json.dumps(
        {"pubkey": my_ss_id, "messages": hashes, "expiry": ts, "signature": sig}
    ).encode()

    resp = omq.request_future(conns[1], 'storage.expire', [params]).get()

    assert len(resp) == 1
    r = json.loads(resp[0])

    assert set(r['swarm'].keys()) == {x['pubkey_ed25519'] for x in swarm['snodes']}

    # ( PUBKEY_HEX || EXPIRY || RMSG[0] || ... || RMSG[N] || UMSG[0] || ... || UMSG[M] )
    expected_signed = "".join((my_ss_id, str(ts), *hashes, *actual_update_msgs)).encode()
    for k, v in r['swarm'].items():
        assert v['updated'] == actual_update_msgs
        edpk = VerifyKey(k, encoder=HexEncoder)
        try:
            edpk.verify(expected_signed, base64.b64decode(v['signature']))
        except nacl.exceptions.BadSignatureError as e:
            print("Bad signature from swarm member {}".format(k))
            raise e

    r = omq.request_future(
        conns[0],
        'storage.retrieve',
        [
            json.dumps(
                {
                    "pubkey": my_ss_id,
                    "timestamp": ts,
                    "signature": sk.sign(
                        f"retrieve{ts}".encode(), encoder=Base64Encoder
                    ).signature.decode(),
                }
            ).encode()
        ],
    ).get()
    assert len(r) == 1
    r = json.loads(r[0])
    assert len(r['messages']) == 10

    for i in range(10):
        expected = ts if i in (0, 1, 5, 6, 7, 9) else msgs[i]['req']['expiry']
        assert r['messages'][i]['expiration'] == expected
|
||||
|
||||
|
||||
def test_expire_extend(omq, random_sn, sk, exclude):
    """Extending expiries via a storage.sequence of two expire requests plus
    a retrieve: a +5min extension is applied exactly, while an extension
    beyond the maximum TTL is clamped to now + max TTL (30 days)."""
    swarm = ss.get_swarm(omq, random_sn, sk)

    sn = ss.random_swarm_members(swarm, 1, exclude)[0]
    conn = omq.connect_remote(sn_address(sn))

    msgs = ss.store_n(omq, conn, sk, b"omg123", 10)

    now = int(time.time() * 1000)

    my_ss_id = '05' + sk.verify_key.encode().hex()

    for m in msgs:
        assert m["req"]["expiry"] < now + 60_000

    exp_5min = now + 5 * 60 * 1000
    # Beyond max TTL, should get shortened to now + max TTL
    exp_long = now + 31 * 24 * 60 * 60 * 1000

    def expire_req(batch, expiry):
        # Build a signed 'expire' sub-request over the batch's hashes.
        batch_hashes = [m["hash"] for m in batch]
        return {
            'method': 'expire',
            'params': {
                "pubkey": my_ss_id,
                "messages": batch_hashes,
                "expiry": expiry,
                "signature": sk.sign(
                    f"expire{expiry}{''.join(batch_hashes)}".encode(),
                    encoder=Base64Encoder,
                ).signature.decode(),
            },
        }

    retrieve_req = {
        'method': 'retrieve',
        'params': {
            'pubkey': my_ss_id,
            'timestamp': now,
            'signature': sk.sign(
                f"retrieve{now}".encode(), encoder=Base64Encoder
            ).signature.decode(),
        },
    }

    e = omq.request_future(conn, 'storage.sequence', [json.dumps({
        'requests': [
            expire_req(msgs[0:8], exp_5min),
            expire_req(msgs[8:], exp_long),
            retrieve_req,
        ]
    })]).get()

    assert len(e) == 1
    e = json.loads(e[0])
    assert [x['code'] for x in e['results']] == [200, 200, 200]
    bodies = [x['body'] for x in e['results']]

    assert 5 <= len(bodies[0]['swarm']) <= 10
    for s in bodies[0]['swarm'].values():
        assert s['expiry'] == exp_5min
        assert s['updated'] == sorted(m["hash"] for m in msgs[0:8])

    assert 5 <= len(bodies[1]['swarm']) <= 10
    for s in bodies[1]['swarm'].values():
        # expiry should have been shortened to now + max TTL:
        assert s['expiry'] < exp_long
        assert abs(s['expiry'] - 1000 * (time.time() + 30 * 24 * 60 * 60)) <= 5000
        assert s['updated'] == sorted(m["hash"] for m in msgs[8:])

    assert set(m['hash'] for m in bodies[2]['messages']) == set(m['hash'] for m in msgs)
    exps = {m['hash']: m['expiration'] for m in bodies[2]['messages']}
    stamps = {m['hash']: m['timestamp'] for m in bodies[2]['messages']}
    for m in msgs:
        assert stamps[m['hash']] == m['req']['timestamp']
    for m in msgs[0:8]:
        assert exps[m['hash']] == exp_5min
    for m in msgs[8:]:
        assert abs(exps[m['hash']] - 1000 * (time.time() + 30 * 24 * 60 * 60)) <= 5000
|
||||
|
||||
|
||||
def test_expire_shorten_extend(omq, random_sn, sk, exclude):
    """Exercises the `expire` endpoint's "shorten" and "extend" modes via a
    storage.sequence of requests, interleaved with `get_expiries` calls to
    verify the state after each step.

    Stores 10 messages with descending TTLs (60s down to 51s), then:
    - shortens msgs 0-3 to 30s and msgs 4-7 to 20s (both succeed),
    - attempts to "shorten" msgs 6-9 to 10min (all fail: that would lengthen),
    - shortens msgs 2-5 to 20s (works for 2-3, no-op for 4-5 already at 20s),
    - extends everything to 45s in extend-only mode (works for 0-7, no-op for
      8-9 which still have ~51-52s left),
    and finally retrieves all messages to confirm the resulting expiries.

    Nonexistent hashes (`do_not_exist`) are mixed into several requests to
    verify they are accepted in the signature but simply not updated.
    """
    swarm = ss.get_swarm(omq, random_sn, sk)

    sn = ss.random_swarm_members(swarm, 1, exclude)[0]
    conn = omq.connect_remote(sn_address(sn))

    now_s = time.time()
    now = int(now_s * 1000)

    msgs = ss.store_n(omq, conn, sk, b"omg123", 10, now=now_s, ttl=60)

    my_ss_id = '05' + sk.verify_key.encode().hex()

    # store_n gives each successive message a 1s shorter TTL: 60s, 59s, ... 51s
    assert [m["req"]["expiry"] for m in msgs] == [now + x * 1000 for x in range(60, 50, -1)]

    # Validly-formatted hashes that don't correspond to any stored message:
    do_not_exist = [
        '///////////////////////////////////////////',
        'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopq',
        'rstuvwxyz0123456789+/ABCDEFGHIJKLMNOPQRSTUV',
    ]
    dne_sig = ''.join(do_not_exist)

    exp_20s = now + 20 * 1000
    exp_30s = now + 30 * 1000
    exp_45s = now + 45 * 1000
    exp_10m = now + 10 * 60 * 1000
    e = omq.request_future(
        conn,
        'storage.sequence',
        [
            json.dumps(
                {
                    'requests': [
                        {
                            # shorten 0-3 from 1min to 30s
                            'method': 'expire',
                            'params': {
                                "pubkey": my_ss_id,
                                "messages": [m["hash"] for m in msgs[0:4]] + do_not_exist,
                                "expiry": exp_30s,
                                "shorten": True,
                                "signature": sk.sign(
                                    f"expireshorten{exp_30s}{''.join(m['hash'] for m in msgs[0:4])}{dne_sig}".encode(),
                                    encoder=Base64Encoder,
                                ).signature.decode(),
                            },
                        },
                        {
                            'method': 'get_expiries',
                            'params': {
                                "pubkey": my_ss_id,
                                "messages": [m["hash"] for m in msgs] + do_not_exist,
                                "timestamp": now,
                                "signature": sk.sign(
                                    f"get_expiries{now}{''.join(m['hash'] for m in msgs)}{dne_sig}".encode(),
                                    encoder=Base64Encoder,
                                ).signature.decode(),
                            },
                        },
                        {
                            # shorten 4-7 from 1min to 20s
                            'method': 'expire',
                            'params': {
                                "pubkey": my_ss_id,
                                "messages": [m["hash"] for m in msgs[4:8]] + do_not_exist,
                                "expiry": exp_20s,
                                "shorten": True,
                                "signature": sk.sign(
                                    f"expireshorten{exp_20s}{''.join(m['hash'] for m in msgs[4:8])}{dne_sig}".encode(),
                                    encoder=Base64Encoder,
                                ).signature.decode(),
                            },
                        },
                        {
                            'method': 'get_expiries',
                            'params': {
                                "pubkey": my_ss_id,
                                "messages": [m["hash"] for m in msgs] + do_not_exist,
                                "timestamp": now,
                                "signature": sk.sign(
                                    f"get_expiries{now}{''.join(m['hash'] for m in msgs)}{dne_sig}".encode(),
                                    encoder=Base64Encoder,
                                ).signature.decode(),
                            },
                        },
                        {
                            # shorten 6-9 to 10min (from 1min); should all fail to shorten
                            'method': 'expire',
                            'params': {
                                "pubkey": my_ss_id,
                                "messages": [m["hash"] for m in msgs[6:]],
                                "expiry": exp_10m,
                                "shorten": True,
                                "signature": sk.sign(
                                    f"expireshorten{exp_10m}{''.join(m['hash'] for m in msgs[6:])}".encode(),
                                    encoder=Base64Encoder,
                                ).signature.decode(),
                            },
                        },
                        {
                            'method': 'get_expiries',
                            'params': {
                                "pubkey": my_ss_id,
                                "messages": [m["hash"] for m in msgs] + do_not_exist,
                                "timestamp": now,
                                "signature": sk.sign(
                                    f"get_expiries{now}{''.join(m['hash'] for m in msgs)}{dne_sig}".encode(),
                                    encoder=Base64Encoder,
                                ).signature.decode(),
                            },
                        },
                        {
                            # shorten 2-5 to 20s; should work for 2-3 (30s) but fail for 4-5
                            # (already <=20s).
                            'method': 'expire',
                            'params': {
                                "pubkey": my_ss_id,
                                "messages": [m["hash"] for m in msgs[2:6]] + do_not_exist,
                                "expiry": exp_20s,
                                "shorten": True,
                                "signature": sk.sign(
                                    f"expireshorten{exp_20s}{''.join(m['hash'] for m in msgs[2:6])}{dne_sig}".encode(),
                                    encoder=Base64Encoder,
                                ).signature.decode(),
                            },
                        },
                        {
                            'method': 'get_expiries',
                            'params': {
                                "pubkey": my_ss_id,
                                "messages": [m["hash"] for m in msgs] + do_not_exist,
                                "timestamp": now,
                                "signature": sk.sign(
                                    f"get_expiries{now}{''.join(m['hash'] for m in msgs)}{dne_sig}".encode(),
                                    encoder=Base64Encoder,
                                ).signature.decode(),
                            },
                        },
                        {
                            # length everything to 45s in extend-only mode; should fail for shorten
                            # 2-5 to 20s; should work for 0-7 (20s or 30s) but fail for 8-9 (1min)
                            'method': 'expire',
                            'params': {
                                "pubkey": my_ss_id,
                                "messages": [m["hash"] for m in msgs] + do_not_exist,
                                "expiry": exp_45s,
                                "extend": True,
                                "signature": sk.sign(
                                    f"expireextend{exp_45s}{''.join(m['hash'] for m in msgs)}{dne_sig}".encode(),
                                    encoder=Base64Encoder,
                                ).signature.decode(),
                            },
                        },
                        {
                            'method': 'retrieve',
                            'params': {
                                'pubkey': my_ss_id,
                                'timestamp': now,
                                'signature': sk.sign(
                                    f"retrieve{now}".encode(), encoder=Base64Encoder
                                ).signature.decode(),
                            },
                        },
                    ]
                }
            )
        ],
    ).get()

    assert len(e) == 1
    e = json.loads(e[0])
    assert [x['code'] for x in e['results']] == [200] * 10
    e = [x['body'] for x in e['results']]

    # e[0]: shorten 0-3 to 30s succeeded on every swarm member
    assert 5 <= len(e[0]['swarm']) <= 10
    for snpk, s in e[0]['swarm'].items():
        assert s['expiry'] == exp_30s
        assert s['updated'] == sorted(m["hash"] for m in msgs[0:4])
        assert s['unchanged'] == {}
        # signature of ( PUBKEY_HEX || EXPIRY || RMSGs... || UMSGs... || CMSG_EXPs... )
        expected_signed = "".join(
            [my_ss_id, str(exp_30s)] + [m["hash"] for m in msgs[0:4]] + do_not_exist + s['updated']
        ).encode()
        edpk = VerifyKey(snpk, encoder=HexEncoder)
        edpk.verify(expected_signed, base64.b64decode(s['signature']))

    assert e[1] == {
        "expiries": {
            **{m["hash"]: exp_30s for m in msgs[0:4]},
            **{msgs[i]["hash"]: now + (60 - i) * 1000 for i in range(4, 10)},
        }
    }

    # e[2]: shorten 4-7 to 20s succeeded
    assert 5 <= len(e[2]['swarm']) <= 10
    for snpk, s in e[2]['swarm'].items():
        assert s['expiry'] == exp_20s
        assert s['updated'] == sorted(m["hash"] for m in msgs[4:8])
        assert s['unchanged'] == {}
        # signature of ( PUBKEY_HEX || EXPIRY || RMSGs... || UMSGs... || CMSG_EXPs... )
        expected_signed = "".join(
            [my_ss_id, str(exp_20s)] + [m["hash"] for m in msgs[4:8]] + do_not_exist + s['updated']
        ).encode()
        edpk = VerifyKey(snpk, encoder=HexEncoder)
        edpk.verify(expected_signed, base64.b64decode(s['signature']))

    assert e[3] == {
        "expiries": {
            **{m["hash"]: exp_30s for m in msgs[0:4]},
            **{m["hash"]: exp_20s for m in msgs[4:8]},
            **{msgs[i]["hash"]: now + (60 - i) * 1000 for i in range(8, 10)},
        }
    }

    # e[4]: "shorten" 6-9 to 10min did nothing (would have lengthened them)
    assert 5 <= len(e[4]['swarm']) <= 10
    for snpk, s in e[4]['swarm'].items():
        assert s['expiry'] == exp_10m
        assert s['updated'] == []
        assert s['unchanged'] == {
            **{m["hash"]: exp_20s for m in msgs[6:8]},
            **{msgs[i]["hash"]: now + (60 - i) * 1000 for i in range(8, 10)},
        }
        # signature of ( PUBKEY_HEX || EXPIRY || RMSGs... || UMSGs... || CMSG_EXPs... )
        expected_signed = "".join(
            [my_ss_id, str(exp_10m)]
            + [m["hash"] for m in msgs[6:]]
            + sorted(
                [
                    f"{msgs[6]['hash']}{exp_20s}",
                    f"{msgs[7]['hash']}{exp_20s}",
                    f"{msgs[8]['hash']}{now + 52_000}",
                    f"{msgs[9]['hash']}{now + 51_000}",
                ]
            )
        ).encode()
        edpk = VerifyKey(snpk, encoder=HexEncoder)
        edpk.verify(expected_signed, base64.b64decode(s['signature']))

    assert e[5] == {
        "expiries": {
            **{m["hash"]: exp_30s for m in msgs[0:4]},
            **{m["hash"]: exp_20s for m in msgs[4:8]},
            **{msgs[i]["hash"]: now + (60 - i) * 1000 for i in range(8, 10)},
        }
    }

    # e[6]: shorten 2-5 to 20s updated only 2-3; 4-5 were already at 20s
    assert 5 <= len(e[6]['swarm']) <= 10
    for snpk, s in e[6]['swarm'].items():
        assert s['expiry'] == exp_20s
        assert s['updated'] == sorted(m["hash"] for m in msgs[2:4])
        assert s['unchanged'] == {m["hash"]: exp_20s for m in msgs[4:6]}
        # signature of ( PUBKEY_HEX || EXPIRY || RMSGs... || UMSGs... || CMSG_EXPs... )
        expected_signed = "".join(
            [my_ss_id, str(exp_20s)]
            + [m["hash"] for m in msgs[2:6]]
            + do_not_exist
            + sorted(m["hash"] for m in msgs[2:4])
            + sorted([f"{msgs[4]['hash']}{exp_20s}", f"{msgs[5]['hash']}{exp_20s}"])
        ).encode()
        edpk = VerifyKey(snpk, encoder=HexEncoder)
        edpk.verify(expected_signed, base64.b64decode(s['signature']))

    assert e[7] == {
        "expiries": {
            **{m["hash"]: exp_30s for m in msgs[0:2]},
            **{m["hash"]: exp_20s for m in msgs[2:8]},
            **{msgs[i]["hash"]: now + (60 - i) * 1000 for i in range(8, 10)},
        }
    }

    # e[8]: extend everything to 45s updated 0-7; 8-9 already expire later
    assert 5 <= len(e[8]['swarm']) <= 10
    for snpk, s in e[8]['swarm'].items():
        assert s['expiry'] == exp_45s
        assert s['updated'] == sorted(m["hash"] for m in msgs[0:8])
        assert s['unchanged'] == {msgs[i]["hash"]: now + (60 - i) * 1000 for i in range(8, 10)}
        # signature of ( PUBKEY_HEX || EXPIRY || RMSGs... || UMSGs... || CMSG_EXPs... )
        expected_signed = "".join(
            [my_ss_id, str(exp_45s)]
            + [m["hash"] for m in msgs]
            + do_not_exist
            + s['updated']
            + sorted([f"{msgs[8]['hash']}{now + 52_000}", f"{msgs[9]['hash']}{now + 51_000}"])
        ).encode()
        edpk = VerifyKey(snpk, encoder=HexEncoder)
        edpk.verify(expected_signed, base64.b64decode(s['signature']))

    # e[9]: retrieve reflects the final expiries of all ten messages
    assert e[9]['hf'] >= [19, 3]
    assert now - 60_000 <= e[9]['t'] <= now + 60_000
    del e[9]['hf']
    del e[9]['t']
    expected_expiries = [exp_45s] * 8 + [now + 52_000, now + 51_000]
    assert e[9] == {
        "messages": [
            {
                'data': base64.b64encode(msgs[i]['data']).decode(),
                'expiration': expected_expiries[i],
                'hash': msgs[i]['hash'],
                'timestamp': msgs[i]['req']['timestamp'],
            }
            for i in range(len(msgs))
        ],
        "more": False,
    }
|
187
network-tests/test_ifelse.py
Normal file
187
network-tests/test_ifelse.py
Normal file
|
@ -0,0 +1,187 @@
|
|||
from util import sn_address
|
||||
import json
|
||||
import ss
|
||||
import time
|
||||
from nacl.encoding import Base64Encoder
|
||||
from nacl.hash import blake2b
|
||||
|
||||
# Message payloads used throughout these tests, in both raw and base64 form.
# The base64 strings are what gets submitted in `store` requests; the raw
# bytes are what the payload decodes to (and what message hashes are computed
# over).  'Yes=' base64-decodes to b'\x61\xeb' and 'Nope' to b'\x36\x8a\x5e'.
m_yes = b'\x61\xeb'
b64_m_yes = 'Yes='
m_no = b'\x36\x8a\x5e'
b64_m_no = 'Nope'
|
||||
|
||||
def test_ifelse(omq, random_sn, sk, exclude):
    """Tests the storage.ifelse conditional endpoint.

    Fires a series of ifelse requests whose conditions (`hf_at_least`,
    `hf_before`, `height_before`, `height_at_least`, `v_at_least`) are chosen
    to be definitely true or definitely false on any current network, each
    storing a "yes" or "no" payload, then verifies which branch executed by
    checking the hash of the stored message.  Also exercises deeply nested
    ifelse (ending in a batch of stores) and an ifelse wrapped inside a
    storage.batch request.
    """
    swarm = ss.get_swarm(omq, random_sn, sk)

    sn = ss.random_swarm_members(swarm, 1, exclude)[0]
    conn = omq.connect_remote(sn_address(sn))

    ts = int(time.time() * 1000)
    ttl = 86_400_000

    my_ss_id = '05' + sk.verify_key.encode().hex()

    def store_action(msg, ts):
        # Builds a `store` sub-request for the given base64 payload; each call
        # uses a distinct timestamp so the requests are distinguishable.
        return {
            'method': 'store',
            'params': {
                'pubkey': my_ss_id,
                'timestamp': ts,
                'ttl': ttl,
                'data': msg
            }
        }

    r = []
    # Condition false (HF 2000000 far in the future) -> 'else' branch stores "no"
    r.append(omq.request_future(conn, 'storage.ifelse',
        [json.dumps({
            'if': { 'hf_at_least': [2000000] },
            'then': store_action(b64_m_yes, ts),
            'else': store_action(b64_m_no, ts),
        })]))

    # hf >= 19 is true but height_before 1234 is false -> 'else'
    r.append(omq.request_future(conn, 'storage.ifelse',
        [json.dumps({
            'if': { 'hf_at_least': [19], 'height_before': 1234 },
            'then': store_action(b64_m_yes, ts+1),
            'else': store_action(b64_m_no, ts+1),
        })]))

    # Both conditions true -> 'then'
    r.append(omq.request_future(conn, 'storage.ifelse',
        [json.dumps({
            'if': { 'hf_at_least': [19], 'height_before': 123456789 },
            'then': store_action(b64_m_yes, ts+2),
            'else': store_action(b64_m_no, ts+2),
        })]))

    r.append(omq.request_future(conn, 'storage.ifelse',
        [json.dumps({
            'if': { 'hf_at_least': [19] },
            'then': store_action(b64_m_yes, ts+3),
            'else': store_action(b64_m_no, ts+3),
        })]))

    # True condition with no 'else' given:
    r.append(omq.request_future(conn, 'storage.ifelse',
        [json.dumps({
            'if': { 'hf_at_least': [19, 1] },
            'then': store_action(b64_m_yes, ts+4),
        })]))

    # False condition with no 'else': nothing should run at all
    r.append(omq.request_future(conn, 'storage.ifelse',
        [json.dumps({
            'if': { 'hf_before': [19] },
            'then': store_action(b64_m_yes, ts+5),
        })]))

    # True condition with only an 'else': nothing should run
    r.append(omq.request_future(conn, 'storage.ifelse',
        [json.dumps({
            'if': { 'hf_at_least': [19] },
            'else': store_action(b64_m_yes, ts+6),
        })]))

    # False condition with only an 'else': the 'else' runs
    r.append(omq.request_future(conn, 'storage.ifelse',
        [json.dumps({
            'if': { 'hf_before': [19] },
            'else': store_action(b64_m_no, ts+7),
        })]))

    # Nested ifelse chain, all conditions true, bottoming out in a batch of
    # three stores:
    r.append(omq.request_future(conn, 'storage.ifelse',
        [json.dumps({
            'if': { 'hf_at_least': [19] },
            'then': {
                'method': 'ifelse',
                'params': { 'if': { 'hf_at_least': [19] }, 'then': {
                    'method': 'ifelse',
                    'params': { 'if': { 'height_at_least': 100 }, 'then': {
                        'method': 'ifelse',
                        'params': { 'if': { 'v_at_least': [2, 2] }, 'then': {
                            'method': 'ifelse',
                            'params': {
                                'if': { 'hf_before': [99999, 99] },
                                'then': {
                                    'method': 'batch',
                                    'params': {
                                        'requests': [
                                            store_action(b64_m_yes, ts+8),
                                            store_action(b64_m_yes, ts+9),
                                            store_action(b64_m_yes, ts+10)
                                        ]
                                    }
                                }
                            }
                        }}
                    }}
                }}
            }
        })]))

    # NOTE(review): this future is created but never .get()/asserted below —
    # presumably meant to check ifelse-inside-batch handling; TODO confirm and
    # add an assertion on the response.
    bad = omq.request_future(conn, 'storage.batch',
        [json.dumps({
            'requests': [{
                'method': 'ifelse',
                'params': {
                    'if': { 'hf_at_least': [19] },
                    'then': { 'method': 'info', 'params': {} },
                    'else': { 'method': 'info', 'params': {} }
                }
            }]
        })])

    def msg_hash(body):
        # Expected storage hash: blake2b(netid || pubkey || data), base64
        # without padding.  (The store timestamp is not part of the hash.)
        return blake2b(b'\x05' + sk.verify_key.encode() + body,
                encoder=Base64Encoder).decode().rstrip('=')

    for i in range(len(r)):
        r[i] = r[i].get()
        print(r[i])
        assert len(r[i]) == 1
        r[i] = json.loads(r[i][0])
        # Requests 5 and 6 have no branch to execute, so carry no 'result':
        if i not in (5, 6):
            assert 'result' in r[i] and r[i]['result']['code'] == 200 and 'body' in r[i]['result']

    assert not r[0]['condition']
    assert r[0]['result']['body']['hash'] == msg_hash(m_no)

    assert not r[1]['condition']
    assert r[1]['result']['body']['hash'] == msg_hash(m_no)

    assert r[2]['condition']
    assert r[2]['result']['body']['hash'] == msg_hash(m_yes)

    assert r[3]['condition']
    assert r[3]['result']['body']['hash'] == msg_hash(m_yes)

    assert r[4]['condition']
    assert r[4]['result']['body']['hash'] == msg_hash(m_yes)

    assert not r[5]['condition']
    assert 'result' not in r[5]

    assert r[6]['condition']
    assert 'result' not in r[6]

    assert not r[7]['condition']
    assert r[7]['result']['body']['hash'] == msg_hash(m_no)

    # Walk down the nested ifelse results, checking each level:
    x = r[8]
    assert x['condition']  # hf >= 19
    assert x['result']['code'] == 200
    x = x['result']['body']
    assert x['condition']  # hf >= 19
    assert x['result']['code'] == 200
    x = x['result']['body']
    assert x['condition']  # height >= 100
    assert x['result']['code'] == 200
    x = x['result']['body']
    assert x['condition']  # v >= 2.2
    assert x['result']['code'] == 200
    x = x['result']['body']
    assert x['condition']  # hf < 99999.99
    assert x['result']['code'] == 200
    x = x['result']['body']
    x = x['results']
    assert len(x) == 3
    assert [y['code'] for y in x] == [200, 200, 200]
    assert x[0]['body']['hash'] == msg_hash(m_yes)
    assert x[1]['body']['hash'] == msg_hash(m_yes)
    assert x[2]['body']['hash'] == msg_hash(m_yes)
|
371
network-tests/test_monitor.py
Normal file
371
network-tests/test_monitor.py
Normal file
|
@ -0,0 +1,371 @@
|
|||
from util import sn_address
|
||||
import ss
|
||||
import subkey
|
||||
import time
|
||||
import datetime
|
||||
from nacl.hash import blake2b
|
||||
from nacl.encoding import RawEncoder, Base64Encoder
|
||||
from nacl.signing import SigningKey, VerifyKey
|
||||
import nacl.bindings as sodium
|
||||
import json
|
||||
import base64
|
||||
|
||||
import oxenmq
|
||||
from oxenc import bt_serialize, bt_deserialize
|
||||
|
||||
|
||||
def notify_request(
    sk: SigningKey,
    ts: int,
    data: bool,
    namespaces: list,
    *,
    netid=0x05,
    sessionid: bool = False,
    subk: bytes = None,
):
    """Builds a signed monitor.messages subscription request dict.

    Args:
        sk: account signing key.
        ts: subscription timestamp (unix seconds).
        data: whether message data should be included in notifications.
        namespaces: list of namespace ints to monitor (sorted into the request).
        netid: network id prefix byte for the account (default 0x05).
        sessionid: if True, subscribe via the Session-style 'P' ed25519 pubkey
            (account is the derived x25519 key, netid must be 0x05).
        subk: optional 32-byte subkey tag; when given, the request is signed
            with a derived subkey instead of the master key.

    Returns:
        dict ready to be bt-serialized and sent to monitor.messages.
    """

    req = {'n': sorted(namespaces), 'd': int(data), 't': ts}

    if sessionid:
        assert netid == 0x05
        req['P'] = sk.verify_key.encode()
        account = b'\x05' + sk.verify_key.to_curve25519_public_key().encode()
    else:
        # Single prefix byte + raw pubkey.  (bytes([netid]) rather than
        # chr(netid).encode(), which would UTF-8-expand netids >= 0x80.)
        account = bytes([netid]) + sk.verify_key.encode()
        req['p'] = account

    # ( "MONITOR" || ACCOUNT || TS || D || NS[0] || ... || NS[n] )
    message = (
        f'MONITOR{account.hex()}{ts:d}{data:d}' + ','.join(f'{n}' for n in req['n'])
    ).encode()

    if not subk:
        req['s'] = sk.sign(message).signature
    else:
        assert len(subk) == 32
        req['S'] = subk
        _, d, D = subkey.make_subkey(sk, subk)
        sig = subkey.sign(message, sk, d, D)
        assert len(sig) == 64
        req['s'] = sig

    return req
|
||||
|
||||
|
||||
def test_monitor_reg_ed(omq, random_sn, sk, exclude):
    """Registers a monitor.messages subscription (plain Ed25519 account,
    netid 3) with every snode in the swarm and expects a success ack from
    each."""
    swarm = ss.get_swarm(omq, random_sn, sk)

    lmq = oxenmq.OxenMQ()
    lmq.start()

    sub_ts = int(time.time())
    pending = []
    for snode in swarm['snodes']:
        # Stash the parsed address on the snode record as well:
        snode['addr'] = oxenmq.Address(
            f"curve://{snode['ip']}:{snode['port_omq']}/{snode['pubkey_x25519']}"
        )
        remote = lmq.connect_remote(
            snode['addr'],
            on_success=lambda conn: None,
            on_failure=lambda _, msg: print(f"Connection failed: {msg}"),
            timeout=datetime.timedelta(seconds=3),
        )
        monitor_req = notify_request(sk, sub_ts, True, [-5, 0, 23], netid=3)
        pending.append(
            lmq.request_future(
                remote,
                "monitor.messages",
                bt_serialize(monitor_req),
                request_timeout=datetime.timedelta(seconds=7),
            )
        )

    replies = [fut.get() for fut in pending]
    assert replies == [[b'd7:successi1ee']] * len(replies)
|
||||
|
||||
|
||||
def test_monitor_reg_session(omq, random_sn, sk, exclude):
    """Same as test_monitor_reg_ed, but registers using Session-style
    authentication (sessionid=True: 'P' ed25519 pubkey, derived x25519
    account, netid 5)."""
    swarm = ss.get_swarm(omq, random_sn, sk)

    lmq = oxenmq.OxenMQ()
    lmq.start()

    sub_ts = int(time.time())
    pending = []
    for snode in swarm['snodes']:
        # Stash the parsed address on the snode record as well:
        snode['addr'] = oxenmq.Address(
            f"curve://{snode['ip']}:{snode['port_omq']}/{snode['pubkey_x25519']}"
        )
        remote = lmq.connect_remote(
            snode['addr'],
            on_success=lambda conn: None,
            on_failure=lambda _, msg: print(f"Connection failed: {msg}"),
            timeout=datetime.timedelta(seconds=3),
        )
        monitor_req = notify_request(sk, sub_ts, True, [-5, 0, 23], netid=5, sessionid=True)
        pending.append(
            lmq.request_future(
                remote,
                "monitor.messages",
                bt_serialize(monitor_req),
                request_timeout=datetime.timedelta(seconds=7),
            )
        )

    replies = [fut.get() for fut in pending]
    assert replies == [[b'd7:successi1ee']] * len(replies)
|
||||
|
||||
|
||||
def test_monitor_reg_subkey(omq, random_sn, sk, exclude):
    """Same as test_monitor_reg_ed, but signs the registration with a derived
    subkey (fixed 32-byte tag) rather than the master key, on netid 2."""
    swarm = ss.get_swarm(omq, random_sn, sk)

    # Highly random subkey tag:
    subk = b'abcdefghijklmnopqrstuvwxyzomg123'
    lmq = oxenmq.OxenMQ()
    lmq.start()

    sub_ts = int(time.time())
    pending = []
    for snode in swarm['snodes']:
        # Stash the parsed address on the snode record as well:
        snode['addr'] = oxenmq.Address(
            f"curve://{snode['ip']}:{snode['port_omq']}/{snode['pubkey_x25519']}"
        )
        remote = lmq.connect_remote(
            snode['addr'],
            on_success=lambda conn: None,
            on_failure=lambda _, msg: print(f"Connection failed: {msg}"),
            timeout=datetime.timedelta(seconds=3),
        )
        monitor_req = notify_request(sk, sub_ts, True, [-5, 0, 23], netid=2, subk=subk)
        pending.append(
            lmq.request_future(
                remote,
                "monitor.messages",
                bt_serialize(monitor_req),
                request_timeout=datetime.timedelta(seconds=7),
            )
        )

    replies = [fut.get() for fut in pending]
    assert replies == [[b'd7:successi1ee']] * len(replies)
|
||||
|
||||
|
||||
def test_monitor_push(omq, random_sn, sk, exclude):
    """Subscribes to message notifications on every snode in the swarm, then
    stores one message in a monitored namespace (expecting a push from every
    snode) and one in a non-monitored namespace (expecting no push for it).
    """
    swarm = ss.get_swarm(omq, random_sn, sk)

    conns = {}

    n_notifies = 0

    def handle_notify_message(m):
        # Records an incoming notify.message push against the snode that sent it.
        nonlocal conns, n_notifies
        snode = conns[m.conn]
        print(f"got notify from {snode['pubkey_legacy']} at {time.time()}")
        conns[m.conn]['response'].append(bt_deserialize(m.data()[0]))
        n_notifies += 1

    # We need to make our own OMQ because we need to add the cat/command for notifies
    o = oxenmq.OxenMQ()
    o.max_message_size = 10 * 1024 * 1024
    notify = o.add_category('notify', oxenmq.AuthLevel.none)
    notify.add_command("message", handle_notify_message)
    o.start()

    ts = int(time.time())
    # Fix: this set was referenced by the on_success callback below but never
    # defined, which made the callback raise NameError on every successful
    # connection.
    connected = set()
    registered = []
    for snode in swarm['snodes']:
        snode['response'] = []
        c = o.connect_remote(
            oxenmq.Address(f"curve://{snode['ip']}:{snode['port_omq']}/{snode['pubkey_x25519']}"),
            on_success=lambda conn: connected.add(conn),
            on_failure=lambda _, msg: print(f"Connection failed: {msg}"),
        )
        snode['conn'] = c
        conns[c] = snode

        registered.append(
            o.request_future(
                c,
                "monitor.messages",
                bt_serialize(notify_request(sk, ts, True, [-5, 0, 23], netid=3)),
                request_timeout=datetime.timedelta(seconds=5),
            )
        )

    registered = [r.get() for r in registered]
    assert registered == [[b'd7:successi1ee']] * len(registered)

    # Now go send a message:
    sn = ss.random_swarm_members(swarm, 1, exclude)[0]
    conn = omq.connect_remote(sn_address(sn))

    print(f"starting store at {time.time()}")
    ts = int(time.time() * 1000)
    ttl = 86400000
    exp = ts + ttl
    # Store a message for myself
    s = omq.request_future(
        conn,
        'storage.store',
        [
            json.dumps(
                {
                    "pubkey": '03' + sk.verify_key.encode().hex(),
                    "timestamp": ts,
                    "ttl": ttl,
                    "data": base64.b64encode("abc 123".encode()).decode(),
                }
            ).encode()
        ],
    )

    # And another, but this one in a non-monitored namespace:
    s2 = omq.request_future(
        conn,
        'storage.store',
        [
            json.dumps(
                {
                    "pubkey": '03' + sk.verify_key.encode().hex(),
                    "timestamp": ts,
                    "namespace": 123,
                    "ttl": ttl,
                    "data": base64.b64encode("abc 123".encode()).decode(),
                }
            ).encode()
        ],
    )

    # It's pretty rare that we don't get all the responses before the store responses (since they
    # don't have to be onion-routed back to us), but give it a couple seconds anyway.
    s = s.get()
    print(f"got store response at {time.time()}")
    assert len(s) == 1
    s = json.loads(s[0])
    # Expected hash: blake2b(netid || pubkey || data), unpadded base64
    hash = (
        blake2b(
            b'\x03' + sk.verify_key.encode() + b'abc 123',
            encoder=Base64Encoder,
        )
        .decode()
        .rstrip('=')
    )
    assert [v['hash'] for v in s['swarm'].values()] == [hash] * len(s['swarm'])

    s2 = s2.get()

    tries = 0
    while n_notifies < len(swarm['snodes']) and tries < 8:
        time.sleep(0.25)
        tries += 1

    expected_notify = {
        b'@': b'\x03' + sk.verify_key.encode(),
        b'h': hash.encode(),
        b'n': 0,
        b't': ts,
        b'z': exp,
        b'~': b'abc 123',
    }

    # Every snode pushed exactly the monitored-namespace message (and nothing
    # for the namespace-123 store):
    assert [s['response'] for s in swarm['snodes']] == [[expected_notify]] * len(swarm['snodes'])
|
||||
|
||||
|
||||
def test_monitor_multi(omq, random_sn, sk, exclude):
    """Registers two monitor.messages subscriptions in a single request (a
    bt-serialized list: one for a throwaway account, one for ours), then
    stores a message and verifies every snode pushes it."""
    swarm = ss.get_swarm(omq, random_sn, sk)

    conns = {}

    n_notifies = 0

    sk2 = SigningKey.generate()

    def handle_notify_message(m):
        # Records an incoming notify.message push against the snode that sent it.
        nonlocal conns, n_notifies
        snode = conns[m.conn]
        print(f"got notify from {snode['pubkey_legacy']} at {time.time()}")
        conns[m.conn]['response'].append(bt_deserialize(m.data()[0]))
        n_notifies += 1

    # We need to make our own OMQ because we need to add the cat/command for notifies
    o = oxenmq.OxenMQ()
    o.max_message_size = 10 * 1024 * 1024
    notify = o.add_category('notify', oxenmq.AuthLevel.none)
    notify.add_command("message", handle_notify_message)
    o.start()

    ts = int(time.time())
    # Fix: this set was referenced by the on_success callback below but never
    # defined, which made the callback raise NameError on every successful
    # connection.
    connected = set()
    registered = []
    for snode in swarm['snodes']:
        snode['response'] = []
        c = o.connect_remote(
            oxenmq.Address(f"curve://{snode['ip']}:{snode['port_omq']}/{snode['pubkey_x25519']}"),
            on_success=lambda conn: connected.add(conn),
            on_failure=lambda _, msg: print(f"Connection failed: {msg}"),
        )
        snode['conn'] = c
        conns[c] = snode

        registered.append(
            o.request_future(
                c,
                "monitor.messages",
                bt_serialize(
                    [
                        notify_request(sk2, ts, True, [0], netid=3),
                        notify_request(sk, ts, True, [-5, 0, 23], netid=3),
                    ]
                ),
                request_timeout=datetime.timedelta(seconds=5),
            )
        )

    registered = [r.get() for r in registered]
    # Multi-registration response is a bt list with one success per subscription:
    assert registered == [[b'l' + b'd7:successi1ee' * 2 + b'e']] * len(registered)

    # Now go send a message:
    sn = ss.random_swarm_members(swarm, 1, exclude)[0]
    conn = omq.connect_remote(sn_address(sn))

    print(f"starting store at {time.time()}")
    ts = int(time.time() * 1000)
    ttl = 86400000
    exp = ts + ttl
    # Store a message for myself
    s = omq.request_future(
        conn,
        'storage.store',
        [
            json.dumps(
                {
                    "pubkey": '03' + sk.verify_key.encode().hex(),
                    "timestamp": ts,
                    "ttl": ttl,
                    "data": base64.b64encode("xyz 123".encode()).decode(),
                }
            ).encode()
        ],
    )

    s = s.get()
    print(f"got store response at {time.time()}")
    assert len(s) == 1
    s = json.loads(s[0])

    tries = 0
    while n_notifies < len(swarm['snodes']) and tries < 8:
        time.sleep(0.25)
        tries += 1

    # All swarm members report the same hash; grab it from the first entry:
    for sn in s['swarm'].values():
        hash = sn['hash']
        break

    expected_notify = {
        b'@': b'\x03' + sk.verify_key.encode(),
        b'h': hash.encode(),
        b'n': 0,
        b't': ts,
        b'z': exp,
        b'~': b'xyz 123',
    }

    assert [s['response'] for s in swarm['snodes']] == [[expected_notify]] * len(swarm['snodes'])
|
196
network-tests/test_msg_ns.py
Normal file
196
network-tests/test_msg_ns.py
Normal file
|
@ -0,0 +1,196 @@
|
|||
from util import sn_address
|
||||
import ss
|
||||
import time
|
||||
import base64
|
||||
import json
|
||||
import secrets
|
||||
from nacl.encoding import HexEncoder, Base64Encoder
|
||||
from nacl.hash import blake2b
|
||||
from nacl.signing import SigningKey, VerifyKey
|
||||
|
||||
def test_store_ns(omq, random_sn, sk, exclude):
    """Tests namespaced store/retrieve: storing into a public namespace (40,
    divisible by 10, no signature needed) and a private one (-42, signature
    required), verifying swarm hash signatures for both, retrieving each with
    a signed request, and confirming that unsigned retrieval of namespace 40
    is rejected with a 400."""
    swarm = ss.get_swarm(omq, random_sn, sk)

    sn = ss.random_swarm_members(swarm, 1, exclude)[0]
    conn = omq.connect_remote(sn_address(sn))

    ts = int(time.time() * 1000)
    ttl = 86400000
    # Store a message (publicly depositable namespace, divisible by 10)
    spub = omq.request_future(conn, 'storage.store', [json.dumps({
        "pubkey": '05' + sk.verify_key.encode().hex(),
        "timestamp": ts,
        "ttl": ttl,
        "namespace": 40,
        "data": base64.b64encode("abc 123".encode()).decode()}).encode()])

    # Store a message for myself in a private namespace (not divisible by 10)
    spriv = omq.request_future(conn, 'storage.store', [json.dumps({
        "pubkey": '05' + sk.verify_key.encode().hex(),
        "timestamp": ts,
        "ttl": ttl,
        "namespace": -42,
        "data": base64.b64encode("abc 123".encode()).decode(),
        "signature": sk.sign(f"store-42{ts}".encode(), encoder=Base64Encoder).signature.decode()}).encode()])

    spub = json.loads(spub.get()[0])

    # Namespaced hash includes the namespace (as a string) between pubkey and data:
    hpub = blake2b(b'\x05' + sk.verify_key.encode() + b'40' + b'abc 123',
            encoder=Base64Encoder).decode().rstrip('=')

    assert len(spub["swarm"]) == len(swarm['snodes'])
    edkeys = {x['pubkey_ed25519'] for x in swarm['snodes']}
    for k, v in spub['swarm'].items():
        assert k in edkeys
        assert hpub == v['hash']

        edpk = VerifyKey(k, encoder=HexEncoder)
        edpk.verify(v['hash'].encode(), base64.b64decode(v['signature']))

    # NB: assumes the test machine is reasonably time synced
    assert(ts - 30000 <= spub['t'] <= ts + 30000)

    spriv = json.loads(spriv.get()[0])
    hpriv = blake2b(b'\x05' + sk.verify_key.encode() + b'-42' + b'abc 123',
            encoder=Base64Encoder).decode().rstrip('=')

    assert len(spriv["swarm"]) == len(swarm['snodes'])
    edkeys = {x['pubkey_ed25519'] for x in swarm['snodes']}
    for k, v in spriv['swarm'].items():
        assert k in edkeys
        assert hpriv == v['hash']

        edpk = VerifyKey(k, encoder=HexEncoder)
        edpk.verify(v['hash'].encode(), base64.b64decode(v['signature']))

    # NB: assumes the test machine is reasonably time synced
    assert(ts - 30000 <= spriv['t'] <= ts + 30000)

    rpub = omq.request_future(conn, 'storage.retrieve', [json.dumps({
        "pubkey": '05' + sk.verify_key.encode().hex(),
        "timestamp": ts,
        "namespace": 40,
        "signature": sk.sign(f"retrieve40{ts}".encode(), encoder=Base64Encoder).signature.decode()}).encode()])
    rpriv = omq.request_future(conn, 'storage.retrieve', [json.dumps({
        "pubkey": '05' + sk.verify_key.encode().hex(),
        "timestamp": ts,
        "namespace": -42,
        "signature": sk.sign(f"retrieve-42{ts}".encode(), encoder=Base64Encoder).signature.decode()}).encode()])
    # No signature: even public-deposit namespaces require auth to retrieve.
    rdenied = omq.request_future(conn, 'storage.retrieve', [json.dumps({
        "pubkey": '05' + sk.verify_key.encode().hex(),
        "timestamp": ts,
        "namespace": 40 }).encode()])

    rpub = rpub.get()
    assert len(rpub) == 1
    rpub = json.loads(rpub[0])
    assert len(rpub["messages"]) == 1
    assert rpub["messages"][0]["hash"] == hpub

    rpriv = rpriv.get()
    assert len(rpriv) == 1
    rpriv = json.loads(rpriv[0])
    assert len(rpriv["messages"]) == 1
    assert rpriv["messages"][0]["hash"] == hpriv

    assert rdenied.get() == [b'400', b"invalid request: Required field 'signature' missing"]
|
||||
|
||||
|
||||
|
||||
def test_legacy_closed_ns(omq, random_sn, sk, exclude):
    """Verify store + unauthenticated retrieve in the legacy closed-group namespace (-10).

    Namespace -10 is special: it requires no signature for either store or retrieve.
    (For legacy closed groups the secret key is generated but then immediately discarded;
    it's only used to generate a primary key storage address.)
    Fixtures (omq, random_sn, sk, exclude) are presumably supplied by conftest — not visible here.
    """
    swarm = ss.get_swarm(omq, random_sn, sk)

    sn = ss.random_swarm_members(swarm, 1, exclude)[0]
    conn = omq.connect_remote(sn_address(sn))

    ts = int(time.time() * 1000)
    ttl = 86400000
    exp = ts + ttl

    # namespace -10 is a special, no-auth namespace for legacy closed group messages.
    # NOTE: `.encode()` added for consistency with every other request body in these tests.
    sclosed = omq.request_future(conn, 'storage.store', [json.dumps({
        "pubkey": '05' + sk.verify_key.encode().hex(),
        "timestamp": ts,
        "ttl": ttl,
        "namespace": -10,
        "data": base64.b64encode("blah blah".encode()).decode()}).encode()])

    sclosed = json.loads(sclosed.get()[0])
    # Expected message hash: blake2b over (session pubkey byte || namespace || data),
    # base64 with trailing '=' padding stripped (matches the other store tests here).
    # Renamed from `hash` to avoid shadowing the builtin.
    msg_hash = blake2b(b'\x05' + sk.verify_key.encode() + b'-10' + b'blah blah',
            encoder=Base64Encoder).decode().rstrip('=')

    # Every swarm member must acknowledge the store and sign the message hash.
    assert len(sclosed["swarm"]) == len(swarm['snodes'])
    edkeys = {x['pubkey_ed25519'] for x in swarm['snodes']}
    for k, v in sclosed['swarm'].items():
        assert k in edkeys
        assert msg_hash == v['hash']

        edpk = VerifyKey(k, encoder=HexEncoder)
        edpk.verify(v['hash'].encode(), base64.b64decode(v['signature']))

    # NB: assumes the test machine is reasonably time synced
    assert(ts - 30000 <= sclosed['t'] <= ts + 30000)

    # Now retrieve it: this is the only namespace we can access without authentication
    r = omq.request_future(conn, 'storage.retrieve', [json.dumps({
        "pubkey": '05' + sk.verify_key.encode().hex(),
        "namespace": -10,
    }).encode()])

    r = r.get()
    assert len(r) == 1
    r = json.loads(r[0])

    assert len(r['messages']) == 1
    msg = r['messages'][0]
    assert base64.b64decode(msg['data']) == b'blah blah'
    assert msg['timestamp'] == ts
    assert msg['expiration'] == exp
    assert msg['hash'] == msg_hash
|
||||
|
||||
|
||||
def test_store_invalid_ns(omq, random_sn, sk, exclude):
    # Verifies that stores are rejected for: (1) a non-public namespace without a
    # signature, (2) an out-of-range namespace, and (3) a signature made by the
    # wrong key.  Each case pins the exact status code and error string the
    # storage server returns.
    swarm = ss.get_swarm(omq, random_sn, sk)

    sn = ss.random_swarm_members(swarm, 1, exclude)[0]
    conn = omq.connect_remote(sn_address(sn))

    ts = int(time.time() * 1000)
    ttl = 86400000
    exp = ts + ttl
    # Attempt to store a message without authentication in a non-public (% 10 != 0) namespace:
    s42 = omq.request_future(conn, 'storage.store', [json.dumps({
        "pubkey": '05' + sk.verify_key.encode().hex(),
        "timestamp": ts,
        "ttl": ttl,
        "namespace": 42,
        "data": base64.b64encode("abc 123".encode()).decode()}).encode()])

    # Attempt to store a message in a too-big/too-small namespace:
    s32k = omq.request_future(conn, 'storage.store', [json.dumps({
        "pubkey": '05' + sk.verify_key.encode().hex(),
        "timestamp": ts,
        "ttl": ttl,
        "namespace": 32768,
        "data": base64.b64encode("abc 123".encode()).decode()}).encode()])

    # Bad signature:
    # (signed with a freshly generated key that does not match the stored pubkey)
    dude_sk = SigningKey.generate()
    sdude = omq.request_future(conn, 'storage.store', [json.dumps({
        "pubkey": '05' + sk.verify_key.encode().hex(),
        "timestamp": ts,
        "ttl": ttl,
        "namespace": -32123,
        "signature": dude_sk.sign(f"store-32123{ts}".encode(), encoder=Base64Encoder).signature.decode(),
        "data": base64.b64encode("abc 123".encode()).decode()}).encode()])

    # Collect the futures; each must fail with the expected error.
    assert s42.get() == [b'401', b'store: signature required to store to namespace 42']
    assert s32k.get() == [b'400', b"invalid request: Invalid value given for 'namespace': value out of range"]
    assert sdude.get() == [b'401', b"store signature verification failed"]
|
140
network-tests/test_session_auth.py
Normal file
140
network-tests/test_session_auth.py
Normal file
|
@ -0,0 +1,140 @@
|
|||
import ss
|
||||
from util import sn_address
|
||||
import time
|
||||
import base64
|
||||
import json
|
||||
from nacl.encoding import HexEncoder, Base64Encoder
|
||||
from nacl.hash import blake2b
|
||||
from nacl.signing import VerifyKey, SigningKey
|
||||
from nacl.public import PrivateKey
|
||||
import nacl.exceptions
|
||||
|
||||
def test_session_auth(omq, random_sn, sk, exclude):
    """
    Session key handling is a bit convoluted because it follows Signal's messy approach of exposing
    the more specific x25519 pubkey rather than the more general ed25519 pubkey; this test's SS's
    ability to handle this messy key situation.

    Flow: store 5 messages under the 05+x25519 address, then attempt delete_all
    (a) without the ed25519 pubkey, (b) with a mismatched ed25519 pubkey, and
    (c) with the correct ed25519 pubkey; only (c) may succeed.
    """

    # Derive the x25519 keypair corresponding to the ed25519 signing key.
    xsk = sk.to_curve25519_private_key()
    xpk = xsk.public_key

    swarm = ss.get_swarm(omq, random_sn, xsk)
    sn = ss.random_swarm_members(swarm, 1, exclude)[0]
    conn = omq.connect_remote(sn_address(sn))

    msgs = ss.store_n(omq, conn, xsk, b"omg123", 5)

    # Session-style address: '05' netid prefix + x25519 pubkey hex.
    my_ss_id = '05' + xsk.public_key.encode().hex()

    ts = int(time.time() * 1000)
    to_sign = "delete_all{}".format(ts).encode()
    sig = sk.sign(to_sign, encoder=Base64Encoder).signature.decode()
    params = {
        "pubkey": my_ss_id,
        "timestamp": ts,
        "signature": sig
    }

    resp = omq.request_future(conn, 'storage.delete_all', [json.dumps(params).encode()]).get()

    # Expect this to fail because we didn't pass the Ed25519 key
    assert resp == [b'401', b'delete_all signature verification failed']

    # Make sure nothing was actually deleted:
    r = omq.request_future(conn, 'storage.retrieve',
        [json.dumps({
            "pubkey": my_ss_id,
            "pubkey_ed25519": sk.verify_key.encode().hex(),
            "timestamp": ts,
            "signature": sk.sign(f"retrieve{ts}".encode(), encoder=Base64Encoder).signature.decode(),
        }).encode()]
        ).get()
    assert len(r) == 1
    r = json.loads(r[0])
    assert len(r['messages']) == 5

    # Try signing with some *other* ed25519 key, which should be detected as not corresponding to
    # the x25519 pubkey and thus still fail
    fake_sk = SigningKey.generate()
    fake_sig = fake_sk.sign(to_sign, encoder=Base64Encoder).signature.decode()
    params['pubkey_ed25519'] = fake_sk.verify_key.encode().hex()
    params['signature'] = fake_sig
    resp = omq.request_future(conn, 'storage.delete_all', [json.dumps(params).encode()]).get()

    assert resp == [b'401', b'delete_all signature verification failed']

    # Make sure nothing was actually deleted:
    r = omq.request_future(conn, 'storage.retrieve',
        [json.dumps({
            "pubkey": my_ss_id,
            "pubkey_ed25519": sk.verify_key.encode().hex(),
            "timestamp": ts,
            "signature": sk.sign(f"retrieve{ts}".encode(), encoder=Base64Encoder).signature.decode(),
        }).encode()]
        ).get()
    assert len(r) == 1
    r = json.loads(r[0])
    assert len(r['messages']) == 5

    # Now send along the correct ed pubkey to make it work
    params['pubkey_ed25519'] = sk.verify_key.encode().hex()
    params['signature'] = sig
    resp = omq.request_future(conn, 'storage.delete_all', [json.dumps(params).encode()]).get()

    assert len(resp) == 1
    r = json.loads(resp[0])

    # Make sure SS is using the correct pubkey for the signatures (i.e. the session x25519 key)
    msg_hashes = sorted(m['hash'] for m in msgs)
    expected_signed = "".join((my_ss_id, str(ts), *msg_hashes)).encode()
    for k, v in r['swarm'].items():
        assert v['deleted'] == msg_hashes
        edpk = VerifyKey(k, encoder=HexEncoder)
        edpk.verify(expected_signed, base64.b64decode(v['signature']))


    # Verify deletion
    r = omq.request_future(conn, 'storage.retrieve',
        [json.dumps({
            "pubkey": my_ss_id,
            "pubkey_ed25519": sk.verify_key.encode().hex(),
            "timestamp": ts,
            "signature": sk.sign(f"retrieve{ts}".encode(), encoder=Base64Encoder).signature.decode(),
        }).encode()]
        ).get()
    assert len(r) == 1
    r = json.loads(r[0])
    assert not r['messages']
|
||||
|
||||
|
||||
def test_non_session_no_ed25519(omq, random_sn, sk, exclude):
    """
    Test that the session key hack doesn't work for non-Session addresses (i.e. when not using the
    05 prefix).
    """

    xsk = sk.to_curve25519_private_key()
    xpk = xsk.public_key

    # netid=4 builds a non-Session ('04'-prefixed) storage address.
    swarm = ss.get_swarm(omq, random_sn, xsk, netid=4)
    sn = ss.random_swarm_members(swarm, 1, exclude)[0]
    conn = omq.connect_remote(sn_address(sn))

    msgs = ss.store_n(omq, conn, xsk, b"omg123", 4)

    my_ss_id = '04' + xsk.public_key.encode().hex()

    ts = int(time.time() * 1000)
    to_sign = "delete_all{}".format(ts).encode()
    sig = sk.sign(to_sign, encoder=Base64Encoder).signature.decode()
    # Supplying pubkey_ed25519 with a non-05 address must be rejected outright.
    params = {
        "pubkey": my_ss_id,
        "pubkey_ed25519": sk.verify_key.encode().hex(),
        "timestamp": ts,
        "signature": sig
    }

    resp = omq.request_future(conn, 'storage.delete_all', [json.dumps(params).encode()]).get()

    assert resp == [b'400', b'invalid request: pubkey_ed25519 is only permitted for 05[...] pubkeys']
|
300
network-tests/test_store_retrieve.py
Normal file
300
network-tests/test_store_retrieve.py
Normal file
|
@ -0,0 +1,300 @@
|
|||
from util import sn_address
|
||||
import ss
|
||||
import time
|
||||
import base64
|
||||
import json
|
||||
from nacl.encoding import HexEncoder, Base64Encoder
|
||||
from nacl.hash import blake2b
|
||||
from nacl.signing import VerifyKey
|
||||
|
||||
def test_store(omq, random_sn, sk, exclude):
    # Basic store smoke test: store one message to our own 05-address and verify
    # that every swarm member acknowledges it with the expected blake2b hash and
    # a valid ed25519 signature over that hash.
    swarm = ss.get_swarm(omq, random_sn, sk)

    sn = ss.random_swarm_members(swarm, 1, exclude)[0]
    conn = omq.connect_remote(sn_address(sn))

    ts = int(time.time() * 1000)
    ttl = 86400000
    exp = ts + ttl
    # Store a message for myself
    s = omq.request_future(conn, 'storage.store', [json.dumps({
        "pubkey": '05' + sk.verify_key.encode().hex(),
        "timestamp": ts,
        "ttl": ttl,
        "data": base64.b64encode("abc 123".encode()).decode()}).encode()]).get()
    assert len(s) == 1
    s = json.loads(s[0])

    # Expected hash: blake2b(pubkey byte-form || data), base64 without '=' padding.
    hash = blake2b(b'\x05' + sk.verify_key.encode() + b'abc 123',
            encoder=Base64Encoder).decode().rstrip('=')

    assert len(s["swarm"]) == len(swarm['snodes'])
    edkeys = {x['pubkey_ed25519'] for x in swarm['snodes']}
    for k, v in s['swarm'].items():
        assert k in edkeys
        assert hash == v['hash']

        edpk = VerifyKey(k, encoder=HexEncoder)
        edpk.verify(v['hash'].encode(), base64.b64decode(v['signature']))

    # NB: assumes the test machine is reasonably time synced
    assert(ts - 30000 <= s['t'] <= ts + 30000)
|
||||
|
||||
|
||||
def test_store_retrieve_unauthenticated(omq, random_sn, sk, exclude):
    """Attempts to retrieve messages without authentication. This should fail (as of HF19)."""
    sns = ss.random_swarm_members(ss.get_swarm(omq, random_sn, sk), 2, exclude)
    conn1 = omq.connect_remote(sn_address(sns[0]))

    ts = int(time.time() * 1000)
    ttl = 86400000
    exp = ts + ttl
    # Store a message for myself
    s = omq.request_future(conn1, 'storage.store', [json.dumps({
        "pubkey": '05' + sk.verify_key.encode().hex(),
        "timestamp": ts,
        "ttl": ttl,
        "data": base64.b64encode(b"abc 123").decode()}).encode()]).get()
    assert len(s) == 1
    s = json.loads(s[0])

    hash = blake2b(b'\x05' + sk.verify_key.encode() + b'abc 123',
            encoder=Base64Encoder).decode().rstrip('=')

    assert all(v['hash'] == hash for v in s['swarm'].values())

    # Retrieve via a *different* swarm member, with no signature at all:
    conn2 = omq.connect_remote(sn_address(sns[1]))
    r = omq.request_future(conn2, 'storage.retrieve', [json.dumps({
        "pubkey": '05' + sk.verify_key.encode().hex() }).encode()]).get()

    assert r == [b'401', b'retrieve: request signature required']
|
||||
|
||||
|
||||
def test_store_retrieve_authenticated(omq, random_sn, sk, exclude):
    # Exercises authenticated retrieve under two auth modes:
    #  - a Session-style 05+x25519 address (signature verified via pubkey_ed25519), and
    #  - a plain 03+ed25519 address (signature verified directly against the pubkey).
    # Also checks that a corrupted signature and a missing signature are rejected
    # with the exact expected errors.
    xsk = sk.to_curve25519_private_key()
    xpk = xsk.public_key
    sn_x = ss.random_swarm_members(ss.get_swarm(omq, random_sn, xsk), 1, exclude)[0]
    sn_ed = ss.random_swarm_members(ss.get_swarm(omq, random_sn, sk), 1, exclude)[0]
    conn_x = omq.connect_remote(sn_address(sn_x))
    conn_ed = omq.connect_remote(sn_address(sn_ed))

    ts = int(time.time() * 1000)
    ttl = 86400000
    exp = ts + ttl
    # Store message for myself, using both my ed25519 key and x25519 key to test different auth
    # modes
    s1 = omq.request_future(conn_x, 'storage.store', [json.dumps({
        "pubkey": '05' + xpk.encode().hex(),
        "timestamp": ts,
        "ttl": ttl,
        "data": base64.b64encode(b"abc 123").decode()}).encode()])
    s2 = omq.request_future(conn_ed, 'storage.store', [json.dumps({
        "pubkey": '03' + sk.verify_key.encode().hex(),
        "timestamp": ts,
        "ttl": ttl,
        "data": base64.b64encode(b"def 456").decode()}).encode()])

    s1 = s1.get()
    assert len(s1) == 1
    s1 = json.loads(s1[0])

    hash1 = blake2b(b'\x05' + xpk.encode() + b'abc 123',
            encoder=Base64Encoder).decode().rstrip('=')

    assert all(v['hash'] == hash1 for v in s1['swarm'].values())

    s2 = s2.get()
    assert len(s2) == 1
    s2 = json.loads(s2[0])

    hash2 = blake2b(b'\x03' + sk.verify_key.encode() + b'def 456',
            encoder=Base64Encoder).decode().rstrip('=')

    to_sign = "retrieve{}".format(ts).encode()
    sig = sk.sign(to_sign, encoder=Base64Encoder).signature.decode()
    # Flip one character of the valid base64 signature to make it verifiably bad.
    badsig = sig[0:4] + ('z' if sig[4] != 'z' else 'a') + sig[5:]

    r_good1 = omq.request_future(conn_x, 'storage.retrieve', [
        json.dumps({
            "pubkey": '05' + xpk.encode().hex(),
            "timestamp": ts,
            "signature": sig,
            "pubkey_ed25519": sk.verify_key.encode().hex()
        }).encode()])
    r_good2 = omq.request_future(conn_ed, 'storage.retrieve', [
        json.dumps({
            "pubkey": '03' + sk.verify_key.encode().hex(),
            "timestamp": ts,
            "signature": sig
        }).encode()])
    r_bad1 = omq.request_future(conn_x, 'storage.retrieve', [
        json.dumps({
            "pubkey": '05' + xpk.encode().hex(),
            "timestamp": ts,
            "signature": badsig, # invalid sig
            "pubkey_ed25519": sk.verify_key.encode().hex()
        }).encode()])
    r_bad2 = omq.request_future(conn_ed, 'storage.retrieve', [
        json.dumps({
            "pubkey": '03' + sk.verify_key.encode().hex(),
            "timestamp": ts,
            "signature": badsig # invalid sig
        }).encode()])
    r_bad3 = omq.request_future(conn_ed, 'storage.retrieve', [
        json.dumps({
            "pubkey": '03' + sk.verify_key.encode().hex(),
            "timestamp": ts,
            #"signature": badsig # has timestamp but missing sig
        }).encode()])

    r_good1 = json.loads(r_good1.get()[0])
    assert len(r_good1['messages']) == 1
    msg = r_good1['messages'][0]
    assert msg['data'] == base64.b64encode(b'abc 123').decode()
    assert msg['timestamp'] == ts
    assert msg['expiration'] == exp
    assert msg['hash'] == hash1

    r_good2 = json.loads(r_good2.get()[0])
    assert len(r_good2['messages']) == 1
    msg = r_good2['messages'][0]
    assert msg['data'] == base64.b64encode(b'def 456').decode()
    assert msg['timestamp'] == ts
    assert msg['expiration'] == exp
    assert msg['hash'] == hash2

    assert r_bad1.get() == [b'401', b'retrieve signature verification failed']
    assert r_bad2.get() == [b'401', b'retrieve signature verification failed']
    assert r_bad3.get() == [b'400', b"invalid request: Required field 'signature' missing"]
|
||||
|
||||
|
||||
def exactly_one(iterable):
    """Return True if exactly one element of `iterable` is truthy.

    The original called ``any(iterable)`` twice directly, which only works for
    one-shot iterators; for a list/tuple both calls see the whole sequence, so a
    single truthy element made `found_more` True and the result wrongly False.
    Taking a single iterator first makes the second ``any`` resume *after* the
    first truthy element, which is the intended behavior for any iterable.
    """
    it = iter(iterable)
    found_one = any(it)    # consumes up to (and including) the first truthy element
    found_more = any(it)   # True only if a second truthy element follows
    return found_one and not found_more
|
||||
|
||||
|
||||
def test_store_retrieve_multiple(omq, random_sn, sk, exclude):
    # Stores batches of messages via two different swarm members and verifies
    # retrieval semantics: full retrieval, `last_hash`-based incremental
    # retrieval, and the fall-back to everything when `last_hash` is unknown.
    sns = ss.random_swarm_members(ss.get_swarm(omq, random_sn, sk), 2, exclude)
    conn1 = omq.connect_remote(sn_address(sns[0]))


    # Deliberately includes a NUL and invalid UTF-8 to exercise binary-safe storage.
    basemsg = b"This is my message \x00<--that's a null, this is invalid utf8: \x80\xff"

    # Store 5 messages
    msgs = ss.store_n(omq, conn1, sk, basemsg, 5)

    # Retrieve all messages from the swarm (should give back the 5 we just stored):
    conn2 = omq.connect_remote(sn_address(sns[1]))
    ts = int(time.time() * 1000)
    resp = omq.request_future(conn2, 'storage.retrieve', [json.dumps({
        "pubkey": '05' + sk.verify_key.encode().hex(),
        "timestamp": ts,
        "signature": sk.sign(f"retrieve{ts}".encode(), encoder=Base64Encoder).signature.decode(),
    }).encode()]).get()

    assert len(resp) == 1
    r = json.loads(resp[0])

    assert len(r['messages']) == 5
    for m in r['messages']:
        data = base64.b64decode(m['data'])
        source = next(x for x in msgs if x['hash'] == m['hash'])
        assert source['data'] == data
        assert source['req']['timestamp'] == m['timestamp']
        assert source['req']['expiry'] == m['expiration']

    # Store 6 more messages
    basemsg = b'another msg'
    new_msgs = ss.store_n(omq, conn2, sk, basemsg, 6, offset=1)

    # Retrieve using a last_hash so that we should get back only the 6:
    resp = omq.request_future(conn1, 'storage.retrieve', [json.dumps({
        "pubkey": '05' + sk.verify_key.encode().hex(),
        "last_hash": msgs[4]['hash'],
        "timestamp": ts,
        "signature": sk.sign(f"retrieve{ts}".encode(), encoder=Base64Encoder).signature.decode(),
    }).encode()]).get()

    assert len(resp) == 1
    r = json.loads(resp[0])

    assert len(r['messages']) == 6
    for m in r['messages']:
        data = base64.b64decode(m['data'])
        source = next(x for x in new_msgs if x['hash'] == m['hash'])
        assert source['data'] == data
        assert source['req']['timestamp'] == m['timestamp']
        assert source['req']['expiry'] == m['expiration']

    # Give an unknown hash which should retrieve all:
    r = omq.request_future(conn2, 'storage.retrieve', [json.dumps({
        "pubkey": '05' + sk.verify_key.encode().hex(),
        "last_hash": "0123456789012345678901234567890123456789123",
        "timestamp": ts,
        "signature": sk.sign(f"retrieve{ts}".encode(), encoder=Base64Encoder).signature.decode(),
    }).encode()]).get()
    assert len(r) == 1
    r = json.loads(r[0])
    assert len(r['messages']) == 11
|
||||
|
||||
|
||||
def test_store_sig_timestamp(omq, random_sn, sk, exclude):
    """Tests that sig_timestamp is used properly for the signature both sig_timestamp and timestamp
    are given."""
    swarm = ss.get_swarm(omq, random_sn, sk)

    sn = ss.random_swarm_members(swarm, 1, exclude)[0]
    conn = omq.connect_remote(sn_address(sn))

    ts = int(time.time() * 1000)
    ns = 123
    ttl = 86400000
    exp = ts + ttl

    # Should be fine: timestamp is current, and we sign with it (so timestamp is double double-duty
    # as both the message timestamp, and the signature timestamp):
    to_sign = f"store{ns}{ts}".encode()
    s = omq.request_future(conn, 'storage.store', [json.dumps({
        "pubkey": '05' + sk.verify_key.encode().hex(),
        "namespace": ns,
        "timestamp": ts,
        "ttl": ttl,
        "data": base64.b64encode("msg1".encode()).decode(),
        "signature": sk.sign(to_sign, encoder=Base64Encoder).signature.decode()
    }).encode()]).get()
    assert len(s) == 1
    s = json.loads(s[0])

    assert 'hash' in s


    # Simulate a 100s storage delay:
    ts -= 100_000

    # Fails because timestamp is too old for a store signature:
    to_sign = f"store{ns}{ts}".encode()
    s = omq.request_future(conn, 'storage.store', [json.dumps({
        "pubkey": '05' + sk.verify_key.encode().hex(),
        "namespace": ns,
        "timestamp": ts,
        "ttl": ttl,
        "data": base64.b64encode("msg2".encode()).decode(),
        "signature": sk.sign(to_sign, encoder=Base64Encoder).signature.decode()
    }).encode()]).get()
    assert s == [b'406', b'store signature timestamp too far from current time']

    # This should work: sig_timestamp is current, timestamp is old:
    sig_ts = int(time.time() * 1000)
    to_sign = f"store{ns}{sig_ts}".encode()
    s = omq.request_future(conn, 'storage.store', [json.dumps({
        "pubkey": '05' + sk.verify_key.encode().hex(),
        "namespace": ns,
        "timestamp": ts,
        "sig_timestamp": sig_ts,
        "ttl": ttl,
        "data": base64.b64encode("msg3".encode()).decode(),
        "signature": sk.sign(to_sign, encoder=Base64Encoder).signature.decode()
    }).encode()]).get()
    assert len(s) == 1
    s = json.loads(s[0])
|
291
network-tests/test_subkey_auth.py
Normal file
291
network-tests/test_subkey_auth.py
Normal file
|
@ -0,0 +1,291 @@
|
|||
from util import sn_address
|
||||
import ss
|
||||
import subkey
|
||||
import time
|
||||
import base64
|
||||
import json
|
||||
from nacl.encoding import Base64Encoder, HexEncoder
|
||||
from nacl.hash import blake2b
|
||||
from nacl.signing import SigningKey, VerifyKey
|
||||
import nacl.exceptions
|
||||
|
||||
def test_retrieve_subkey(omq, random_sn, sk, exclude):
    # Stores a message with the master key and verifies that a delegated subkey
    # (derived via subkey.make_subkey) can retrieve it from namespace 42.
    swarm = ss.get_swarm(omq, random_sn, sk, 3)

    sn = ss.random_swarm_members(swarm, 1, exclude)[0]
    conn = omq.connect_remote(sn_address(sn))

    ts = int(time.time() * 1000)
    ttl = 86400000
    exp = ts + ttl

    # Store a message for myself, using master key
    s = omq.request_future(conn, 'storage.store', [json.dumps({
        "pubkey": '03' + sk.verify_key.encode().hex(),
        'namespace': 42,
        "timestamp": ts,
        "ttl": ttl,
        "data": base64.b64encode(b"abc 123").decode(),
        "signature": sk.sign(f"store42{ts}".encode(), encoder=Base64Encoder).signature.decode(),
    }).encode()]).get()
    assert len(s) == 1
    s = json.loads(s[0])
    hash = blake2b(b'\x03' + sk.verify_key.encode() + b'42' + b'abc 123',
            encoder=Base64Encoder).decode().rstrip('=')
    for k, v in s['swarm'].items():
        assert hash == v['hash']

    # Retrieve it using a subkey
    # c = subkey tag sent to the server; d/D = derived signing scalar/pubkey
    # (presumably -- exact semantics live in the subkey helper module).
    dude_sk = SigningKey.generate()
    c, d, D = subkey.make_subkey(sk, dude_sk.verify_key)
    to_sign = f"retrieve42{ts}".encode()
    sig = subkey.sign(to_sign, dude_sk, d, D)

    r = omq.request_future(conn, 'storage.retrieve', [
        json.dumps({
            "pubkey": '03' + sk.verify_key.encode().hex(),
            "namespace": 42,
            "timestamp": ts,
            "signature": base64.b64encode(sig).decode(),
            "subkey": base64.b64encode(c).decode(),
        }).encode()]).get()

    assert len(r) == 1
    r = json.loads(r[0])
    # Subkey auth only exists from hardfork 19.0 onward.
    assert r["hf"] >= [19, 0]
    assert len(r["messages"]) == 1
    assert r["messages"][0]["hash"] == hash
|
||||
|
||||
def test_store_subkey(omq, random_sn, sk, exclude):
    # Stores a message with a delegated subkey signature, then verifies the
    # master key can retrieve it.
    swarm = ss.get_swarm(omq, random_sn, sk)

    sn = ss.random_swarm_members(swarm, 1, exclude)[0]
    conn = omq.connect_remote(sn_address(sn))

    ts = int(time.time() * 1000)
    ttl = 86400000
    exp = ts + ttl

    dude_sk = SigningKey.generate()
    c, d, D = subkey.make_subkey(sk, dude_sk.verify_key)

    sig = subkey.sign(f"store42{ts}".encode(), dude_sk, d, D)

    # Store a message using the subkey
    s = omq.request_future(conn, 'storage.store', [json.dumps({
        "pubkey": '03' + sk.verify_key.encode().hex(),
        'namespace': 42,
        "timestamp": ts,
        "ttl": ttl,
        "data": base64.b64encode("abc 123".encode()).decode(),
        "subkey": base64.b64encode(c).decode(),
        "signature": base64.b64encode(sig).decode(),
    }).encode()]).get()
    assert len(s) == 1
    s = json.loads(s[0])
    # Subkey auth requires hardfork >= 19.0.
    assert s["hf"] >= [19, 0]

    hash = blake2b(b'\x03' + sk.verify_key.encode() + b'42' + b'abc 123',
            encoder=Base64Encoder).decode().rstrip('=')
    assert len(s["swarm"]) > 0
    for k, v in s['swarm'].items():
        assert hash == v['hash']

    # Retrieve using master key:
    s = omq.request_future(conn, 'storage.retrieve', [json.dumps({
        "pubkey": '03' + sk.verify_key.encode().hex(),
        "namespace": 42,
        "timestamp": ts,
        "signature": sk.sign(f"retrieve42{ts}".encode(), encoder=Base64Encoder).signature.decode(),
    }).encode()]).get()
    assert len(s) == 1
    s = json.loads(s[0])
    assert s["hf"] >= [19, 0]
    assert len(s["messages"]) == 1
    assert s["messages"][0]["hash"] == hash
|
||||
|
||||
def test_expire_subkey(omq, random_sn, sk, exclude):
    # Verifies subkey-authenticated expiry updates: a subkey may *extend* a
    # message's expiry but may not shorten it.
    swarm = ss.get_swarm(omq, random_sn, sk)

    sn = ss.random_swarm_members(swarm, 1, exclude)[0]
    conn = omq.connect_remote(sn_address(sn))

    # Store using the master key
    msgs = ss.store_n(omq, conn, sk, b"omg123", 3, netid=3)

    now = int(time.time() * 1000)
    # store_n apparently stores with short (~1 minute) expiries; sanity-check that.
    for m in msgs:
        assert m["req"]["expiry"] < now + 60_000

    dude_sk = SigningKey.generate()
    c, d, D = subkey.make_subkey(sk, dude_sk.verify_key)

    new_exp = now + 24*60*60*1000

    # Update one of the expiries from ~1min from now -> 1day from now
    sig = subkey.sign(f"expire{new_exp}{msgs[0]['hash']}".encode(), dude_sk, d, D)
    r = omq.request_future(conn, 'storage.expire', [json.dumps({
        "pubkey": '03' + sk.verify_key.encode().hex(),
        'subkey': c.hex(),
        'messages': [msgs[0]['hash']],
        'expiry': new_exp,
        'signature': base64.b64encode(sig).decode(),
    }).encode()]).get()

    assert len(r) == 1
    r = json.loads(r[0])
    assert len(r['swarm']) > 0
    for pk, exp in r['swarm'].items():
        assert exp["expiry"] == new_exp
        assert exp["updated"] == [msgs[0]['hash']]

    # Attempt to update all three, using the subkey, to half a day from now.  msg[0] shouldn't get
    # updated, because subkeys are only allowed to extend.

    new_exp = now + 12*60*60*1000
    sig = subkey.sign(
        f"expire{new_exp}{''.join(m['hash'] for m in msgs)}".encode(), dude_sk, d, D)
    r = omq.request_future(conn, 'storage.expire', [json.dumps({
        "pubkey": '03' + sk.verify_key.encode().hex(),
        'subkey': c.hex(),
        'messages': [m['hash'] for m in msgs],
        'expiry': new_exp,
        'signature': base64.b64encode(sig).decode(),
    }).encode()]).get()

    assert len(r) == 1
    r = json.loads(r[0])
    assert len(r['swarm']) > 0
    # msgs[0] (already at 1 day) must be excluded from 'updated' since 12h would shorten it.
    for pk, exp in r['swarm'].items():
        assert exp["expiry"] == new_exp
        assert set(exp["updated"]) == set([m['hash'] for m in msgs[1:]])
|
||||
|
||||
def test_revoke_subkey(omq, random_sn, sk, exclude):
    # Exercises storage.revoke_subkey: a revoked subkey can no longer retrieve;
    # the revocation list apparently holds 50 entries, so after 50 *additional*
    # revocations the original entry is pushed out and that subkey works again.
    swarm = ss.get_swarm(omq, random_sn, sk, 3)

    sn = ss.random_swarm_members(swarm, 1, exclude)[0]
    conn = omq.connect_remote(sn_address(sn))

    ts = int(time.time() * 1000)
    ttl = 86400000
    exp = ts + ttl

    # Store a message for myself, using master key
    s = omq.request_future(conn, 'storage.store', [json.dumps({
        "pubkey": '03' + sk.verify_key.encode().hex(),
        'namespace': 42,
        "timestamp": ts,
        "ttl": ttl,
        "data": base64.b64encode(b"abc 123").decode(),
        "signature": sk.sign(f"store42{ts}".encode(), encoder=Base64Encoder).signature.decode(),
    }).encode()]).get()
    assert len(s) == 1
    s = json.loads(s[0])
    hash = blake2b(b'\x03' + sk.verify_key.encode() + b'42' + b'abc 123',
            encoder=Base64Encoder).decode().rstrip('=')
    for k, v in s['swarm'].items():
        assert hash == v['hash']

    # Retrieve it using the subkey
    dude_sk = SigningKey.generate()
    c, d, D = subkey.make_subkey(sk, dude_sk.verify_key)
    to_sign = f"retrieve42{ts}".encode()
    sig = subkey.sign(to_sign, dude_sk, d, D)

    r = omq.request_future(conn, 'storage.retrieve', [
        json.dumps({
            "pubkey": '03' + sk.verify_key.encode().hex(),
            "namespace": 42,
            "timestamp": ts,
            "signature": base64.b64encode(sig).decode(),
            "subkey": base64.b64encode(c).decode(),
        }).encode()]).get()

    assert len(r) == 1
    r = json.loads(r[0])
    assert r["hf"] >= [19, 0]
    assert len(r["messages"]) == 1
    assert r["messages"][0]["hash"] == hash

    # Revoke the subkey
    r = omq.request_future(conn, 'storage.revoke_subkey', [
        json.dumps({
            "pubkey": '03' + sk.verify_key.encode().hex(),
            "revoke_subkey": base64.b64encode(c).decode(),
            "signature": sk.sign(f"revoke_subkey".encode() + c, encoder=Base64Encoder).signature.decode()
        }).encode()]).get()
    assert len(r) == 1
    r = json.loads(r[0])

    assert set(r['swarm'].keys()) == {x['pubkey_ed25519'] for x in swarm['snodes']}

    # Check the signature of the revoked subkey response, should be signing ( PUBKEY_HEX || SUBKEY_TAG_BYTES )
    expected_signed = ('03' + sk.verify_key.encode().hex()).encode() + c
    for k, v in r['swarm'].items():
        edpk = VerifyKey(k, encoder=HexEncoder)
        try:
            edpk.verify(expected_signed, base64.b64decode(v['signature']))
        except nacl.exceptions.BadSignatureError as e:
            print("Bad signature from swarm member {}".format(k))
            raise e

    # Try retrieve it again using the subkey, should fail
    r = omq.request_future(conn, 'storage.retrieve', [
        json.dumps({
            "pubkey": '03' + sk.verify_key.encode().hex(),
            "namespace": 42,
            "timestamp": ts,
            "signature": base64.b64encode(sig).decode(),
            "subkey": base64.b64encode(c).decode(),
        }).encode()]).get()
    assert r == [b'401', b'retrieve signature verification failed']

    # Revoke another 49 subkeys, the original subkey should still fail to retrieve the messages
    for i in range (49):
        more_dude_sk = SigningKey.generate()
        more_c, more_d, D = subkey.make_subkey(sk, more_dude_sk.verify_key)
        r = omq.request_future(conn, 'storage.revoke_subkey', [
            json.dumps({
                "pubkey": '03' + sk.verify_key.encode().hex(),
                "revoke_subkey": base64.b64encode(more_c).decode(),
                "signature": sk.sign(f"revoke_subkey".encode() + more_c, encoder=Base64Encoder).signature.decode()
            }).encode()]).get()
        assert len(r) == 1

    # Try retrieve it again using the subkey, should fail again
    r = omq.request_future(conn, 'storage.retrieve', [
        json.dumps({
            "pubkey": '03' + sk.verify_key.encode().hex(),
            "namespace": 42,
            "timestamp": ts,
            "signature": base64.b64encode(sig).decode(),
            "subkey": base64.b64encode(c).decode(),
        }).encode()]).get()
    assert r == [b'401', b'retrieve signature verification failed']

    # Revoke one more subkey, the original subkey should now succeed in retrieving the messages
    # (the original revocation falls off the fixed-size revocation list).
    more_dude_sk = SigningKey.generate()
    more_c, more_d, D = subkey.make_subkey(sk, more_dude_sk.verify_key)
    r = omq.request_future(conn, 'storage.revoke_subkey', [
        json.dumps({
            "pubkey": '03' + sk.verify_key.encode().hex(),
            "revoke_subkey": base64.b64encode(more_c).decode(),
            "signature": sk.sign(f"revoke_subkey".encode() + more_c, encoder=Base64Encoder).signature.decode()
        }).encode()]).get()
    assert len(r) == 1

    # Try retrieve it again using the subkey, should succeed now
    r = omq.request_future(conn, 'storage.retrieve', [
        json.dumps({
            "pubkey": '03' + sk.verify_key.encode().hex(),
            "namespace": 42,
            "timestamp": ts,
            "signature": base64.b64encode(sig).decode(),
            "subkey": base64.b64encode(c).decode(),
        }).encode()]).get()

    assert len(r) == 1
    r = json.loads(r[0])
    assert r["hf"] >= [19, 0]
    assert len(r["messages"]) == 1
    assert r["messages"][0]["hash"] == hash
|
4
network-tests/util.py
Normal file
4
network-tests/util.py
Normal file
|
@ -0,0 +1,4 @@
|
|||
from oxenmq import Address
|
||||
|
||||
def sn_address(sn):
|
||||
return Address(sn['ip'], sn['port_omq'], bytes.fromhex(sn['pubkey_x25519']))
|
59
oxenss/CMakeLists.txt
Normal file
59
oxenss/CMakeLists.txt
Normal file
|
@ -0,0 +1,59 @@
|
|||
include_directories(..)
|
||||
|
||||
add_compile_options(-Wall)
|
||||
|
||||
option(WARNINGS_AS_ERRORS "Treat all compiler warnings as errors." OFF)
|
||||
option(EXTRA_WARNINGS "Enable extra compiler warnings." OFF)
|
||||
|
||||
if(EXTRA_WARNINGS)
|
||||
add_compile_options(-Wextra)
|
||||
endif()
|
||||
if(WARNINGS_AS_ERRORS)
|
||||
add_compile_options(-Werror)
|
||||
endif()
|
||||
|
||||
add_subdirectory(common)
|
||||
add_subdirectory(crypto)
|
||||
add_subdirectory(daemon)
|
||||
add_subdirectory(logging)
|
||||
add_subdirectory(rpc)
|
||||
add_subdirectory(server)
|
||||
add_subdirectory(snode)
|
||||
add_subdirectory(storage)
|
||||
add_subdirectory(utils)
|
||||
|
||||
|
||||
# Build Info
|
||||
if(OXENSS_VERSIONTAG)
|
||||
set(VERSIONTAG "${OXENSS_VERSIONTAG}")
|
||||
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/version.cpp.in" "${CMAKE_CURRENT_BINARY_DIR}/version.cpp")
|
||||
else()
|
||||
set(GIT_INDEX_FILE "${PROJECT_SOURCE_DIR}/.git/index")
|
||||
find_package(Git)
|
||||
if(EXISTS ${GIT_INDEX_FILE} AND ( GIT_FOUND OR Git_FOUND) )
|
||||
message(STATUS "Found Git: ${GIT_EXECUTABLE}")
|
||||
|
||||
set(genversion_args "-DGIT=${GIT_EXECUTABLE}")
|
||||
foreach(v oxenss_VERSION oxenss_VERSION_MAJOR oxenss_VERSION_MINOR oxenss_VERSION_PATCH)
|
||||
list(APPEND genversion_args "-D${v}=${${v}}")
|
||||
endforeach()
|
||||
|
||||
add_custom_command(
|
||||
OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/version.cpp"
|
||||
COMMAND
|
||||
"${CMAKE_COMMAND}"
|
||||
${genversion_args}
|
||||
"-DSRC=${CMAKE_CURRENT_SOURCE_DIR}/version.cpp.in"
|
||||
"-DDEST=${CMAKE_CURRENT_BINARY_DIR}/version.cpp"
|
||||
"-P" "${PROJECT_SOURCE_DIR}/cmake/GenVersion.cmake"
|
||||
DEPENDS
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/version.cpp.in"
|
||||
"${GIT_INDEX_FILE}")
|
||||
else()
|
||||
message(STATUS "Git was not found! Setting version to to nogit")
|
||||
set(VERSIONTAG "nogit")
|
||||
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/version.cpp.in" "${CMAKE_CURRENT_BINARY_DIR}/version.cpp")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
add_library(version STATIC "${CMAKE_CURRENT_BINARY_DIR}/version.cpp")
|
8
oxenss/common/CMakeLists.txt
Normal file
8
oxenss/common/CMakeLists.txt
Normal file
|
@ -0,0 +1,8 @@
|
|||
|
||||
add_library(common STATIC
|
||||
namespace.cpp
|
||||
pubkey.cpp
|
||||
)
|
||||
|
||||
target_link_libraries(common PUBLIC oxen::logging filesystem oxenc::oxenc)
|
||||
target_include_directories(common PUBLIC ${CMAKE_CURRENT_LIST_DIR}/include)
|
6
oxenss/common/format.h
Normal file
6
oxenss/common/format.h
Normal file
|
@ -0,0 +1,6 @@
|
|||
#pragma once
|
||||
|
||||
#include <oxen/log/format.hpp>
|
||||
|
||||
// Make ""_format available globally:
|
||||
using namespace oxen::log::literals;
|
47
oxenss/common/formattable.h
Normal file
47
oxenss/common/formattable.h
Normal file
|
@ -0,0 +1,47 @@
|
|||
#pragma once
|
||||
|
||||
#include <fmt/format.h>
|
||||
#include <type_traits>
|
||||
|
||||
namespace oxen {
|
||||
|
||||
// Types can opt-in to being fmt-formattable by defining a `to_string()` const member function
|
||||
// that returns something string-like. For scoped enums we instead look for a `to_string(Type
|
||||
// t)` function in the same namespace.
|
||||
//
|
||||
// e.g.
|
||||
// template <> inline constexpr bool to_string_formattable<MyType> = true;
|
||||
template <typename T>
|
||||
constexpr bool to_string_formattable = false;
|
||||
|
||||
#ifdef __cpp_lib_is_scoped_enum
|
||||
using std::is_scoped_enum;
|
||||
using std::is_scoped_enum_v;
|
||||
#else
|
||||
template <typename T, bool = std::is_enum_v<T>>
|
||||
struct is_scoped_enum : std::false_type {};
|
||||
|
||||
template <typename T>
|
||||
struct is_scoped_enum<T, true>
|
||||
: std::bool_constant<!std::is_convertible_v<T, std::underlying_type_t<T>>> {};
|
||||
|
||||
template <typename T>
|
||||
constexpr bool is_scoped_enum_v = is_scoped_enum<T>::value;
|
||||
#endif
|
||||
|
||||
} // namespace oxen
|
||||
|
||||
namespace fmt {
|
||||
template <typename T>
|
||||
struct formatter<T, char, std::enable_if_t<oxen::to_string_formattable<T>>>
|
||||
: formatter<std::string_view> {
|
||||
template <typename FormatContext>
|
||||
auto format(const T& val, FormatContext& ctx) const {
|
||||
if constexpr (oxen::is_scoped_enum_v<T>)
|
||||
return formatter<std::string_view>::format(to_string(val), ctx);
|
||||
else
|
||||
return formatter<std::string_view>::format(val.to_string(), ctx);
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace fmt
|
7
oxenss/common/mainnet.h
Normal file
7
oxenss/common/mainnet.h
Normal file
|
@ -0,0 +1,7 @@
|
|||
#pragma once
|
||||
|
||||
namespace oxen {
|
||||
|
||||
inline bool is_mainnet = true;
|
||||
|
||||
}
|
46
oxenss/common/message.h
Normal file
46
oxenss/common/message.h
Normal file
|
@ -0,0 +1,46 @@
|
|||
#pragma once
|
||||
|
||||
#include "namespace.h"
|
||||
#include "pubkey.h"
|
||||
|
||||
#include <chrono>
|
||||
|
||||
namespace oxen {
|
||||
|
||||
/// message received from a client
|
||||
struct message {
|
||||
user_pubkey_t pubkey;
|
||||
std::string hash;
|
||||
namespace_id msg_namespace;
|
||||
std::chrono::system_clock::time_point timestamp;
|
||||
std::chrono::system_clock::time_point expiry;
|
||||
std::string data;
|
||||
|
||||
message() = default;
|
||||
|
||||
message(user_pubkey_t pubkey,
|
||||
std::string hash,
|
||||
namespace_id msg_ns,
|
||||
std::chrono::system_clock::time_point timestamp,
|
||||
std::chrono::system_clock::time_point expiry,
|
||||
std::string data) :
|
||||
pubkey{std::move(pubkey)},
|
||||
hash{std::move(hash)},
|
||||
msg_namespace{msg_ns},
|
||||
timestamp{timestamp},
|
||||
expiry{expiry},
|
||||
data{std::move(data)} {}
|
||||
|
||||
message(std::string hash,
|
||||
namespace_id msg_ns,
|
||||
std::chrono::system_clock::time_point timestamp,
|
||||
std::chrono::system_clock::time_point expiry,
|
||||
std::string data) :
|
||||
hash{std::move(hash)},
|
||||
msg_namespace{msg_ns},
|
||||
timestamp{timestamp},
|
||||
expiry{expiry},
|
||||
data{std::move(data)} {}
|
||||
};
|
||||
|
||||
} // namespace oxen
|
16
oxenss/common/namespace.cpp
Normal file
16
oxenss/common/namespace.cpp
Normal file
|
@ -0,0 +1,16 @@
|
|||
#include "namespace.h"
|
||||
|
||||
#include <cassert>
|
||||
#include <charconv>
|
||||
|
||||
namespace oxen {
|
||||
|
||||
std::string to_string(namespace_id ns) {
|
||||
char buf[6];
|
||||
static_assert(NAMESPACE_MIN >= -99'999 && NAMESPACE_MAX <= 999'999);
|
||||
auto [ptr, ec] = std::to_chars(std::begin(buf), std::end(buf), to_int(ns));
|
||||
assert(ec == std::errc());
|
||||
return std::string(std::begin(buf), ptr - std::begin(buf));
|
||||
}
|
||||
|
||||
} // namespace oxen
|
35
oxenss/common/namespace.h
Normal file
35
oxenss/common/namespace.h
Normal file
|
@ -0,0 +1,35 @@
|
|||
#pragma once
|
||||
|
||||
#include <cstdint>
|
||||
#include <string>
|
||||
#include <type_traits>
|
||||
#include "formattable.h"
|
||||
|
||||
namespace oxen {
|
||||
|
||||
enum class namespace_id : int16_t {
|
||||
Default = 0, // Ordinary Session messages
|
||||
Min = -32768,
|
||||
Max = 32767,
|
||||
SessionSync = 5, // Session sync data for imports & multidevice syncing
|
||||
ClosedV2 = 3, // Reserved for future Session closed group implementations
|
||||
LegacyClosed = -10, // For "old" closed group messages; allows unauthenticated retrieval
|
||||
};
|
||||
|
||||
constexpr bool is_public_namespace(namespace_id ns) {
|
||||
return static_cast<std::underlying_type_t<namespace_id>>(ns) % 10 == 0;
|
||||
}
|
||||
|
||||
constexpr auto to_int(namespace_id ns) {
|
||||
return static_cast<std::underlying_type_t<namespace_id>>(ns);
|
||||
}
|
||||
|
||||
std::string to_string(namespace_id ns);
|
||||
|
||||
constexpr auto NAMESPACE_MIN = to_int(namespace_id::Min);
|
||||
constexpr auto NAMESPACE_MAX = to_int(namespace_id::Max);
|
||||
|
||||
template <>
|
||||
inline constexpr bool to_string_formattable<namespace_id> = true;
|
||||
|
||||
} // namespace oxen
|
57
oxenss/common/pubkey.cpp
Normal file
57
oxenss/common/pubkey.cpp
Normal file
|
@ -0,0 +1,57 @@
|
|||
#include "pubkey.h"
|
||||
#include "mainnet.h"
|
||||
#include <oxenc/hex.h>
|
||||
#include <charconv>
|
||||
#include <cassert>
|
||||
|
||||
namespace oxen {
|
||||
|
||||
user_pubkey_t& user_pubkey_t::load(std::string_view pk) {
|
||||
if (pk.size() == USER_PUBKEY_SIZE_HEX && oxenc::is_hex(pk)) {
|
||||
uint8_t netid;
|
||||
oxenc::from_hex(pk.begin(), pk.begin() + 2, &netid);
|
||||
network_ = netid;
|
||||
pubkey_ = oxenc::from_hex(pk.substr(2));
|
||||
} else if (pk.size() == USER_PUBKEY_SIZE_BYTES) {
|
||||
network_ = static_cast<uint8_t>(pk.front());
|
||||
pubkey_ = pk.substr(1);
|
||||
} else if (!is_mainnet && pk.size() == USER_PUBKEY_SIZE_HEX - 2 && oxenc::is_hex(pk)) {
|
||||
network_ = 5;
|
||||
pubkey_ = oxenc::from_hex(pk);
|
||||
} else if (!is_mainnet && pk.size() == USER_PUBKEY_SIZE_BYTES - 1) {
|
||||
network_ = 5;
|
||||
pubkey_ = pk;
|
||||
} else {
|
||||
network_ = -1;
|
||||
pubkey_.clear();
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
std::string user_pubkey_t::hex() const {
|
||||
return oxenc::to_hex(pubkey_);
|
||||
}
|
||||
|
||||
std::string user_pubkey_t::prefixed_hex() const {
|
||||
std::string hex;
|
||||
if (pubkey_.empty())
|
||||
return hex;
|
||||
hex.reserve(USER_PUBKEY_SIZE_HEX);
|
||||
auto bi = std::back_inserter(hex);
|
||||
if (uint8_t netid = type(); !(netid == 0 && !is_mainnet))
|
||||
oxenc::to_hex(&netid, &netid + 1, bi);
|
||||
oxenc::to_hex(pubkey_.begin(), pubkey_.end(), bi);
|
||||
return hex;
|
||||
}
|
||||
|
||||
std::string user_pubkey_t::prefixed_raw() const {
|
||||
std::string bytes;
|
||||
if (pubkey_.empty())
|
||||
return bytes;
|
||||
bytes.reserve(1 + USER_PUBKEY_SIZE_BYTES);
|
||||
bytes += static_cast<uint8_t>(network_);
|
||||
bytes += pubkey_;
|
||||
return bytes;
|
||||
}
|
||||
|
||||
} // namespace oxen
|
70
oxenss/common/pubkey.h
Normal file
70
oxenss/common/pubkey.h
Normal file
|
@ -0,0 +1,70 @@
|
|||
#pragma once
|
||||
|
||||
#include <string>
|
||||
|
||||
namespace oxen {
|
||||
|
||||
// Network byte + Ed25519 pubkey, encoded in bytes or hex. On testnet we allow the network byte
|
||||
// to be missing (and treat it as an implicit 00).
|
||||
inline constexpr size_t USER_PUBKEY_SIZE_BYTES = 33;
|
||||
inline constexpr size_t USER_PUBKEY_SIZE_HEX = USER_PUBKEY_SIZE_BYTES * 2;
|
||||
|
||||
class user_pubkey_t {
|
||||
int network_ = -1;
|
||||
std::string pubkey_;
|
||||
|
||||
user_pubkey_t(int network, std::string raw_pk) :
|
||||
network_{network}, pubkey_{std::move(raw_pk)} {}
|
||||
|
||||
friend class DatabaseImpl;
|
||||
|
||||
public:
|
||||
// Default constructor; constructs an invalid pubkey
|
||||
user_pubkey_t() = default;
|
||||
|
||||
// bool conversion: returns true if this object contains a valid pubkey
|
||||
explicit operator bool() const { return !pubkey_.empty(); }
|
||||
|
||||
bool operator==(const user_pubkey_t& other) const {
|
||||
return type() == other.type() && raw() == other.raw();
|
||||
}
|
||||
|
||||
// Replaces the stored pubkey with one parsed from the string `pk`. `pk` can be either raw
|
||||
// bytes (33 bytes of netid + pubkey), or hex (66 hex digits). If `pk` is not a valid
|
||||
// pubkey then `this` is put into an invalid-pubkey state (i.e. `(bool)pk` will be false).
|
||||
// Returns a reference to *this (primary that `if (upk.load(pk)) { ... }` can be used to
|
||||
// load-and-test).
|
||||
user_pubkey_t& load(std::string_view pk);
|
||||
|
||||
// Returns the network id (0-255) that is typically prefixed on the beginning of the pubkey
|
||||
// string; currently 5 is used for Session Ed25519 pubkey IDs on mainnet, 0 is used for
|
||||
// Session IDs on testnet. Returns -1 if this object does not contain a valid pubkey.
|
||||
int type() const { return network_; }
|
||||
|
||||
// Returns the user pubkey hex string, not including the network prefix. Returns an empty
|
||||
// string for an invalid (default constructed) pubkey.
|
||||
std::string hex() const;
|
||||
|
||||
// Returns the user pubkey hex string, including the network prefix (unless on testnet with
|
||||
// netid == 0, in which case there is no prefix). Returns an empty string for an invalid
|
||||
// (default constructed) pubkey.
|
||||
std::string prefixed_hex() const;
|
||||
|
||||
// Returns the raw bytes that make up the pubkey (not including the type/network prefix).
|
||||
const std::string& raw() const { return pubkey_; }
|
||||
|
||||
// Returns the raw bytes that makes up the pubkey, including the type/network prefix byte.
|
||||
// Returns an empty string for an invalid (default constructed) pubkey.
|
||||
std::string prefixed_raw() const;
|
||||
};
|
||||
|
||||
} // namespace oxen
|
||||
|
||||
namespace std {
|
||||
template <>
|
||||
struct hash<oxen::user_pubkey_t> {
|
||||
size_t operator()(const oxen::user_pubkey_t& pk) const {
|
||||
return static_cast<size_t>(pk.type()) ^ hash<std::string>{}(pk.raw());
|
||||
}
|
||||
};
|
||||
} // namespace std
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue