Merge remote-tracking branch 'origin/dev' into stable

Jason Rhinelander 2023-01-25 12:38:03 -04:00
commit 8d236281d0
GPG Key ID: C4992CE7A88D4262
29 changed files with 848 additions and 208 deletions

View File

@ -155,7 +155,7 @@ local static_check_and_upload = [
'echo "Building on ${DRONE_STAGE_MACHINE}"',
apt_get_quiet + ' update',
apt_get_quiet + ' install -y eatmydata',
'eatmydata ' + apt_get_quiet + ' install --no-install-recommends -y git clang-format-11 jsonnet',
'eatmydata ' + apt_get_quiet + ' install --no-install-recommends -y git clang-format-14 jsonnet',
'./contrib/drone-format-verify.sh',
],
}],
@ -164,7 +164,7 @@ local static_check_and_upload = [
// Various debian builds
debian_pipeline('Debian (amd64)', docker_base + 'debian-sid', lto=true),
debian_pipeline('Debian Debug (amd64)', docker_base + 'debian-sid', build_type='Debug'),
clang(13, lto=true),
clang(14, lto=true),
debian_pipeline('Debian stable (i386)', docker_base + 'debian-stable/i386'),
debian_pipeline('Ubuntu LTS (amd64)', docker_base + 'ubuntu-lts'),
debian_pipeline('Ubuntu latest (amd64)', docker_base + 'ubuntu-rolling'),

View File

@ -13,7 +13,7 @@ endif()
cmake_minimum_required(VERSION 3.13)
project(oxenss
VERSION 2.4.0
VERSION 2.5.0
LANGUAGES CXX C)
set(CMAKE_CXX_STANDARD 17)

View File

@ -5,10 +5,10 @@
set(LOCAL_MIRROR "" CACHE STRING "local mirror path/URL for lib downloads")
set(OPENSSL_VERSION 3.0.5 CACHE STRING "openssl version")
set(OPENSSL_VERSION 3.0.7 CACHE STRING "openssl version")
set(OPENSSL_MIRROR ${LOCAL_MIRROR} https://www.openssl.org/source CACHE STRING "openssl download mirror(s)")
set(OPENSSL_SOURCE openssl-${OPENSSL_VERSION}.tar.gz)
set(OPENSSL_HASH SHA256=aa7d8d9bef71ad6525c55ba11e5f4397889ce49c2c9349dcea6d3e4f0b024a7a
set(OPENSSL_HASH SHA256=83049d042a260e696f62406ac5c08bf706fd84383f945cf21bd61e9ed95c396e
CACHE STRING "openssl source hash")
set(SODIUM_VERSION 1.0.18 CACHE STRING "libsodium version")
@ -36,18 +36,18 @@ set(LIBUV_SOURCE libuv-v${LIBUV_VERSION}.tar.gz)
set(LIBUV_HASH SHA512=91197ff9303112567bbb915bbb88058050e2ad1c048815a3b57c054635d5dc7df458b956089d785475290132236cb0edcfae830f5d749de29a9a3213eeaf0b20
CACHE STRING "libuv source hash")
set(ZLIB_VERSION 1.2.12 CACHE STRING "zlib version")
set(ZLIB_VERSION 1.2.13 CACHE STRING "zlib version")
set(ZLIB_MIRROR ${LOCAL_MIRROR} https://zlib.net
CACHE STRING "zlib mirror(s)")
set(ZLIB_SOURCE zlib-${ZLIB_VERSION}.tar.gz)
set(ZLIB_HASH SHA512=cc2366fa45d5dfee1f983c8c51515e0cff959b61471e2e8d24350dea22d3f6fcc50723615a911b046ffc95f51ba337d39ae402131a55e6d1541d3b095d6c0a14
set(ZLIB_SOURCE zlib-${ZLIB_VERSION}.tar.xz)
set(ZLIB_HASH SHA256=d14c38e313afc35a9a8760dadf26042f51ea0f5d154b0630a31da0540107fb98
CACHE STRING "zlib source hash")
set(CURL_VERSION 7.85.0 CACHE STRING "curl version")
set(CURL_VERSION 7.87.0 CACHE STRING "curl version")
set(CURL_MIRROR ${LOCAL_MIRROR} https://curl.se/download https://curl.askapache.com
CACHE STRING "curl mirror(s)")
set(CURL_SOURCE curl-${CURL_VERSION}.tar.xz)
set(CURL_HASH SHA512=b57cc31649a4f47cc4b482f56a85c86c8e8aaeaf01bc1b51b065fdb9145a9092bc52535e52a85a66432eb163605b2edbf5bc5c33ea6e40e50f26a69ad1365cbd
set(CURL_HASH SHA512=aa125991592667280dce3788aabe81487cf8c55b0afc59d675cc30b76055bb7114f5380b4a0e3b6461a8f81bf9812fa26d493a85f7e01d84263d484a0d699ee7
CACHE STRING "curl source hash")
@ -261,11 +261,11 @@ set_target_properties(libzmq PROPERTIES
set(curl_extra)
if(WIN32)
set(curl_ssl_opts --without-ssl --with-schannel)
set(curl_ssl_opts --with-schannel)
elseif(APPLE)
set(curl_ssl_opts --without-ssl --with-secure-transport)
set(curl_ssl_opts --with-secure-transport)
else()
set(curl_ssl_opts --with-ssl=${DEPS_DESTDIR})
set(curl_ssl_opts --with-openssl=${DEPS_DESTDIR})
set(curl_extra "LIBS=-pthread")
endif()
@ -280,7 +280,8 @@ build_external(curl
--enable-http-auth --enable-doh --disable-mime --enable-dateparse --disable-netrc --without-libidn2
--disable-progress-meter --without-brotli --with-zlib=${DEPS_DESTDIR} ${curl_ssl_opts}
--without-librtmp --disable-versioned-symbols --enable-hidden-symbols
--without-zsh-functions-dir --without-fish-functions-dir
--without-zsh-functions-dir --without-fish-functions-dir --without-zstd
--without-nghttp2 --without-nghttp3 --without-ngtcp2 --without-quiche
"CC=${deps_cc}" "CFLAGS=${deps_noarch_CFLAGS}${cflags_extra}" ${curl_extra}
BUILD_COMMAND true
INSTALL_COMMAND make -C lib install && make -C include install
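
A quick way to sanity-check a bumped *_HASH value before committing it is to recompute the tarball digest locally; a minimal Python sketch (the script name and invocation are illustrative, not part of the repo):

#!/usr/bin/env python3
# Recompute a dependency tarball's digest to compare against the pinned
# *_HASH cache value above (e.g. "sha256" for zlib 1.2.13, "sha512" for curl).
import hashlib
import sys

def file_digest(path, algo):
    h = hashlib.new(algo)
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

if __name__ == "__main__":
    # Usage: python3 check_hash.py zlib-1.2.13.tar.xz sha256
    print(file_digest(sys.argv[1], sys.argv[2]))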

View File

@ -9,7 +9,7 @@ anybad=
for bin in oxen-storage; do
bad=
if [ "$DRONE_STAGE_OS" == "darwin" ]; then
if otool -L $bin | grep -Ev '^'$bin':|^\t(/usr/lib/libSystem\.|/usr/lib/libc\+\+\.|/usr/lib/libresolv\.|/System/Library/Frameworks/(CoreFoundation|IOKit|Security|SystemConfiguration))'; then
if otool -L $bin | grep -Ev '^'$bin':|^\s*(/usr/lib/libSystem\.|/usr/lib/libc\+\+\.|/usr/lib/libresolv\.|/System/Library/Frameworks/(CoreFoundation|IOKit|Security|SystemConfiguration))'; then
bad=1
fi
elif [ "$DRONE_STAGE_OS" == "linux" ]; then

View File

@ -1,6 +1,7 @@
#!/usr/bin/env bash
test "x$IGNORE" != "x" && exit 0
set -e
repo=$(readlink -e $(dirname $0)/../)
clang-format-11 -i $(find $repo/{oxenss,unit_test} | grep -E '\.[hc](pp)?$' | grep -v 'Catch2')
clang-format-14 -i $(find $repo/{oxenss,unit_test} | grep -E '\.[hc](pp)?$' | grep -v 'Catch2')
jsonnetfmt -i $repo/.drone.jsonnet
git --no-pager diff --exit-code --color || (echo -ne '\n\n\e[31;1mLint check failed; please run ./contrib/format.sh\e[0m\n\n' ; exit 1)

View File

@ -1,6 +1,6 @@
#!/usr/bin/env bash
CLANG_FORMAT_DESIRED_VERSION=11
CLANG_FORMAT_DESIRED_VERSION=14
binary=$(command -v clang-format-$CLANG_FORMAT_DESIRED_VERSION 2>/dev/null)
if [ $? -ne 0 ]; then

@ -1 +1 @@
Subproject commit 29085f02dd72f427bfb844bcf96c0dea193646b7
Subproject commit 12c17d6eab754908cd88f05d09b9388381e47515

external/oxen-mq vendored

@ -1 +1 @@
Subproject commit a93c16af04eba58fba2bf7f5726c4669c290bd46
Subproject commit ac6ef82ff6fd20437b7d073466dbef82a95a2173

external/oxenc vendored

@ -1 +1 @@
Subproject commit 707a83609fb64d09b61ed1e56c82bf692050d2a1
Subproject commit a869ae2b0152ad70855e3774a425c39a25ae1ca6

View File

@ -58,13 +58,13 @@ def random_swarm_members(swarm, n, exclude={}):
return random.sample([s for s in swarm['snodes'] if s['pubkey_ed25519'] not in exclude], n)
def store_n(omq, conn, sk, basemsg, n, *, offset=0, netid=5):
def store_n(omq, conn, sk, basemsg, n, *, offset=0, netid=5, now=time.time(), ttl=30):
msgs = []
pubkey = chr(netid).encode() + (sk.verify_key if isinstance(sk, SigningKey) else sk.public_key).encode()
for i in range(n):
data = basemsg + f"{i}".encode()
ts = int((time.time() - i) * 1000)
exp = int((time.time() - i + 30) * 1000)
ts = int((now - i) * 1000)
exp = int((now - i + ttl) * 1000)
msgs.append({
"data": data,
"req": {
@ -74,8 +74,7 @@ def store_n(omq, conn, sk, basemsg, n, *, offset=0, netid=5):
"data": base64.b64encode(data).decode()}
})
msgs[-1]['future'] = omq.request_future(conn, "storage.store", [json.dumps(msgs[-1]['req']).encode()])
msgs[-1]['hash'] = blake2b("{}{}".format(ts, exp).encode() + pubkey + msgs[-1]['data'],
encoder=Base64Encoder).decode().rstrip('=')
msgs[-1]['hash'] = blake2b(pubkey + msgs[-1]['data'], encoder=Base64Encoder).decode().rstrip('=')
assert len({m['hash'] for m in msgs}) == len(msgs)
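
This hash change (dropping the "{ts}{exp}" prefix) matches the new computeMessageHash in the C++ changes further down: identical content stored to the same account/namespace now always yields the same id, enabling better de-duplication. A self-contained sketch of the new scheme as these tests compute it (function name and example values are illustrative; nacl here is PyNaCl):

# Post-HF19.3 message hash: netid byte || pubkey || namespace (string form,
# empty for the default namespace 0) || data -- no timestamp/expiry prefix.
from nacl.hash import blake2b
from nacl.encoding import Base64Encoder

def message_hash(netid: int, pubkey: bytes, namespace: int, data: bytes) -> str:
    ns = str(namespace).encode() if namespace != 0 else b''
    return blake2b(bytes([netid]) + pubkey + ns + data,
                   encoder=Base64Encoder).decode().rstrip('=')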

View File

@ -55,9 +55,9 @@ def test_batch_json(omq, random_sn, sk, exclude):
assert s["results"][0]["code"] == 200
assert s["results"][1]["code"] == 200
hash0 = blake2b("{}{}".format(ts, exp).encode() + b'\x03' + sk.verify_key.encode() + b'42' + b'abc 123',
hash0 = blake2b(b'\x03' + sk.verify_key.encode() + b'42' + b'abc 123',
encoder=Base64Encoder).decode().rstrip('=')
hash1 = blake2b("{}{}".format(ts, exp).encode() + b'\x03' + sk.verify_key.encode() + b'42' + b'xyz 123',
hash1 = blake2b(b'\x03' + sk.verify_key.encode() + b'42' + b'xyz 123',
encoder=Base64Encoder).decode().rstrip('=')
assert s["results"][0]["body"]["hash"] == hash0
assert s["results"][1]["body"]["hash"] == hash1
@ -107,9 +107,9 @@ def test_batch_bt(omq, random_sn, sk, exclude):
assert s[b"results"][0][b"code"] == 200
assert s[b"results"][1][b"code"] == 200
hash0 = blake2b("{}{}".format(ts, exp).encode() + b'\x03' + sk.verify_key.encode() + b'42' + b'abc 123',
hash0 = blake2b(b'\x03' + sk.verify_key.encode() + b'42' + b'abc 123',
encoder=Base64Encoder).rstrip(b'=')
hash1 = blake2b("{}{}".format(ts, exp).encode() + b'\x03' + sk.verify_key.encode() + b'42' + b'xyz 123',
hash1 = blake2b(b'\x03' + sk.verify_key.encode() + b'42' + b'xyz 123',
encoder=Base64Encoder).rstrip(b'=')
assert s[b"results"][0][b"body"][b"hash"] == hash0
assert s[b"results"][1][b"body"][b"hash"] == hash1
@ -177,9 +177,9 @@ def test_sequence(omq, random_sn, sk, exclude):
s = json.loads(s[0])
assert "results" in s
assert len(s["results"]) == 5
h0 = blake2b("{}{}".format(ts, exp).encode() + b'\x05' + sk.verify_key.encode() + b'abc 123',
h0 = blake2b(b'\x05' + sk.verify_key.encode() + b'abc 123',
encoder=Base64Encoder).decode().rstrip('=')
h1 = blake2b("{}{}".format(ts, exp).encode() + b'\x05' + sk.verify_key.encode() + b'xyz 123',
h1 = blake2b(b'\x05' + sk.verify_key.encode() + b'xyz 123',
encoder=Base64Encoder).decode().rstrip('=')
assert s["results"][0]["body"]["hash"] == h0
assert s["results"][1]["body"]["messages"] == [{"data": "YWJjIDEyMw==", "expiration": ts + ttl, "hash": h0, "timestamp": ts}]

View File

@ -7,6 +7,7 @@ from nacl.encoding import HexEncoder, Base64Encoder
from nacl.signing import VerifyKey
import nacl.exceptions
def test_expire_all(omq, random_sn, sk, exclude):
swarm = ss.get_swarm(omq, random_sn, sk)
sns = ss.random_swarm_members(swarm, 2, exclude)
@ -19,11 +20,7 @@ def test_expire_all(omq, random_sn, sk, exclude):
ts = msgs[2]['req']['expiry']
to_sign = "expire_all{}".format(ts).encode()
sig = sk.sign(to_sign, encoder=Base64Encoder).signature.decode()
params = json.dumps({
"pubkey": my_ss_id,
"expiry": ts,
"signature": sig
}).encode()
params = json.dumps({"pubkey": my_ss_id, "expiry": ts, "signature": sig}).encode()
resp = omq.request_future(conns[1], 'storage.expire_all', [params]).get()
@ -32,7 +29,6 @@ def test_expire_all(omq, random_sn, sk, exclude):
assert set(r['swarm'].keys()) == {x['pubkey_ed25519'] for x in swarm['snodes']}
# 0 and 1 have later expiries than 2, so they should get updated; 2's expiry is already the
# given value, and 3/4 are <= so shouldn't get updated.
msg_hashes = sorted(msgs[i]['hash'] for i in (0, 1))
@ -44,13 +40,21 @@ def test_expire_all(omq, random_sn, sk, exclude):
edpk = VerifyKey(k, encoder=HexEncoder)
edpk.verify(expected_signed, base64.b64decode(v['signature']))
r = omq.request_future(conns[0], 'storage.retrieve',
[json.dumps({
"pubkey": my_ss_id,
"timestamp": ts,
"signature": sk.sign(f"retrieve{ts}".encode(), encoder=Base64Encoder).signature.decode()
}).encode()]
).get()
r = omq.request_future(
conns[0],
'storage.retrieve',
[
json.dumps(
{
"pubkey": my_ss_id,
"timestamp": ts,
"signature": sk.sign(
f"retrieve{ts}".encode(), encoder=Base64Encoder
).signature.decode(),
}
).encode()
],
).get()
assert len(r) == 1
r = json.loads(r[0])
assert len(r['messages']) == 5
@ -74,11 +78,7 @@ def test_stale_expire_all(omq, random_sn, sk, exclude):
ts = int((time.time() - 120) * 1000)
to_sign = "expire_all{}".format(ts).encode()
sig = sk.sign(to_sign, encoder=Base64Encoder).signature.decode()
params = {
"pubkey": my_ss_id,
"expiry": ts,
"signature": sig
}
params = {"pubkey": my_ss_id, "expiry": ts, "signature": sig}
resp = omq.request_future(conn, 'storage.expire_all', [json.dumps(params).encode()]).get()
assert resp == [b'406', b'expire_all timestamp should be >= current time']
@ -94,7 +94,9 @@ def test_expire(omq, random_sn, sk, exclude):
my_ss_id = '05' + sk.verify_key.encode().hex()
ts = msgs[6]['req']['expiry']
hashes = [msgs[i]['hash'] for i in (0, 1, 5, 6, 7, 9)] + ['bepQtTaYrzcuCXO3fZkmk/h3xkMQ3vCh94i5HzLmj3I']
hashes = [msgs[i]['hash'] for i in (0, 1, 5, 6, 7, 9)] + [
'bepQtTaYrzcuCXO3fZkmk/h3xkMQ3vCh94i5HzLmj3I'
]
# Make sure `hashes` input isn't provided in sorted order:
if hashes[0] < hashes[1]:
hashes[0], hashes[1] = hashes[1], hashes[0]
@ -104,12 +106,9 @@ def test_expire(omq, random_sn, sk, exclude):
hashes = sorted(hashes, reverse=True)
to_sign = ("expire" + str(ts) + "".join(hashes)).encode()
sig = sk.sign(to_sign, encoder=Base64Encoder).signature.decode()
params = json.dumps({
"pubkey": my_ss_id,
"messages": hashes,
"expiry": ts,
"signature": sig
}).encode()
params = json.dumps(
{"pubkey": my_ss_id, "messages": hashes, "expiry": ts, "signature": sig}
).encode()
resp = omq.request_future(conns[1], 'storage.expire', [params]).get()
@ -129,19 +128,29 @@ def test_expire(omq, random_sn, sk, exclude):
print("Bad signature from swarm member {}".format(k))
raise e
r = omq.request_future(conns[0], 'storage.retrieve',
[json.dumps({
"pubkey": my_ss_id,
"timestamp": ts,
"signature": sk.sign(f"retrieve{ts}".encode(), encoder=Base64Encoder).signature.decode()
}).encode()]
).get()
r = omq.request_future(
conns[0],
'storage.retrieve',
[
json.dumps(
{
"pubkey": my_ss_id,
"timestamp": ts,
"signature": sk.sign(
f"retrieve{ts}".encode(), encoder=Base64Encoder
).signature.decode(),
}
).encode()
],
).get()
assert len(r) == 1
r = json.loads(r[0])
assert len(r['messages']) == 10
for i in range(10):
assert r['messages'][i]['expiration'] == ts if i in (0, 1, 5, 6) else msgs[i]['req']['expiry']
assert (
r['messages'][i]['expiration'] == ts if i in (0, 1, 5, 6) else msgs[i]['req']['expiry']
)
def test_expire_extend(omq, random_sn, sk, exclude):
@ -159,41 +168,56 @@ def test_expire_extend(omq, random_sn, sk, exclude):
for m in msgs:
assert m["req"]["expiry"] < now + 60_000
exp_5min = now + 5*60*1000
exp_long = now + 15*24*60*60*1000 # Beyond max TTL, should get shortened to now + max TTL
e = omq.request_future(conn, 'storage.sequence',
[json.dumps({
'requests': [
{
'method': 'expire',
'params': {
"pubkey": my_ss_id,
"messages": [m["hash"] for m in msgs[0:8]],
"expiry": exp_5min,
"signature": sk.sign(f"expire{exp_5min}{''.join(m['hash'] for m in msgs[0:8])}".encode(),
encoder=Base64Encoder).signature.decode(),
}
},
{
'method': 'expire',
'params': {
"pubkey": my_ss_id,
"messages": [m["hash"] for m in msgs[8:]],
"expiry": exp_long,
"signature": sk.sign(f"expire{exp_long}{''.join(m['hash'] for m in msgs[8:])}".encode(),
encoder=Base64Encoder).signature.decode(),
}
},
{
'method': 'retrieve',
'params': {
'pubkey': my_ss_id,
'timestamp': now,
'signature': sk.sign(f"retrieve{now}".encode(), encoder=Base64Encoder).signature.decode(),
}
}
]
})]).get()
exp_5min = now + 5 * 60 * 1000
exp_long = (
now + 31 * 24 * 60 * 60 * 1000
) # Beyond max TTL, should get shortened to now + max TTL
e = omq.request_future(
conn,
'storage.sequence',
[
json.dumps(
{
'requests': [
{
'method': 'expire',
'params': {
"pubkey": my_ss_id,
"messages": [m["hash"] for m in msgs[0:8]],
"expiry": exp_5min,
"signature": sk.sign(
f"expire{exp_5min}{''.join(m['hash'] for m in msgs[0:8])}".encode(),
encoder=Base64Encoder,
).signature.decode(),
},
},
{
'method': 'expire',
'params': {
"pubkey": my_ss_id,
"messages": [m["hash"] for m in msgs[8:]],
"expiry": exp_long,
"signature": sk.sign(
f"expire{exp_long}{''.join(m['hash'] for m in msgs[8:])}".encode(),
encoder=Base64Encoder,
).signature.decode(),
},
},
{
'method': 'retrieve',
'params': {
'pubkey': my_ss_id,
'timestamp': now,
'signature': sk.sign(
f"retrieve{now}".encode(), encoder=Base64Encoder
).signature.decode(),
},
},
]
}
)
],
).get()
assert len(e) == 1
e = json.loads(e[0])
@ -209,15 +233,321 @@ def test_expire_extend(omq, random_sn, sk, exclude):
for s in e[1]['swarm'].values():
# expiry should have been shortened to now + max TTL:
assert s['expiry'] < exp_long
assert abs(s['expiry'] - 1000*(time.time() + 14*24*60*60)) <= 5000
assert abs(s['expiry'] - 1000 * (time.time() + 30 * 24 * 60 * 60)) <= 5000
assert s['updated'] == sorted([m["hash"] for m in msgs[8:]])
assert set(m['hash'] for m in e[2]['messages']) == set(m['hash'] for m in msgs)
exps = { m['hash']: m['expiration'] for m in e[2]['messages'] }
ts = { m['hash']: m['timestamp'] for m in e[2]['messages'] }
exps = {m['hash']: m['expiration'] for m in e[2]['messages']}
ts = {m['hash']: m['timestamp'] for m in e[2]['messages']}
for m in msgs:
assert ts[m['hash']] == m['req']['timestamp']
for m in msgs[0:8]:
assert exps[m['hash']] == exp_5min
for m in msgs[8:]:
assert abs(exps[m['hash']] - 1000*(time.time() + 14*24*60*60)) <= 5000
assert abs(exps[m['hash']] - 1000 * (time.time() + 30 * 24 * 60 * 60)) <= 5000
def test_expire_shorten_extend(omq, random_sn, sk, exclude):
swarm = ss.get_swarm(omq, random_sn, sk)
sn = ss.random_swarm_members(swarm, 1, exclude)[0]
conn = omq.connect_remote(sn_address(sn))
now_s = time.time()
now = int(now_s * 1000)
msgs = ss.store_n(omq, conn, sk, b"omg123", 10, now=now_s, ttl=60)
my_ss_id = '05' + sk.verify_key.encode().hex()
assert [m["req"]["expiry"] for m in msgs] == [now + x * 1000 for x in range(60, 50, -1)]
do_not_exist = [
'///////////////////////////////////////////',
'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopq',
'rstuvwxyz0123456789+/ABCDEFGHIJKLMNOPQRSTUV',
]
dne_sig = ''.join(do_not_exist)
exp_20s = now + 20 * 1000
exp_30s = now + 30 * 1000
exp_45s = now + 45 * 1000
exp_10m = now + 10 * 60 * 1000
e = omq.request_future(
conn,
'storage.sequence',
[
json.dumps(
{
'requests': [
{
# shorten 0-3 from 1min to 30s
'method': 'expire',
'params': {
"pubkey": my_ss_id,
"messages": [m["hash"] for m in msgs[0:4]] + do_not_exist,
"expiry": exp_30s,
"shorten": True,
"signature": sk.sign(
f"expireshorten{exp_30s}{''.join(m['hash'] for m in msgs[0:4])}{dne_sig}".encode(),
encoder=Base64Encoder,
).signature.decode(),
},
},
{
'method': 'get_expiries',
'params': {
"pubkey": my_ss_id,
"messages": [m["hash"] for m in msgs] + do_not_exist,
"timestamp": now,
"signature": sk.sign(
f"get_expiries{now}{''.join(m['hash'] for m in msgs)}{dne_sig}".encode(),
encoder=Base64Encoder,
).signature.decode(),
},
},
{
# shorten 4-7 from 1min to 20s
'method': 'expire',
'params': {
"pubkey": my_ss_id,
"messages": [m["hash"] for m in msgs[4:8]] + do_not_exist,
"expiry": exp_20s,
"shorten": True,
"signature": sk.sign(
f"expireshorten{exp_20s}{''.join(m['hash'] for m in msgs[4:8])}{dne_sig}".encode(),
encoder=Base64Encoder,
).signature.decode(),
},
},
{
'method': 'get_expiries',
'params': {
"pubkey": my_ss_id,
"messages": [m["hash"] for m in msgs] + do_not_exist,
"timestamp": now,
"signature": sk.sign(
f"get_expiries{now}{''.join(m['hash'] for m in msgs)}{dne_sig}".encode(),
encoder=Base64Encoder,
).signature.decode(),
},
},
{
# shorten 6-9 to 10min (from 1min); should all fail to shorten
'method': 'expire',
'params': {
"pubkey": my_ss_id,
"messages": [m["hash"] for m in msgs[6:]],
"expiry": exp_10m,
"shorten": True,
"signature": sk.sign(
f"expireshorten{exp_10m}{''.join(m['hash'] for m in msgs[6:])}".encode(),
encoder=Base64Encoder,
).signature.decode(),
},
},
{
'method': 'get_expiries',
'params': {
"pubkey": my_ss_id,
"messages": [m["hash"] for m in msgs] + do_not_exist,
"timestamp": now,
"signature": sk.sign(
f"get_expiries{now}{''.join(m['hash'] for m in msgs)}{dne_sig}".encode(),
encoder=Base64Encoder,
).signature.decode(),
},
},
{
# shorten 2-5 to 20s; should work for 2-3 (30s) but fail for 4-5
# (already <=20s).
'method': 'expire',
'params': {
"pubkey": my_ss_id,
"messages": [m["hash"] for m in msgs[2:6]] + do_not_exist,
"expiry": exp_20s,
"shorten": True,
"signature": sk.sign(
f"expireshorten{exp_20s}{''.join(m['hash'] for m in msgs[2:6])}{dne_sig}".encode(),
encoder=Base64Encoder,
).signature.decode(),
},
},
{
'method': 'get_expiries',
'params': {
"pubkey": my_ss_id,
"messages": [m["hash"] for m in msgs] + do_not_exist,
"timestamp": now,
"signature": sk.sign(
f"get_expiries{now}{''.join(m['hash'] for m in msgs)}{dne_sig}".encode(),
encoder=Base64Encoder,
).signature.decode(),
},
},
{
# lengthen everything to 45s in extend-only mode; should work for 0-7
# (20s or 30s) but fail for 8-9 (1min)
'method': 'expire',
'params': {
"pubkey": my_ss_id,
"messages": [m["hash"] for m in msgs] + do_not_exist,
"expiry": exp_45s,
"extend": True,
"signature": sk.sign(
f"expireextend{exp_45s}{''.join(m['hash'] for m in msgs)}{dne_sig}".encode(),
encoder=Base64Encoder,
).signature.decode(),
},
},
{
'method': 'retrieve',
'params': {
'pubkey': my_ss_id,
'timestamp': now,
'signature': sk.sign(
f"retrieve{now}".encode(), encoder=Base64Encoder
).signature.decode(),
},
},
]
}
)
],
).get()
assert len(e) == 1
e = json.loads(e[0])
assert [x['code'] for x in e['results']] == [200] * 10
e = [x['body'] for x in e['results']]
e0_exp = {'expiry': exp_30s, 'updated': sorted(m["hash"] for m in msgs[0:4]), 'unchanged': {}}
assert 5 <= len(e[0]['swarm']) <= 10
for snpk, s in e[0]['swarm'].items():
assert s['expiry'] == exp_30s
assert s['updated'] == sorted(m["hash"] for m in msgs[0:4])
assert s['unchanged'] == {}
# signature of ( PUBKEY_HEX || EXPIRY || RMSGs... || UMSGs... || CMSG_EXPs... )
expected_signed = "".join(
[my_ss_id, str(exp_30s)] + [m["hash"] for m in msgs[0:4]] + do_not_exist + s['updated']
).encode()
edpk = VerifyKey(snpk, encoder=HexEncoder)
edpk.verify(expected_signed, base64.b64decode(s['signature']))
assert e[1] == {
"expiries": {
**{m["hash"]: exp_30s for m in msgs[0:4]},
**{msgs[i]["hash"]: now + (60 - i) * 1000 for i in range(4, 10)},
}
}
assert 5 <= len(e[2]['swarm']) <= 10
for snpk, s in e[2]['swarm'].items():
assert s['expiry'] == exp_20s
assert s['updated'] == sorted(m["hash"] for m in msgs[4:8])
assert s['unchanged'] == {}
# signature of ( PUBKEY_HEX || EXPIRY || RMSGs... || UMSGs... || CMSG_EXPs... )
expected_signed = "".join(
[my_ss_id, str(exp_20s)] + [m["hash"] for m in msgs[4:8]] + do_not_exist + s['updated']
).encode()
edpk = VerifyKey(snpk, encoder=HexEncoder)
edpk.verify(expected_signed, base64.b64decode(s['signature']))
assert e[3] == {
"expiries": {
**{m["hash"]: exp_30s for m in msgs[0:4]},
**{m["hash"]: exp_20s for m in msgs[4:8]},
**{msgs[i]["hash"]: now + (60 - i) * 1000 for i in range(8, 10)},
}
}
assert 5 <= len(e[4]['swarm']) <= 10
for snpk, s in e[4]['swarm'].items():
assert s['expiry'] == exp_10m
assert s['updated'] == []
assert s['unchanged'] == {
**{m["hash"]: exp_20s for m in msgs[6:8]},
**{msgs[i]["hash"]: now + (60 - i) * 1000 for i in range(8, 10)},
}
# signature of ( PUBKEY_HEX || EXPIRY || RMSGs... || UMSGs... || CMSG_EXPs... )
expected_signed = "".join(
[my_ss_id, str(exp_10m)]
+ [m["hash"] for m in msgs[6:]]
+ sorted(
[
f"{msgs[6]['hash']}{exp_20s}",
f"{msgs[7]['hash']}{exp_20s}",
f"{msgs[8]['hash']}{now + 52_000}",
f"{msgs[9]['hash']}{now + 51_000}",
]
)
).encode()
edpk = VerifyKey(snpk, encoder=HexEncoder)
edpk.verify(expected_signed, base64.b64decode(s['signature']))
assert e[5] == {
"expiries": {
**{m["hash"]: exp_30s for m in msgs[0:4]},
**{m["hash"]: exp_20s for m in msgs[4:8]},
**{msgs[i]["hash"]: now + (60 - i) * 1000 for i in range(8, 10)},
}
}
assert 5 <= len(e[6]['swarm']) <= 10
for snpk, s in e[6]['swarm'].items():
assert s['expiry'] == exp_20s
assert s['updated'] == sorted(m["hash"] for m in msgs[2:4])
assert s['unchanged'] == {m["hash"]: exp_20s for m in msgs[4:6]}
# signature of ( PUBKEY_HEX || EXPIRY || RMSGs... || UMSGs... || CMSG_EXPs... )
expected_signed = "".join(
[my_ss_id, str(exp_20s)]
+ [m["hash"] for m in msgs[2:6]]
+ do_not_exist
+ sorted(m["hash"] for m in msgs[2:4])
+ sorted([f"{msgs[4]['hash']}{exp_20s}", f"{msgs[5]['hash']}{exp_20s}"])
).encode()
edpk = VerifyKey(snpk, encoder=HexEncoder)
edpk.verify(expected_signed, base64.b64decode(s['signature']))
assert e[7] == {
"expiries": {
**{m["hash"]: exp_30s for m in msgs[0:2]},
**{m["hash"]: exp_20s for m in msgs[2:8]},
**{msgs[i]["hash"]: now + (60 - i) * 1000 for i in range(8, 10)},
}
}
assert 5 <= len(e[8]['swarm']) <= 10
for snpk, s in e[8]['swarm'].items():
assert s['expiry'] == exp_45s
assert s['updated'] == sorted(m["hash"] for m in msgs[0:8])
assert s['unchanged'] == {msgs[i]["hash"]: now + (60 - i) * 1000 for i in range(8, 10)}
# signature of ( PUBKEY_HEX || EXPIRY || RMSGs... || UMSGs... || CMSG_EXPs... )
expected_signed = "".join(
[my_ss_id, str(exp_45s)]
+ [m["hash"] for m in msgs]
+ do_not_exist
+ s['updated']
+ sorted([f"{msgs[8]['hash']}{now + 52_000}", f"{msgs[9]['hash']}{now + 51_000}"])
).encode()
edpk = VerifyKey(snpk, encoder=HexEncoder)
edpk.verify(expected_signed, base64.b64decode(s['signature']))
assert e[9]['hf'] >= [19, 3]
assert now - 60_000 <= e[9]['t'] <= now + 60_000
del e[9]['hf']
del e[9]['t']
expected_expiries = [exp_45s] * 8 + [now + 52_000, now + 51_000]
assert e[9] == {
"messages": [
{
'data': base64.b64encode(msgs[i]['data']).decode(),
'expiration': expected_expiries[i],
'hash': msgs[i]['hash'],
'timestamp': msgs[i]['req']['timestamp'],
}
for i in range(len(msgs))
],
"more": False,
}

View File

@ -128,7 +128,7 @@ def test_ifelse(omq, random_sn, sk, exclude):
})])
def hash(body, ts):
return blake2b(f"{ts}{ts+ttl}".encode() + b'\x05' + sk.verify_key.encode() + body,
return blake2b(b'\x05' + sk.verify_key.encode() + body,
encoder=Base64Encoder).decode().rstrip('=')
for i in range(len(r)):

View File

@ -242,7 +242,7 @@ def test_monitor_push(omq, random_sn, sk, exclude):
s = json.loads(s[0])
hash = (
blake2b(
"{}{}".format(ts, exp).encode() + b'\x03' + sk.verify_key.encode() + b'abc 123',
b'\x03' + sk.verify_key.encode() + b'abc 123',
encoder=Base64Encoder,
)
.decode()

View File

@ -38,7 +38,7 @@ def test_store_ns(omq, random_sn, sk, exclude):
spub = json.loads(spub.get()[0])
hpub = blake2b("{}{}".format(ts, exp).encode() + b'\x05' + sk.verify_key.encode() + b'40' + b'abc 123',
hpub = blake2b(b'\x05' + sk.verify_key.encode() + b'40' + b'abc 123',
encoder=Base64Encoder).decode().rstrip('=')
assert len(spub["swarm"]) == len(swarm['snodes'])
@ -55,7 +55,7 @@ def test_store_ns(omq, random_sn, sk, exclude):
spriv = json.loads(spriv.get()[0])
hpriv = blake2b("{}{}".format(ts, exp).encode() + b'\x05' + sk.verify_key.encode() + b'-42' + b'abc 123',
hpriv = blake2b(b'\x05' + sk.verify_key.encode() + b'-42' + b'abc 123',
encoder=Base64Encoder).decode().rstrip('=')
assert len(spriv["swarm"]) == len(swarm['snodes'])
@ -123,7 +123,7 @@ def test_legacy_closed_ns(omq, random_sn, sk, exclude):
"data": base64.b64encode("blah blah".encode()).decode()})])
sclosed = json.loads(sclosed.get()[0])
hash = blake2b("{}{}".format(ts, exp).encode() + b'\x05' + sk.verify_key.encode() + b'-10' + b'blah blah',
hash = blake2b(b'\x05' + sk.verify_key.encode() + b'-10' + b'blah blah',
encoder=Base64Encoder).decode().rstrip('=')
assert len(sclosed["swarm"]) == len(swarm['snodes'])

View File

@ -25,7 +25,7 @@ def test_store(omq, random_sn, sk, exclude):
assert len(s) == 1
s = json.loads(s[0])
hash = blake2b("{}{}".format(ts, exp).encode() + b'\x05' + sk.verify_key.encode() + b'abc 123',
hash = blake2b(b'\x05' + sk.verify_key.encode() + b'abc 123',
encoder=Base64Encoder).decode().rstrip('=')
assert len(s["swarm"]) == len(swarm['snodes'])
@ -58,7 +58,7 @@ def test_store_retrieve_unauthenticated(omq, random_sn, sk, exclude):
assert len(s) == 1
s = json.loads(s[0])
hash = blake2b("{}{}".format(ts, exp).encode() + b'\x05' + sk.verify_key.encode() + b'abc 123',
hash = blake2b(b'\x05' + sk.verify_key.encode() + b'abc 123',
encoder=Base64Encoder).decode().rstrip('=')
assert all(v['hash'] == hash for v in s['swarm'].values())
@ -98,7 +98,7 @@ def test_store_retrieve_authenticated(omq, random_sn, sk, exclude):
assert len(s1) == 1
s1 = json.loads(s1[0])
hash1 = blake2b("{}{}".format(ts, exp).encode() + b'\x05' + xpk.encode() + b'abc 123',
hash1 = blake2b(b'\x05' + xpk.encode() + b'abc 123',
encoder=Base64Encoder).decode().rstrip('=')
assert all(v['hash'] == hash1 for v in s1['swarm'].values())
@ -107,7 +107,7 @@ def test_store_retrieve_authenticated(omq, random_sn, sk, exclude):
assert len(s2) == 1
s2 = json.loads(s2[0])
hash2 = blake2b("{}{}".format(ts, exp).encode() + b'\x03' + sk.verify_key.encode() + b'def 456',
hash2 = blake2b(b'\x03' + sk.verify_key.encode() + b'def 456',
encoder=Base64Encoder).decode().rstrip('=')
to_sign = "retrieve{}".format(ts).encode()
@ -206,7 +206,7 @@ def test_store_retrieve_multiple(omq, random_sn, sk, exclude):
# Store 6 more messages
basemsg = b'another msg'
new_msgs = ss.store_n(omq, conn2, sk, basemsg, 6, 1)
new_msgs = ss.store_n(omq, conn2, sk, basemsg, 6, offset=1)
# Retrieve using a last_hash so that we should get back only the 6:
resp = omq.request_future(conn1, 'storage.retrieve', [json.dumps({
@ -237,3 +237,64 @@ def test_store_retrieve_multiple(omq, random_sn, sk, exclude):
assert len(r) == 1
r = json.loads(r[0])
assert len(r['messages']) == 11
def test_store_sig_timestamp(omq, random_sn, sk, exclude):
"""Tests that sig_timestamp is used properly for the signature both sig_timestamp and timestamp
are given."""
swarm = ss.get_swarm(omq, random_sn, sk)
sn = ss.random_swarm_members(swarm, 1, exclude)[0]
conn = omq.connect_remote(sn_address(sn))
ts = int(time.time() * 1000)
ns = 123
ttl = 86400000
exp = ts + ttl
# Should be fine: timestamp is current, and we sign with it (so timestamp does double duty
# as both the message timestamp and the signature timestamp):
to_sign = f"store{ns}{ts}".encode()
s = omq.request_future(conn, 'storage.store', [json.dumps({
"pubkey": '05' + sk.verify_key.encode().hex(),
"namespace": ns,
"timestamp": ts,
"ttl": ttl,
"data": base64.b64encode("msg1".encode()).decode(),
"signature": sk.sign(to_sign, encoder=Base64Encoder).signature.decode()
}).encode()]).get()
assert len(s) == 1
s = json.loads(s[0])
assert 'hash' in s
# Simulate a 100s storage delay:
ts -= 100_000
# Fails because timestamp is too old for a store signature:
to_sign = f"store{ns}{ts}".encode()
s = omq.request_future(conn, 'storage.store', [json.dumps({
"pubkey": '05' + sk.verify_key.encode().hex(),
"namespace": ns,
"timestamp": ts,
"ttl": ttl,
"data": base64.b64encode("msg2".encode()).decode(),
"signature": sk.sign(to_sign, encoder=Base64Encoder).signature.decode()
}).encode()]).get()
assert s == [b'406', b'store signature timestamp too far from current time']
# This should work: sig_timestamp is current, timestamp is old:
sig_ts = int(time.time() * 1000)
to_sign = f"store{ns}{sig_ts}".encode()
s = omq.request_future(conn, 'storage.store', [json.dumps({
"pubkey": '05' + sk.verify_key.encode().hex(),
"namespace": ns,
"timestamp": ts,
"sig_timestamp": sig_ts,
"ttl": ttl,
"data": base64.b64encode("msg3".encode()).decode(),
"signature": sk.sign(to_sign, encoder=Base64Encoder).signature.decode()
}).encode()]).get()
assert len(s) == 1
s = json.loads(s[0])

View File

@ -4,9 +4,9 @@ import subkey
import time
import base64
import json
from nacl.encoding import Base64Encoder
from nacl.encoding import Base64Encoder, HexEncoder
from nacl.hash import blake2b
from nacl.signing import SigningKey
from nacl.signing import SigningKey, VerifyKey
import nacl.exceptions
def test_retrieve_subkey(omq, random_sn, sk, exclude):
@ -30,7 +30,7 @@ def test_retrieve_subkey(omq, random_sn, sk, exclude):
}).encode()]).get()
assert len(s) == 1
s = json.loads(s[0])
hash = blake2b("{}{}".format(ts, exp).encode() + b'\x03' + sk.verify_key.encode() + b'42' + b'abc 123',
hash = blake2b(b'\x03' + sk.verify_key.encode() + b'42' + b'abc 123',
encoder=Base64Encoder).decode().rstrip('=')
for k, v in s['swarm'].items():
assert hash == v['hash']
@ -85,7 +85,7 @@ def test_store_subkey(omq, random_sn, sk, exclude):
s = json.loads(s[0])
assert s["hf"] >= [19, 0]
hash = blake2b(f"{ts}{exp}".encode() + b'\x03' + sk.verify_key.encode() + b'42' + b'abc 123',
hash = blake2b(b'\x03' + sk.verify_key.encode() + b'42' + b'abc 123',
encoder=Base64Encoder).decode().rstrip('=')
assert len(s["swarm"]) > 0
for k, v in s['swarm'].items():
@ -181,16 +181,16 @@ def test_revoke_subkey(omq, random_sn, sk, exclude):
}).encode()]).get()
assert len(s) == 1
s = json.loads(s[0])
hash = blake2b("{}{}".format(ts, exp).encode() + b'\x03' + sk.verify_key.encode() + b'42' + b'abc 123',
hash = blake2b(b'\x03' + sk.verify_key.encode() + b'42' + b'abc 123',
encoder=Base64Encoder).decode().rstrip('=')
for k, v in s['swarm'].items():
assert hash == v['hash']
# Retrieve it using the subkey
dude_sk = SigningKey.generate()
c, d, D = make_subkey(sk, dude_sk.verify_key)
c, d, D = subkey.make_subkey(sk, dude_sk.verify_key)
to_sign = f"retrieve42{ts}".encode()
sig = blinded_ed25519_signature(to_sign, dude_sk, d, D)
sig = subkey.sign(to_sign, dude_sk, d, D)
r = omq.request_future(conn, 'storage.retrieve', [
json.dumps({
@ -243,7 +243,7 @@ def test_revoke_subkey(omq, random_sn, sk, exclude):
# Revoke another 49 subkeys, the original subkey should still fail to retrieve the messages
for i in range(49):
more_dude_sk = SigningKey.generate()
more_c, more_d, D = make_subkey(sk, more_dude_sk.verify_key)
more_c, more_d, D = subkey.make_subkey(sk, more_dude_sk.verify_key)
r = omq.request_future(conn, 'storage.revoke_subkey', [
json.dumps({
"pubkey": '03' + sk.verify_key.encode().hex(),
@ -265,7 +265,7 @@ def test_revoke_subkey(omq, random_sn, sk, exclude):
# Revoke one more subkey, the original subkey should now succeed in retrieving the messages
more_dude_sk = SigningKey.generate()
more_c, more_d, D = make_subkey(sk, more_dude_sk.verify_key)
more_c, more_d, D = subkey.make_subkey(sk, more_dude_sk.verify_key)
r = omq.request_future(conn, 'storage.revoke_subkey', [
json.dumps({
"pubkey": '03' + sk.verify_key.encode().hex(),

oxenss/common/format.h (new file)
View File

@ -0,0 +1,6 @@
#pragma once
#include <oxen/log/format.hpp>
// Make ""_format available globally:
using namespace oxen::log::literals;

View File

@ -164,7 +164,9 @@ namespace {
}
#ifndef NDEBUG
constexpr bool check_ascending(std::string_view) { return true; }
constexpr bool check_ascending(std::string_view) {
return true;
}
template <typename... Args>
constexpr bool check_ascending(std::string_view a, std::string_view b, Args&&... args) {
return a < b && check_ascending(b, std::forward<Args>(args)...);
@ -647,14 +649,26 @@ bt_value expire_all::to_bt() const {
template <typename Dict>
static void load(expire_msgs& e, Dict& d) {
auto [expiry, messages, pubkey, pubkey_ed25519, signature, subkey] =
load_fields<TP, Vec<Str>, Str, SV, SV, SV>(
d, "expiry", "messages", "pubkey", "pubkey_ed25519", "signature", "subkey");
auto [expiry, extend, messages, pubkey, pubkey_ed25519, shorten, signature, subkey] =
load_fields<TP, bool, Vec<Str>, Str, SV, bool, SV, SV>(
d,
"expiry",
"extend",
"messages",
"pubkey",
"pubkey_ed25519",
"shorten",
"signature",
"subkey");
load_pk_signature(e, d, pubkey, pubkey_ed25519, signature);
load_subkey(e, d, subkey);
require("expiry", expiry);
e.expiry = std::move(*expiry);
e.shorten = shorten.value_or(false);
e.extend = extend.value_or(false);
if (e.shorten && e.extend)
throw parse_error{"cannot specify both 'shorten' and 'extend'"};
require("messages", messages);
e.messages = std::move(*messages);
if (e.messages.empty())
@ -682,12 +696,38 @@ bt_value expire_msgs::to_bt() const {
if (pubkey_ed25519)
ret["pubkey_ed25519"] = std::string_view{
reinterpret_cast<const char*>(pubkey_ed25519->data()), pubkey_ed25519->size()};
if (shorten)
ret["shorten"] = 1;
if (extend)
ret["extend"] = 1;
if (subkey)
ret["subkey"] =
std::string_view{reinterpret_cast<const char*>(subkey->data()), subkey->size()};
return ret;
}
template <typename Dict>
static void load(get_expiries& ge, Dict& d) {
auto [messages, pubkey, pk_ed25519, sig, subkey, timestamp] =
load_fields<Vec<Str>, Str, SV, SV, SV, TP>(
d, "messages", "pubkey", "pubkey_ed25519", "signature", "subkey", "timestamp");
load_pk_signature(ge, d, pubkey, pk_ed25519, sig);
load_subkey(ge, d, subkey);
require("timestamp", timestamp);
ge.sig_ts = *timestamp;
require("messages", messages);
ge.messages = std::move(*messages);
if (ge.messages.empty())
throw parse_error{"messages does not contain any message hashes"};
}
void get_expiries::load_from(json params) {
load(*this, params);
}
void get_expiries::load_from(bt_dict_consumer params) {
load(*this, params);
}
template <typename Dict>
static void load(get_swarm& g, Dict& d) {
auto [pubKey, pubkey] = load_fields<Str, Str>(d, "pubKey", "pubkey");

View File

@ -521,29 +521,43 @@ struct expire_all final : recursive {
/// on how subkey authentication works.
/// - messages -- array of message hash strings (as provided by the storage server) to update.
/// Messages can be from any namespace(s).
/// - expiry -- the new expiry timestamp (milliseconds since unix epoch). Must be >= 60s ago. This
/// can be used to extend expiries instead of just shortening them. The expiry can be extended to
/// at most the maximum TTL (14 days) from now; specifying a later timestamp will be truncated to
/// the maximum.
/// - expiry -- the new expiry timestamp (milliseconds since unix epoch). Must be >= 60s ago. The
/// new expiry can be anywhere from current time up to the maximum TTL (30 days) from now;
/// specifying a later timestamp will be truncated to the maximum.
/// - shorten -- if provided and set to true then the expiry is only shortened, but not extended.
/// If the expiry is already at or before the given `expiry` timestamp then expiry will not be
/// changed. (This option is only supported starting at network version 19.3). This option is
/// not permitted when using subkey authentication.
/// - extend -- if provided and set to true then the expiry is only extended, but not shortened. If
/// the expiry is already at or beyond the given `expiry` timestamp then expiry will not be
/// changed. (This option is only supported starting at network version 19.3). This option is
/// mutually exclusive with "shorten".
///
/// Note that extend-only mode is always applied when using subkey authentication, but specifying
/// this argument anyway for subkey authentication has two effects: 1) the required signature is
/// different; and 2) "unchanged" will be included in the results.
/// - signature -- Ed25519 signature of:
/// ("expire" || expiry || messages[0] || ... || messages[N])
/// where `expiry` is the expiry timestamp expressed as a string. The signature must be base64
/// encoded (json) or bytes (bt).
/// ("expire" || ShortenOrExtend || expiry || messages[0] || ... || messages[N])
/// where `expiry` is the expiry timestamp expressed as a string. `ShortenOrExtend` is string
/// "shorten" if the shorten option is given (and true), "extend" if `extend` is true, and empty
/// otherwise. The signature must be base64 encoded (json) or bytes (bt).
///
///
/// Returns dict of:
/// - "swarm" dict mapping ed25519 pubkeys (in hex) of swarm members to dict values of:
/// - "failed" and other failure keys -- see `recursive`.
/// - "updated": ascii-sorted list of hashes of matched messages (messages that were not found
/// are not included). When using subkey authentication, only messages that had their
/// expiries extended are included (that is: matched messages that already had a longer expiry
/// are omitted).
/// - "updated": ascii-sorted list of hashes that had their expiries changed (messages that were
/// not found, and messages excluded by the shorten/extend options, are not included).
/// - "unchanged": dict of hashes to current expiries of hashes that were found, but did not get
/// updated expiries due to a given "shorten"/"extend" constraint in the request. This field is
/// only included when the "shorten" or "extend" parameter is explicitly given.
/// - "expiry": the expiry timestamp that was applied (which might be different from the request
/// expiry, e.g. if the requested value exceeded the permitted TTL).
/// - "signature": signature of:
/// ( PUBKEY_HEX || EXPIRY || RMSG[0] || ... || RMSG[N] || UMSG[0] || ... || UMSG[M] )
/// where RMSG are the requested expiry hashes and UMSG are the actual updated hashes. The
/// signature uses the node's ed25519 pubkey.
/// ( PUBKEY_HEX || EXPIRY || RMSGs... || UMSGs... || CMSG_EXPs... )
/// where RMSGs are the requested expiry hashes, UMSGs are the actual updated hashes, and
/// CMSG_EXPs are (HASH || EXPIRY) values, ascii-sorted by hash, for the unchanged message
/// hashes included in the "unchanged" field. The signature uses the node's ed25519 pubkey.
struct expire_msgs final : recursive {
static constexpr auto names() { return NAMES("expire"); }
@ -552,6 +566,8 @@ struct expire_msgs final : recursive {
std::optional<std::array<unsigned char, 32>> subkey;
std::vector<std::string> messages;
std::chrono::system_clock::time_point expiry;
bool shorten = false;
bool extend = false;
std::array<unsigned char, 64> signature;
void load_from(nlohmann::json params) override;
@ -559,6 +575,45 @@ struct expire_msgs final : recursive {
oxenc::bt_value to_bt() const override;
};
/// Retrieves the current expiry timestamps of the given messages.
///
/// Takes parameters of:
/// - `pubkey` -- the account
/// - `pubkey_ed25519` if provided *and* the pubkey has a type 05 (i.e. Session id) then `pubkey`
/// will be interpreted as an `x25519` pubkey derived from this given ed25519 pubkey (which must
/// be 64 hex characters or 32 bytes). *This* pubkey should be used for signing, but must also
/// convert to the given `pubkey` value (without the `05` prefix).
/// - `subkey` (optional) allows authentication using a derived subkey. See `store` for details on
/// how subkey authentication works.
/// - `messages` -- array of message hash strings (as provided by the storage server) to look up.
/// Messages can be from any namespace(s). You may pass a single message id of "all" to retrieve
/// the timestamps of all of the account's messages.
/// - `timestamp` -- the timestamp at which this request was initiated, in milliseconds since unix;
/// must be within ±60s of the current time (as with other signature timestamps, using the server time
/// is recommended).
/// - `signature` -- Ed25519 signature of:
/// ("get_expiries" || timestamp || messages[0] || ... || messages[N])
/// where `timestamp` is expressed as a string (base10). The signature must be base64 encoded
/// (json) or bytes (bt).
///
///
/// Returns dict with keys:
/// - "expiries" sub-dict of messageid => expiry (milliseconds since unix epoch) pairs. Only
/// messages that exist on the server are included.
struct get_expiries final : endpoint {
static constexpr auto names() { return NAMES("get_expiries"); }
user_pubkey_t pubkey;
std::optional<std::array<unsigned char, 32>> pubkey_ed25519;
std::optional<std::array<unsigned char, 32>> subkey;
std::vector<std::string> messages;
std::chrono::system_clock::time_point sig_ts;
std::array<unsigned char, 64> signature;
void load_from(nlohmann::json params) override;
void load_from(oxenc::bt_dict_consumer params) override;
};
/// Retrieves the swarm information for a given pubkey. Takes keys of:
/// - `pubkey` (required) the pubkey to query, in hex (66) or bytes (33).
struct get_swarm final : endpoint {
@ -603,6 +658,7 @@ using client_rpc_subrequests = type_list<
delete_before,
expire_msgs,
expire_all,
get_expiries,
get_swarm,
oxend_request,
info>;
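
The signing rules documented above for `expire` (with the ShortenOrExtend marker) and `get_expiries` are exercised end-to-end in the Python network tests; a condensed, illustrative sketch of just the client-side signing step (function names are not part of the API):

from nacl.encoding import Base64Encoder
from nacl.signing import SigningKey

def sign_expire(sk: SigningKey, expiry_ms: int, hashes: list, shorten=False, extend=False) -> str:
    # ShortenOrExtend is "shorten", "extend", or empty -- and the two flags
    # are mutually exclusive, as documented above.
    assert not (shorten and extend)
    mode = "shorten" if shorten else "extend" if extend else ""
    payload = f"expire{mode}{expiry_ms}{''.join(hashes)}".encode()
    return sk.sign(payload, encoder=Base64Encoder).signature.decode()

def sign_get_expiries(sk: SigningKey, timestamp_ms: int, hashes: list) -> str:
    payload = f"get_expiries{timestamp_ms}{''.join(hashes)}".encode()
    return sk.sign(payload, encoder=Base64Encoder).signature.decode()

The resulting base64 signature then travels in the request alongside pubkey, messages, and expiry/timestamp, exactly as the tests above construct their params dicts.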

View File

@ -10,6 +10,7 @@
#include <oxenss/utils/time.hpp>
#include <oxenss/version.h>
#include <oxenss/common/mainnet.h>
#include <oxenss/common/format.h>
#include <chrono>
#include <future>
@ -185,7 +186,12 @@ namespace {
s = stringified_ints[N - sizeof...(More) - 1].size();
else if constexpr (std::is_convertible_v<T, std::string_view>)
s += std::string_view{val}.size();
else {
else if constexpr (std::is_same_v<T, std::map<std::string, int64_t>>) {
for (auto& [k, v] : val) {
s += k.size();
s += 13; // Enough for unix epoch millisecond values up to 2286
}
} else {
static_assert(
std::is_same_v<T, std::vector<std::string>> ||
std::is_same_v<T, std::vector<std::string_view>>);
@ -208,7 +214,12 @@ namespace {
result += stringified_ints[N - sizeof...(More) - 1];
else if constexpr (std::is_convertible_v<T, std::string_view>)
result += std::string_view{val};
else {
else if constexpr (std::is_same_v<T, std::map<std::string, int64_t>>) {
for (auto& [k, v] : val) {
result += k;
"{}"_format_to(result, v);
}
} else {
static_assert(
std::is_same_v<T, std::vector<std::string>> ||
std::is_same_v<T, std::vector<std::string_view>>);
@ -332,7 +343,8 @@ std::string compute_hash_blake2b_b64(std::vector<std::string_view> parts) {
return b64hash;
}
std::string computeMessageHash(
// FIXME: remove this after fully transitioned to HF19.3
std::string computeMessageHash_old(
system_clock::time_point timestamp,
system_clock::time_point expiry,
const user_pubkey_t& pubkey,
@ -353,6 +365,17 @@ std::string computeMessageHash(
data);
}
std::string computeMessageHash(
const user_pubkey_t& pubkey, namespace_id ns, std::string_view data) {
char netid = static_cast<char>(pubkey.type());
std::array<char, 20> ns_buf;
char* ns_buf_ptr = ns_buf.data();
std::string_view ns_for_hash =
ns != namespace_id::Default ? detail::to_hashable(to_int(ns), ns_buf_ptr) : ""sv;
return compute_hash(
compute_hash_blake2b_b64, std::string_view{&netid, 1}, pubkey.raw(), ns_for_hash, data);
}
RequestHandler::RequestHandler(
snode::ServiceNode& sn, const crypto::ChannelEncryption& ce, crypto::ed25519_seckey edsk) :
service_node_{sn}, channel_cipher_(ce), ed25519_sk_{std::move(edsk)} {
@ -487,8 +510,12 @@ void RequestHandler::process_client_req(rpc::store&& req, std::function<void(Res
return cb(handle_wrong_swarm(req.pubkey));
using namespace std::chrono;
bool public_ns = is_public_namespace(req.msg_namespace);
auto ttl = duration_cast<milliseconds>(req.expiry - req.timestamp);
if (ttl < TTL_MINIMUM || ttl > TTL_MAXIMUM) {
auto max_ttl = (!public_ns && service_node_.hf_at_least(snode::HARDFORK_EXTENDED_PRIVATE_TTL))
? TTL_MAXIMUM_PRIVATE
: TTL_MAXIMUM;
if (ttl < TTL_MINIMUM || ttl > max_ttl) {
log::warning(logcat, "Forbidden. Invalid TTL: {}ms", ttl.count());
return cb(Response{http::FORBIDDEN, "Provided expiry/TTL is not valid."sv});
}
@ -498,7 +525,7 @@ void RequestHandler::process_client_req(rpc::store&& req, std::function<void(Res
return cb(Response{http::NOT_ACCEPTABLE, "Timestamp error: check your clock"sv});
}
if (!is_public_namespace(req.msg_namespace)) {
if (!public_ns) {
if (!req.signature) {
auto err = fmt::format(
"store: signature required to store to namespace {}",
@ -506,14 +533,13 @@ void RequestHandler::process_client_req(rpc::store&& req, std::function<void(Res
log::warning(logcat, err);
return cb(Response{http::UNAUTHORIZED, err});
}
if (req.timestamp < now - SIGNATURE_TOLERANCE ||
req.timestamp > now + SIGNATURE_TOLERANCE) {
if (*req.sig_ts < now - SIGNATURE_TOLERANCE || *req.sig_ts > now + SIGNATURE_TOLERANCE) {
log::debug(
logcat,
"store: invalid timestamp ({}s from now)",
"store: invalid signature timestamp ({}s from now)",
duration_cast<seconds>(*req.sig_ts - now).count());
return cb(
Response{http::NOT_ACCEPTABLE, "store timestamp too far from current time"sv});
return cb(Response{
http::NOT_ACCEPTABLE, "store signature timestamp too far from current time"sv});
}
if (!verify_signature(
@ -524,7 +550,7 @@ void RequestHandler::process_client_req(rpc::store&& req, std::function<void(Res
*req.signature,
"store",
req.msg_namespace == namespace_id::Default ? "" : to_string(req.msg_namespace),
req.timestamp)) {
*req.sig_ts)) {
log::debug(logcat, "store: signature verification failed");
return cb(Response{http::UNAUTHORIZED, "store signature verification failed"sv});
}
@ -537,8 +563,12 @@ void RequestHandler::process_client_req(rpc::store&& req, std::function<void(Res
? res->result["swarm"][service_node_.own_address().pubkey_ed25519.hex()]
: res->result;
// TODO: remove after HF19.3
std::string message_hash =
computeMessageHash(req.timestamp, req.expiry, req.pubkey, req.msg_namespace, req.data);
service_node_.hf_at_least(snode::HARDFORK_HASH_NO_TIME)
? computeMessageHash(req.pubkey, req.msg_namespace, req.data)
: computeMessageHash_old(
req.timestamp, req.expiry, req.pubkey, req.msg_namespace, req.data);
bool new_msg;
bool success = false;
@ -566,13 +596,9 @@ void RequestHandler::process_client_req(rpc::store&& req, std::function<void(Res
req.b64 ? oxenc::to_base64(sig.begin(), sig.end()) : util::view_guts(sig);
if (!new_msg)
mine["already"] = true;
if (entry_router) {
if (entry_router)
// Backwards compat: put the hash at top level, too. TODO: remove eventually
res->result["hash"] = message_hash;
// No longer used, but here to avoid breaking older clients. TODO: remove
// eventually
res->result["difficulty"] = 1;
}
} else {
mine["failed"] = true;
mine["query_failure"] = true;
@ -1091,7 +1117,7 @@ void RequestHandler::process_client_req(rpc::expire_all&& req, std::function<voi
reply_or_fail(std::move(res));
}
void RequestHandler::process_client_req(rpc::expire_msgs&& req, std::function<void(Response)> cb) {
log::debug(logcat, "processing expire_msgs {} request", req.recurse ? "direct" : "forwarded");
log::debug(logcat, "processing expire {} request", req.recurse ? "direct" : "forwarded");
if (!service_node_.is_pubkey_for_us(req.pubkey))
return cb(handle_wrong_swarm(req.pubkey));
@ -1100,12 +1126,25 @@ void RequestHandler::process_client_req(rpc::expire_msgs&& req, std::function<vo
if (req.expiry < now - 1min) {
log::debug(
logcat,
"expire_all: invalid timestamp ({}s ago)",
"expire: invalid timestamp ({}s ago)",
duration_cast<seconds>(now - req.expiry).count());
return cb(
Response{http::UNAUTHORIZED, "expire_msgs timestamp should be >= current time"sv});
return cb(Response{http::UNAUTHORIZED, "expire: timestamp should be >= current time"sv});
}
// TODO: remove after HF19.3
if (!service_node_.hf_at_least(snode::HARDFORK_EXPIRY_SHORTEN_ONLY) &&
(req.shorten || req.extend))
return cb(Response{
http::BAD_REQUEST,
"expire: shorten/extend parameters cannot be used before network version {}.{}"_format(
snode::HARDFORK_EXPIRY_SHORTEN_ONLY.first,
snode::HARDFORK_EXPIRY_SHORTEN_ONLY.second)});
if (req.shorten and req.subkey)
return cb(Response{
http::BAD_REQUEST,
"expire: shorten parameter cannot be used with subkey authentication"sv});
if (!verify_signature(
service_node_.get_db(),
req.pubkey,
@ -1113,10 +1152,13 @@ void RequestHandler::process_client_req(rpc::expire_msgs&& req, std::function<vo
req.subkey,
req.signature,
"expire",
req.shorten ? "shorten"
: req.extend ? "extend"
: "",
req.expiry,
req.messages)) {
log::debug(logcat, "expire_msgs: signature verification failed");
return cb(Response{http::UNAUTHORIZED, "expire_msgs signature verification failed"sv});
log::debug(logcat, "expire: signature verification failed");
return cb(Response{http::UNAUTHORIZED, "expire: signature verification failed"sv});
}
auto [res, lock] = setup_recursive_request(service_node_, req, std::move(cb));
@ -1127,18 +1169,34 @@ void RequestHandler::process_client_req(rpc::expire_msgs&& req, std::function<vo
? res->result["swarm"][service_node_.own_address().pubkey_ed25519.hex()]
: res->result;
auto expiry = std::min(std::chrono::system_clock::now() + TTL_MAXIMUM, req.expiry);
auto max_ttl = service_node_.hf_at_least(snode::HARDFORK_EXTENDED_PRIVATE_TTL)
? TTL_MAXIMUM_PRIVATE
: TTL_MAXIMUM;
auto expiry = std::min(std::chrono::system_clock::now() + max_ttl, req.expiry);
auto updated = service_node_.get_db().update_expiry(
req.pubkey,
req.messages,
expiry,
/*extend_only=*/req.subkey.has_value() // can only extend when using a subkey
);
/*extend_only=*/req.extend || req.subkey,
/*shorten_only=*/req.shorten);
std::sort(updated.begin(), updated.end());
auto sig =
create_signature(ed25519_sk_, req.pubkey.prefixed_hex(), expiry, req.messages, updated);
std::map<std::string, int64_t> unchanged;
if (req.extend || req.shorten) {
std::vector<std::string> unchanged_hashes;
for (const auto& m : req.messages)
if (!std::binary_search(updated.begin(), updated.end(), m))
unchanged_hashes.push_back(m);
if (!unchanged_hashes.empty())
unchanged = service_node_.get_db().get_expiries(req.pubkey, unchanged_hashes);
}
auto sig = create_signature(
ed25519_sk_, req.pubkey.prefixed_hex(), expiry, req.messages, updated, unchanged);
mine["expiry"] = to_epoch_ms(expiry);
mine["updated"] = std::move(updated);
if (req.shorten || req.extend)
mine["unchanged"] = std::move(unchanged);
mine["signature"] = req.b64 ? oxenc::to_base64(sig.begin(), sig.end()) : util::view_guts(sig);
if (req.recurse)
add_misc_response_fields(res->result, service_node_, now);
@ -1147,6 +1205,40 @@ void RequestHandler::process_client_req(rpc::expire_msgs&& req, std::function<vo
reply_or_fail(std::move(res));
}
void RequestHandler::process_client_req(rpc::get_expiries&& req, std::function<void(Response)> cb) {
log::debug(logcat, "processing get_expiries request");
if (!service_node_.is_pubkey_for_us(req.pubkey))
return cb(handle_wrong_swarm(req.pubkey));
auto now = system_clock::now();
if (req.sig_ts < now - SIGNATURE_TOLERANCE || req.sig_ts > now + SIGNATURE_TOLERANCE) {
log::debug(
logcat,
"get_expiries: invalid timestamp ({}s from now)",
duration_cast<seconds>(req.sig_ts - now).count());
return cb(Response{
http::NOT_ACCEPTABLE, "get_expiries timestamp too far from current time"sv});
}
if (!verify_signature(
service_node_.get_db(),
req.pubkey,
req.pubkey_ed25519,
req.subkey,
req.signature,
"get_expiries",
req.sig_ts,
req.messages)) {
log::debug(logcat, "get_expiries: signature verification failed");
return cb(Response{http::UNAUTHORIZED, "get_expiries signature verification failed"sv});
}
json res = json::object();
res["expiries"] = service_node_.get_db().get_expiries(req.pubkey, req.messages);
return cb(Response{http::OK, std::move(res)});
}
void RequestHandler::process_client_req(rpc::batch&& req, std::function<void(rpc::Response)> cb) {
assert(!req.subreqs.empty());

View File

@ -8,6 +8,7 @@
#include <oxenss/snode/service_node.h>
#include <oxenss/utils/string_utils.hpp>
#include <oxenss/server/utils.h>
#include <oxenss/utils/time.hpp>
#include <chrono>
#include <forward_list>
@ -28,18 +29,22 @@ inline constexpr auto TEST_RETRY_INTERVAL = 50ms;
// we give up and send an error response back to the requestor:
inline constexpr auto TEST_RETRY_PERIOD = 55s;
// Minimum and maximum TTL permitted for a message storage request
// Minimum and maximum TTL permitted for storing a new, public message
inline constexpr auto TTL_MINIMUM = 10s;
inline constexpr auto TTL_MAXIMUM = 14 * 24h;
// For messages in a user's control (i.e. new messages in private namespaces, or updating TTLs of
// existing public or private namespace messages) we allow a longer TTL (starting at HF19.3).
inline constexpr auto TTL_MAXIMUM_PRIVATE = 30 * 24h;
// Tolerance for store requests: we don't allow stores with a timestamp more than this into the
// future, and don't allow stores with an expiry in the past by more than this amount.
inline constexpr auto STORE_TOLERANCE = 10s;
// Tolerance for timestamp-dependent, signed requests (such as `delete_all`); we accept the
// initial request if within SIGNATURE_TOLERANCE of now, and accept a recursive request if
// within SIGNATURE_TOLERANCE_FORWARDED (generally slightly larger to account for swarm
// forwarding latency).
// Tolerance for timestamp-dependent, signed requests (such as storing to a non-public namespace,
// `delete_all`, etc.); we accept the initial request if within SIGNATURE_TOLERANCE of now, and
// accept a recursive request if within SIGNATURE_TOLERANCE_FORWARDED (generally slightly larger to
// account for swarm forwarding latency).
inline constexpr auto SIGNATURE_TOLERANCE = 60s;
inline constexpr auto SIGNATURE_TOLERANCE_FORWARDED = 70s;
@ -54,7 +59,7 @@ inline constexpr auto SIGNATURE_TOLERANCE_FORWARDED = 70s;
inline constexpr int RETRIEVE_MAX_SIZE = 7'800'000;
// Maximum subrequests that can be stuffed into a single batch request
inline constexpr size_t BATCH_REQUEST_MAX = 5;
inline constexpr size_t BATCH_REQUEST_MAX = 20;
// Simpler wrapper that works for most of our responses
struct Response {
@ -99,10 +104,7 @@ namespace detail {
}
inline std::string_view to_hashable(
const std::chrono::system_clock::time_point& val, char*& buffer) {
return to_hashable(
std::chrono::duration_cast<std::chrono::milliseconds>(val.time_since_epoch())
.count(),
buffer);
return to_hashable(to_epoch_ms(val), buffer);
}
template <typename T, std::enable_if_t<std::is_convertible_v<T, std::string_view>, int> = 0>
std::string_view to_hashable(const T& value, char*&) {
@ -133,14 +135,18 @@ std::string compute_hash(Func hasher, const T&... args) {
return hasher({detail::to_hashable(args, b)...});
}
/// Computes a message hash using blake2b hash of various messages attributes.
std::string computeMessageHash(
/// Computes a message hash using blake2b hash of various messages attributes. This is the
/// pre-HF19.3 version, which includes timestamp/expiry. (TODO: delete after HF19.3).
std::string computeMessageHash_old(
std::chrono::system_clock::time_point timestamp,
std::chrono::system_clock::time_point expiry,
const user_pubkey_t& pubkey,
namespace_id ns,
std::string_view data);
/// Computes a message hash using blake2b hash of various messages attributes.
std::string computeMessageHash(const user_pubkey_t& pubkey, namespace_id ns, std::string_view data);
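Both variants coexist until HF19.3, so a caller presumably picks one based on the active hardfork; the new hash deliberately omits timestamp/expiry so that identical data re-stored with a different TTL de-duplicates to the same hash. A hypothetical selection helper:

    std::string hash_for(bool after_hf19_3,
                         std::chrono::system_clock::time_point ts,
                         std::chrono::system_clock::time_point exp,
                         const user_pubkey_t& pk,
                         namespace_id ns,
                         std::string_view data) {
        return after_hf19_3 ? computeMessageHash(pk, ns, data)
                            : computeMessageHash_old(ts, exp, pk, ns, data);
    }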
struct OnionRequestMetadata {
crypto::x25519_pubkey ephem_key;
std::function<void(Response)> cb;
@ -198,6 +204,7 @@ class RequestHandler {
void process_client_req(rpc::delete_before&&, std::function<void(Response)> cb);
void process_client_req(rpc::expire_all&&, std::function<void(Response)> cb);
void process_client_req(rpc::expire_msgs&&, std::function<void(Response)> cb);
void process_client_req(rpc::get_expiries&&, std::function<void(Response)> cb);
void process_client_req(rpc::batch&&, std::function<void(Response)> cb);
void process_client_req(rpc::sequence&&, std::function<void(Response)> cb);
void process_client_req(rpc::ifelse&&, std::function<void(Response)> cb);

View File

@ -236,7 +236,7 @@ oxenc::bt_value json_to_bt(nlohmann::json j) {
}
nlohmann::json bt_to_json(oxenc::bt_dict_consumer d) {
nlohmann::json j;
nlohmann::json j = nlohmann::json::object();
while (!d.is_finished()) {
std::string key{d.key()};
if (d.is_string())
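The json::object() initialization matters for the empty case: a default-constructed nlohmann::json is null, so an empty bt_dict_consumer (whose loop never executes) would otherwise round-trip to null instead of {}. For example:

    nlohmann::json a;                             // a.dump() == "null"
    nlohmann::json b = nlohmann::json::object();  // b.dump() == "{}"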

View File

@ -120,7 +120,7 @@ static block_update parse_swarm_update(const std::string& response_body) {
std::map<swarm_id_t, std::vector<sn_record>> swarm_map;
block_update bu;
log::trace(logcat, "swarm repsonse: <{}>", response_body);
log::trace(logcat, "swarm response: <{}>", response_body);
try {
json result = json::parse(response_body, nullptr, true);
@ -146,8 +146,9 @@ static block_update parse_swarm_update(const std::string& response_body) {
total++;
const auto& pk_hex = sn_json.at("service_node_pubkey").get_ref<const std::string&>();
const auto& pk_x25519_hex = sn_json.at("pubkey_x25519").get_ref<const std::string&>();
const auto& pk_ed25519_hex = sn_json.at("pubkey_ed25519").get_ref<const std::string&>();
const auto pk_x25519_hex = sn_json.value<std::string>("pubkey_x25519", "");
const auto pk_ed25519_hex = sn_json.value<std::string>("pubkey_ed25519", "");
if (pk_x25519_hex.empty() || pk_ed25519_hex.empty()) {
// These will always either both be present or neither present. If they are
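Swapping at() for value() is what makes that skip possible: at() throws on a missing key, aborting the whole swarm update, while value() returns the supplied default. A small illustration:

    nlohmann::json sn{{"service_node_pubkey", "aaa"}};
    auto x = sn.value<std::string>("pubkey_x25519", "");  // "" rather than a throw
    // sn.at("pubkey_x25519") would throw json::out_of_range here.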

View File

@ -46,16 +46,18 @@ inline constexpr uint64_t TEST_BLOCKS_BUFFER = 4;
using hf_revision = std::pair<int, int>;
// The earliest hardfork *this* version of storage server will work on:
inline constexpr hf_revision STORAGE_SERVER_HARDFORK = {18, 1};
inline constexpr hf_revision STORAGE_SERVER_HARDFORK = {19, 1};
// The hardfork at which msg namespaces become available for storing messages. (Prior to this we
// ignore any specified namespace value and just store in namespace 0, even if another namespace was
// given).
inline constexpr hf_revision HARDFORK_NAMESPACES = {19, 0};
// The hardfork at which we start allowing 30d TTLs in private namespaces.
inline constexpr hf_revision HARDFORK_EXTENDED_PRIVATE_TTL = {19, 3};
// The hardfork at which we require authentication for (almost) all retrieval. (Message namespace
// -10 is temporarily exempt for closed group backwards support).
inline constexpr hf_revision HARDFORK_RETRIEVE_AUTH = {19, 1};
// The hardfork at which we allow the `shorten=1` argument in expiries to only shorten (but not
// extend) expiries.
inline constexpr hf_revision HARDFORK_EXPIRY_SHORTEN_ONLY = {19, 3};
// Starting at this hf the message hash generator changes to not include timestamp/expiry for better
// de-duplication (this is transparent to clients).
inline constexpr hf_revision HARDFORK_HASH_NO_TIME = {19, 3};
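Since hf_revision is a std::pair<int, int>, each of these gates is just a lexicographic comparison against the current hardfork; a sketch of a typical feature check (the helper name is an assumption):

    inline bool hash_omits_time(hf_revision current) {
        // std::pair compares lexicographically: {19, 3} and {20, 0} both pass.
        return current >= HARDFORK_HASH_NO_TIME;
    }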
class Swarm;

View File

@ -210,6 +210,17 @@ namespace {
return results;
}
// Similar to get_all<K, V>, but returns a std::map<K, V> rather than a std::vector<pair<K, V>>.
template <typename K, typename V, typename... Bind>
std::map<K, V> get_map(SQLite::Statement& st, const Bind&... bind) {
[[maybe_unused]] int i = 1;
(bind_oneshot(st, i, bind), ...);
std::map<K, V> results;
while (st.executeStep())
results[static_cast<K>(st.getColumn(0))] = static_cast<V>(st.getColumn(1));
return results;
}
} // namespace
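A hypothetical use of the new helper, assuming a two-column statement and a db handle in scope:

    SQLite::Statement st{db, "SELECT hash, expiry FROM messages WHERE owner = ?"};
    auto expiries = get_map<std::string, int64_t>(st, owner_id);  // hash -> expiry (ms)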
class DatabaseImpl {
@ -843,14 +854,17 @@ std::vector<std::string> Database::update_expiry(
const user_pubkey_t& pubkey,
const std::vector<std::string>& msg_hashes,
std::chrono::system_clock::time_point new_exp,
bool extend_only) {
bool extend_only,
bool shorten_only) {
auto new_exp_ms = to_epoch_ms(new_exp);
auto expiry_constraint = extend_only ? " AND expiry < ?1"s
: shorten_only ? " AND expiry > ?1"s
: ""s;
if (msg_hashes.size() == 1) {
// Pre-prepared version for the common single hash case
auto st = impl->prepared_st(
"UPDATE messages SET expiry = ? WHERE hash = ?"s +
(extend_only ? " AND expiry < ?1" : "") +
"UPDATE messages SET expiry = ? WHERE hash = ?"s + expiry_constraint +
" AND owner = (SELECT id FROM owners WHERE pubkey = ? AND type = ?)"
" RETURNING hash");
return get_all<std::string>(st, new_exp_ms, msg_hashes[0], pubkey);
@ -861,8 +875,7 @@ std::vector<std::string> Database::update_expiry(
multi_in_query(
"UPDATE messages SET expiry = ?"
" WHERE owner = (SELECT id FROM owners WHERE pubkey = ? AND type = ?)"s +
(extend_only ? " AND expiry < ?1" : "") +
" AND hash IN (", // ?,?,?,...,?
expiry_constraint + " AND hash IN (", // ?,?,?,...,?
msg_hashes.size(),
") RETURNING hash"sv)};
st.bind(1, new_exp_ms);
@ -873,6 +886,31 @@ std::vector<std::string> Database::update_expiry(
return get_all<std::string>(st);
}
std::map<std::string, int64_t> Database::get_expiries(
const user_pubkey_t& pubkey, const std::vector<std::string>& msg_hashes) {
if (msg_hashes.size() == 1) {
// Pre-prepared version for the common single hash case
auto st = impl->prepared_st(
"SELECT hash, expiry FROM messages WHERE hash = ?"
" AND owner = (SELECT id FROM owners WHERE pubkey = ? AND type = ?)");
return get_map<std::string, int64_t>(st, msg_hashes[0], pubkey);
}
SQLite::Statement st{
impl->db,
multi_in_query(
"SELECT hash, expiry FROM messages"
" WHERE owner = (SELECT id FROM owners WHERE pubkey = ? AND type = ?)"
" AND hash IN ("sv, // ?,?,?,...,?
msg_hashes.size(),
")"sv)};
bind_pubkey(st, 1, 2, pubkey);
for (size_t i = 0; i < msg_hashes.size(); i++)
st.bindNoCopy(3 + i, msg_hashes[i]);
return get_map<std::string, int64_t>(st);
}
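A usage sketch of the new lookup (the db handle, pubkey, and hashes here are assumed to be in scope):

    auto exp = db.get_expiries(pubkey, {"hash1", "hash2", "hash3"});
    for (const auto& [hash, expiry_ms] : exp)
        log::debug(logcat, "{} expires at {}", hash, expiry_ms);
    // Hashes that don't exist (or belong to another owner) are simply absent.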
std::vector<std::pair<namespace_id, std::string>> Database::update_all_expiries(
const user_pubkey_t& pubkey, std::chrono::system_clock::time_point new_exp) {
auto new_exp_ms = to_epoch_ms(new_exp);

View File

@ -5,6 +5,7 @@
#include <chrono>
#include <cstdint>
#include <filesystem>
#include <map>
#include <memory>
#include <optional>
#include <string>
@ -128,15 +129,17 @@ class Database {
bool subkey_revoked(const std::array<unsigned char, 32>& revoke_subkey);
// Updates the expiry time of the given messages owned by the given pubkey. Returns a vector of
// hashes of found messages (i.e. hashes that don't exist are not returned).
// hashes of updated messages (i.e. hashes that don't exist, or were not updated, are not
// returned).
//
// If extend_only is given as true, then only messages that have a current expiry less than the
// new expiry are updated (i.e. it will only extend expiries).
// extend_only and shorten_only allow message expiries to only be adjusted in one way or the
// other. They are mutually exclusive.
std::vector<std::string> update_expiry(
const user_pubkey_t& pubkey,
const std::vector<std::string>& msg_hashes,
std::chrono::system_clock::time_point new_exp,
bool extend_only = false);
bool extend_only = false,
bool shorten_only = false);
// Shortens the expiry time of all messages owned by the given pubkey. Expiries can only be
// shortened (i.e. brought closer to now), not extended into the future. Returns a vector of
@ -151,6 +154,11 @@ class Database {
const user_pubkey_t& pubkey,
namespace_id ns,
std::chrono::system_clock::time_point new_exp);
// Retrieves the expiries of messages by hash. Returns a map of hash -> expiry (hashes not
// found are not included).
std::map<std::string, int64_t> get_expiries(
const user_pubkey_t& pubkey, const std::vector<std::string>& msg_hashes);
};
} // namespace oxen
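Hypothetical calls against the extended signature, showing the two mutually exclusive modes:

    db.update_expiry(pk, hashes, exp, /*extend_only=*/true);   // only push expiries later
    db.update_expiry(pk, hashes, exp, /*extend_only=*/false,
                     /*shorten_only=*/true);                   // only pull them earlier
    // Passing both as true is a caller error; the SQL constraint can only go one way.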

View File

@ -1,3 +1,5 @@
#pragma once
#include <chrono>
#include <cstdint>

View File

@ -78,11 +78,7 @@ TEST_CASE("service nodes - message hashing", "[service-nodes][messages]") {
"d9+vvm7X+8vh+jIenJfjxf+8CWER+9adNfb4YUH07I+godNCV0O0J05gzqfKdT7J8MBZzFBtKrbk8oCagPpTs"
"q/wZyYFKFKKD+q+zh704dYBILvs5yXUA96pIAA=");
auto expected = "rY7K5YXNsg7d8LBP6R4OoOr6L7IMFxa3Tr8ca5v5nBI";
CHECK(oxen::rpc::computeMessageHash(timestamp, expiry, pk, oxen::namespace_id::Default, data) ==
expected);
CHECK(oxen::rpc::compute_hash_blake2b_b64(
{std::to_string(oxen::to_epoch_ms(timestamp)) +
std::to_string(oxen::to_epoch_ms(expiry)) + pk.prefixed_raw() + data}) ==
expected);
auto expected = "4sMyAuaZlMwww3oFvfhazfw7ASx/7TDtO+TVc8aAjHs";
CHECK(oxen::rpc::computeMessageHash(pk, oxen::namespace_id::Default, data) == expected);
CHECK(oxen::rpc::compute_hash_blake2b_b64({pk.prefixed_raw() + data}) == expected);
}
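The 43-character (unpadded base64) expected values imply a 32-byte BLAKE2b digest over the concatenated parts. A minimal standalone sketch of an equivalent computation using libsodium, assuming the real compute_hash_blake2b_b64 strips the trailing '=' padding:

    #include <sodium/crypto_generichash.h>
    #include <oxenc/base64.h>
    #include <string>
    #include <string_view>

    std::string blake2b_b64(std::string_view msg) {
        unsigned char out[32];  // 32-byte unkeyed BLAKE2b
        crypto_generichash(out, sizeof(out),
                           reinterpret_cast<const unsigned char*>(msg.data()), msg.size(),
                           /*key=*/nullptr, /*keylen=*/0);
        auto b64 = oxenc::to_base64(std::string_view{
                reinterpret_cast<const char*>(out), sizeof(out)});
        while (!b64.empty() && b64.back() == '=')
            b64.pop_back();  // match the unpadded expected values above
        return b64;
    }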