Compare commits

...

8 Commits
v2.5.0 ... dev

Author SHA1 Message Date
Jason Rhinelander 0dada891a6
Bump oxen-mq to latest release
Fixes an issue with recent macos not liking oxenmq's RNG code.
2023-07-17 13:54:02 -03:00
Jason Rhinelander ab1a84f6c5
Fix comment about the hash in use 2023-04-21 18:29:40 -03:00
Jason Rhinelander 0ff8fb8922
Remove TIMESTAMP||EXPIRY|| from store endpoint docs
They aren't used anymore as of the latest version.
2023-02-21 18:49:27 -04:00
Jason Rhinelander b0015833f1
RPC doc fixes
- Batch requests now allow up to 20 subrequests, not just 5.
- Add an example showing how different mailbox retrieval limits can
  reasonably differ.
2023-02-21 18:18:05 -04:00
Jason Rhinelander 13effb23e8
Merge pull request #473 from mdPlusPlus/dev
Fix more typos
2023-02-15 21:10:29 -04:00
mdPlusPlus 8e72692dc6 Fix more typos 2023-02-16 00:23:30 +01:00
Jason Rhinelander fc32085574
Merge pull request #471 from mdPlusPlus/dev
Fix typo
2023-02-08 17:59:34 -04:00
mdPlusPlus a0bccc0a38 Fix typo 2023-02-07 18:41:05 +01:00
12 changed files with 25 additions and 21 deletions

2
external/oxen-mq vendored

@@ -1 +1 @@
Subproject commit ac6ef82ff6fd20437b7d073466dbef82a95a2173
Subproject commit 4f6dc35ea13722a5f9dcd0c3d65b6b7ac3d0f0c5

View File

@@ -28,7 +28,7 @@ inline constexpr std::string_view to_string(EncryptType type) {
return ""sv;
}
// Encryption/decription class for encryption/decrypting outgoing/incoming messages.
// Encryption/decryption class for encryption/decrypting outgoing/incoming messages.
class ChannelEncryption {
public:
ChannelEncryption(x25519_seckey private_key, x25519_pubkey public_key, bool server = true) :

View File

@@ -114,10 +114,10 @@ std::array<unsigned char, 32> subkey_verify_key(
const unsigned char* pubkey, const unsigned char* subkey) {
std::array<unsigned char, 32> subkey_pub;
// Need to compute: (c + H("OxenSSSubkey" || c || A)) A and use that instead of A for
// Need to compute: (c + H(c || A, key="OxenSSSubkey")) A and use that instead of A for
// verification:
// H("OxenSSSubkey" || c || A):
// H(c || A, key="OxenSSSubkey"):
crypto_generichash_state h_state;
crypto_generichash_init(
&h_state,

View File

@@ -60,7 +60,7 @@ struct no_args : endpoint {
/// - "timeout": true if the inter-swarm request timed out
/// - "code": X if the inter-swarm request returned error code X
/// - "reason": a reason string, e.g. propagating a thrown exception messages
/// - "bad_peer_response": true if the peer returned an unparseable response
/// - "bad_peer_response": true if the peer returned an unparsable response
/// - "query_failure": true if the database failed to perform the query
struct recursive : endpoint {
// True on the initial client request, false on forwarded requests
@@ -143,7 +143,7 @@ namespace {
/// - "swarm" dict mapping ed25519 pubkeys (in hex) of swarm members to dict values of:
/// - "failed" and other failure keys -- see `recursive`.
/// - "hash": the hash of the stored message; will be an unpadded base64-encode blake2b hash of
/// (TIMESTAMP || EXPIRY || PUBKEY || NAMESPACE || DATA), where PUBKEY is in bytes (not hex!);
/// (PUBKEY || NAMESPACE || DATA), where PUBKEY is in bytes (not hex!);
/// DATA is in bytes (not base64); and NAMESPACE is empty for namespace 0, and otherwise is
/// the decimal representation of the namespace index.
/// - "signature": signature of the returned "hash" value (i.e. not in decoded bytes). Returned
@@ -198,6 +198,10 @@ struct store final : recursive {
/// namespaces then specify `"max_size": -5` on each of them to ensure that, if all are full, you
/// will not exceed network limits.
///
/// Alternatively, if some are expected to be larger than others, you could use different
/// fractions that add up to <= 1. For example, -2 on a large mailbox (for 1/2 the limit) and
/// -10 on five smaller mailboxes so that that maximum returned data is 1/2 + 5*(1/10) = 1.
///
/// When both `max_count` and `max_size` are specified then the returned message count will not
/// exceed either limit.
///
@@ -672,8 +676,8 @@ using client_subrequest = type_list_variant_t<client_rpc_subrequests>;
/// Note that requests may be performed in parallel or out of order; if you need sequential requests
/// use "sequence" instead.
///
/// This request takes an object containing a single key "requests" which contains a list of 1 to 5
/// elements to invoke up to 5 subrequests. Each element is a dict containing keys:
/// This request takes an object containing a single key "requests" which contains a list of 1 to 20
/// elements to invoke up to 20 subrequests. Each element is a dict containing keys:
///
/// - "method" -- the method name, e.g. "retrieve".
/// - "params" -- the parameters to pass to the subrequest.
@@ -712,7 +716,7 @@ using client_subrequest = type_list_variant_t<client_rpc_subrequests>;
/// The batch request itself returns a 200 status code if the batch was processed, regardless of the
/// return value of the individual subrequests (i.e. you get a 200 back even if all subrequests
/// returned error codes). Error statuses are returned only for bad batch requests (e.g. missing
/// method/params arguments, invalid/unparseable subrequests, or too many subrequests).
/// method/params arguments, invalid/unparsable subrequests, or too many subrequests).
///
/// Note that batch requests may not recurse (i.e. you cannot invoke the batch endpoint as a batch
/// subrequest).

View File

@@ -39,7 +39,7 @@ static bool fill_bucket(
TokenBucket& bucket, steady_clock::time_point now, bool service_node = false) {
auto elapsed_us = duration_cast<microseconds>(now - bucket.last_time_point);
// clamp elapsed time to how long it takes to fill up the whole bucket
// (simplifies overlow checking)
// (simplifies overflow checking)
elapsed_us = std::min(elapsed_us, FILL_EMPTY_BUCKET_US);
const auto token_period = service_node ? TOKEN_PERIOD_SN_US : TOKEN_PERIOD_US;

View File

@@ -445,7 +445,7 @@ static void distribute_command(
} catch (const std::exception& e) {
log::warning(
logcat,
"Received unparseable response to {} from {}: {}",
"Received unparsable response to {} from {}: {}",
cmd,
peer.pubkey_legacy,
e.what());
@@ -644,7 +644,7 @@ void RequestHandler::process_client_req(
logcat,
"Invalid oxend response to client request: result is not valid "
"json");
return cb({http::BAD_GATEWAY, "oxend returned unparseable data"s});
return cb({http::BAD_GATEWAY, "oxend returned unparsable data"s});
}
json res{{"result", std::move(result)}};
add_misc_response_fields(res, service_node_);

View File

@@ -52,10 +52,10 @@ inline constexpr auto SIGNATURE_TOLERANCE_FORWARDED = 70s;
// several ways with multiple layers of base64 encoding that can occur: first, for json, the body
// gets base64 encoded, but then onion requests (by default) also b64 encode the encrypted payload,
// so we might have double base64 encoding. We include the first b64 encoding overhead in our size
// calculation, but not the second, and so this value is reduced to accomodate it.
// calculation, but not the second, and so this value is reduced to accommodate it.
//
// The maximum network message size is 10MiB, which means the max before b64 encoding is 7.5MiB
// (7864320). We allow for some respoinse overhead, which lands us on this effective maximum:
// (7864320). We allow for some response overhead, which lands us on this effective maximum:
inline constexpr int RETRIEVE_MAX_SIZE = 7'800'000;
// Maximum subrequests that can be stuffed into a single batch request
@@ -259,7 +259,7 @@ class RequestHandler {
// a failure response with a body of the error string.
void process_oxend_request(const nlohmann::json& params, std::function<void(Response)> cb);
// Test only: retrieve all db entires
// Test only: retrieve all db entries
Response process_retrieve_all();
// The result will arrive asynchronously, so it needs a callback handler

View File

@@ -57,7 +57,7 @@ void OMQ::handle_sn_data(oxenmq::Message& message) {
ss << part;
}
// TODO: proces push batch should move to "Request handler"
// TODO: process push batch should move to "Request handler"
service_node_->process_push_batch(ss.str());
log::debug(logcat, "[LMQ] send reply");
@@ -407,7 +407,7 @@ OMQ::OMQ(
// endpoint:
omq_.add_category("notify", oxenmq::AuthLevel::admin)
.add_request_command("block", [this](auto&&) {
log::debug(logcat, "Recieved new block notification from oxend, updating swarms");
log::debug(logcat, "Received new block notification from oxend, updating swarms");
if (service_node_) service_node_->update_swarms();
});

View File

@@ -21,7 +21,7 @@ namespace {
auto logcat = log::Cat("server");
/* Add extension using V3 code: we can set the config file as NULL
* because we wont reference any other sections.
* because we won't reference any other sections.
*/
int add_ext(X509* cert, int nid, char* value) {

View File

@@ -220,7 +220,7 @@ class ServiceNode {
//
// If this ServiceNode was created with force_start enabled then this function always
// returns true (except when shutting down); the reason string is still set (when non-null)
// when errors would have occured without force_start.
// when errors would have occurred without force_start.
bool snode_ready(std::string* reason = nullptr);
// Puts the storage server into shutdown mode; this operation is irreversible and should

View File

@@ -254,7 +254,7 @@ const SwarmInfo* get_swarm_by_pk(
const uint64_t res = pubkey_to_swarm_space(pk);
// NB: this code used to be far more convoluted by trying to accomodate the INVALID_SWARM_ID
// NB: this code used to be far more convoluted by trying to accommodate the INVALID_SWARM_ID
// value, but that was wrong (because pubkeys map to the *full* uint64_t range, including
// INVALID_SWARM_ID), more complicated, and didn't calculate distances properly when wrapping
// around (in generally, but catastrophically for the INVALID_SWARM_ID value).

View File

@@ -27,7 +27,7 @@ TEST_CASE("onion request - final destination", "[onion][final]") {
}
// Provided "host", so the request should go
// to an extrenal server. Default values will
// to an external server. Default values will
// be used for port and protocol.
TEST_CASE("onion request - relay to server (legacy)", "[onion][relay]") {
auto data = prefix + R"#({