oxen-core/src/cryptonote_core/cryptonote_core.cpp


// Copyright (c) 2014-2019, The Monero Project
// Copyright (c) 2018, The Loki Project
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers
#include <oxenc/base32z.h>
#include <boost/algorithm/string.hpp>
#include <iomanip>
#include <unordered_set>
#include "epee/string_tools.h"
extern "C" {
#include <sodium.h>
#ifdef ENABLE_SYSTEMD
#include <systemd/sd-daemon.h>
#endif
}
#include <fmt/color.h>
#include <oxenmq/fmt.h>
#include <sqlite3.h>
#include <csignal>
#include "blockchain_db/blockchain_db.h"
#include "blockchain_db/sqlite/db_sqlite.h"
#include "checkpoints/checkpoints.h"
#include "common/base58.h"
#include "common/command_line.h"
#include "common/file.h"
#include "common/fs-format.h"
#include "common/hex.h"
#include "common/i18n.h"
#include "common/notify.h"
#include "common/sha256sum.h"
#include "common/threadpool.h"
#include "crypto/crypto.h"
#include "cryptonote_basic/hardfork.h"
#include "cryptonote_config.h"
#include "cryptonote_core.h"
#include "epee/memwipe.h"
#include "epee/net/local_ip.h"
#include "epee/warnings.h"
#include "logging/oxen_logger.h"
#include "ringct/rctSigs.h"
#include "ringct/rctTypes.h"
#include "uptime_proof.h"
#include "version.h"
DISABLE_VS_WARNINGS(4355)
#define BAD_SEMANTICS_TXES_MAX_SIZE 100
// roughly the minimum number of bytes the block itself serializes to, excluding the miner tx
#define BLOCK_SIZE_SANITY_LEEWAY 100
namespace cryptonote {
static auto logcat = log::Cat("cn");
static auto omqlogcat = log::Cat("omq");
const command_line::arg_descriptor<bool, false> arg_testnet_on = {
"testnet", "Run on testnet. The wallet must be launched with the --testnet flag.", false};
const command_line::arg_descriptor<bool, false> arg_devnet_on = {
"devnet", "Run on devnet. The wallet must be launched with the --devnet flag.", false};
const command_line::arg_descriptor<bool> arg_regtest_on = {
"regtest", "Run in a regression testing mode.", false};
const command_line::arg_descriptor<bool> arg_keep_fakechain = {
"keep-fakechain", "Don't delete any existing database when in fakechain mode.", false};
const command_line::arg_descriptor<difficulty_type> arg_fixed_difficulty = {
"fixed-difficulty", "Fixed difficulty used for testing.", 0};
const command_line::arg_descriptor<bool> arg_dev_allow_local = {
"dev-allow-local-ips",
"Allow a local IPs for local and received service node public IP (for local testing only)",
false};
const command_line::arg_descriptor<std::string, false, true, 2> arg_data_dir = {
"data-dir",
"Specify data directory",
tools::get_default_data_dir().u8string(),
{{&arg_testnet_on, &arg_devnet_on}},
[](std::array<bool, 2> testnet_devnet, bool defaulted, std::string val) -> std::string {
if (testnet_devnet[0])
return (fs::u8path(val) / "testnet").u8string();
else if (testnet_devnet[1])
return (fs::u8path(val) / "devnet").u8string();
return val;
}};
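// Example of the dependent-default resolution above (a sketch; the actual default
// comes from tools::get_default_data_dir(), typically ~/.oxen on Linux):
//   oxend             ->  <data-dir>           (e.g. ~/.oxen)
//   oxend --testnet   ->  <data-dir>/testnet   (e.g. ~/.oxen/testnet)
//   oxend --devnet    ->  <data-dir>/devnet    (e.g. ~/.oxen/devnet)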
const command_line::arg_descriptor<bool> arg_offline = {
"offline", "Do not listen for peers, nor connect to any"};
const command_line::arg_descriptor<size_t> arg_block_download_max_size = {
"block-download-max-size",
"Set maximum size of block download queue in bytes (0 for default)",
0};
static const command_line::arg_descriptor<bool> arg_test_drop_download = {
"test-drop-download",
"For net tests: in download, discard ALL blocks instead checking/saving them (very fast)"};
static const command_line::arg_descriptor<uint64_t> arg_test_drop_download_height = {
"test-drop-download-height",
"Like test-drop-download but discards only after around certain height",
0};
static const command_line::arg_descriptor<uint64_t> arg_fast_block_sync = {
"fast-block-sync", "Sync up most of the way by using embedded, known block hashes.", 1};
static const command_line::arg_descriptor<uint64_t> arg_prep_blocks_threads = {
"prep-blocks-threads",
"Max number of threads to use when preparing block hashes in groups.",
4};
static const command_line::arg_descriptor<uint64_t> arg_show_time_stats = {
"show-time-stats",
"Show time-stats when processing blocks/txs and disk synchronization.",
0};
static const command_line::arg_descriptor<size_t> arg_block_sync_size = {
"block-sync-size",
"How many blocks to sync at once during chain synchronization (0 = adaptive).",
0};
static const command_line::arg_descriptor<bool> arg_pad_transactions = {
"pad-transactions",
"Pad relayed transactions to help defend against traffic volume analysis",
false};
static const command_line::arg_descriptor<size_t> arg_max_txpool_weight = {
"max-txpool-weight", "Set maximum txpool weight in bytes.", DEFAULT_MEMPOOL_MAX_WEIGHT};
static const command_line::arg_descriptor<bool> arg_service_node = {
"service-node", "Run as a service node, option 'service-node-public-ip' must be set"};
static const command_line::arg_descriptor<std::string> arg_public_ip = {
"service-node-public-ip",
"Public IP address on which this service node's services (such as the Loki "
"storage server) are accessible. This IP address will be advertised to the "
"network via the service node uptime proofs. Required if operating as a "
"service node."};
static const command_line::arg_descriptor<uint16_t> arg_storage_server_port = {
"storage-server-port", "Deprecated option, ignored.", 0};
static const command_line::arg_descriptor<uint16_t, false, true, 2> arg_quorumnet_port = {
"quorumnet-port",
"The port on which this service node listen for direct connections from other "
"service nodes for quorum messages. The port must be publicly reachable "
"on the `--service-node-public-ip' address and binds to the p2p IP address."
" Only applies when running as a service node.",
config::QNET_DEFAULT_PORT,
{{&cryptonote::arg_testnet_on, &cryptonote::arg_devnet_on}},
[](std::array<bool, 2> testnet_devnet, bool defaulted, uint16_t val) -> uint16_t {
return defaulted && testnet_devnet[0] ? config::testnet::QNET_DEFAULT_PORT
: defaulted && testnet_devnet[1] ? config::devnet::QNET_DEFAULT_PORT
: val;
}};
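// Note: the transform above uses the same dependent-default pattern as arg_data_dir:
// the per-network default (mainnet/testnet/devnet) is substituted only when the user
// did not pass --quorumnet-port explicitly (i.e. `defaulted` is true); an explicit
// value is always kept as-is.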
static const command_line::arg_descriptor<bool> arg_omq_quorumnet_public{
"lmq-public-quorumnet",
"Allow the curve-enabled quorumnet address (for a Service Node) to be used for public RPC "
"commands as if passed to --lmq-curve-public. "
"Note that even without this option the quorumnet port can be used for RPC commands by "
"--lmq-admin and --lmq-user pubkeys.",
false};
static const command_line::arg_descriptor<std::string> arg_block_notify = {
"block-notify",
"Run a program for each new block, '%s' will be replaced by the block hash",
""};
static const command_line::arg_descriptor<bool> arg_prune_blockchain = {
"prune-blockchain", "Prune blockchain", false};
static const command_line::arg_descriptor<std::string> arg_reorg_notify = {
"reorg-notify",
"Run a program for each reorg, '%s' will be replaced by the split height, "
"'%h' will be replaced by the new blockchain height, and '%n' will be "
"replaced by the number of new blocks in the new chain",
""};
static const command_line::arg_descriptor<bool> arg_keep_alt_blocks = {
"keep-alt-blocks", "Keep alternative blocks on restart", false};
static const command_line::arg_descriptor<uint64_t> arg_store_quorum_history = {
"store-quorum-history",
"Store the service node quorum history for the last N blocks to allow historic quorum "
"lookups "
"(e.g. by a block explorer). Specify the number of blocks of history to store, or 1 to "
"store "
"the entire history. Requires considerably more memory and block chain storage.",
0};
// Stub callbacks that fail if invoked. The stubs are replaced in the
// cryptonote_protocol/quorumnet.cpp glue code.
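// (A minimal sketch of how the glue replaces a stub at startup -- `QnetState` here is
// hypothetical; see quorumnet.cpp for the real types:
//     quorumnet_new = [](core& c) -> void* { return new QnetState{c}; };
//     quorumnet_delete = [](void*& p) { delete static_cast<QnetState*>(p); p = nullptr; };
// )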
[[noreturn]] static void need_core_init(std::string_view stub_name) {
throw std::logic_error(
"Internal error: core callback initialization was not performed for "s +
std::string(stub_name));
}
void (*long_poll_trigger)(tx_memory_pool& pool) = [](tx_memory_pool&) {
need_core_init("long_poll_trigger"sv);
};
quorumnet_new_proc* quorumnet_new = [](core&) -> void* { need_core_init("quorumnet_new"sv); };
quorumnet_init_proc* quorumnet_init = [](core&, void*) { need_core_init("quorumnet_init"sv); };
quorumnet_delete_proc* quorumnet_delete = [](void*&) { need_core_init("quorumnet_delete"sv); };
quorumnet_relay_obligation_votes_proc* quorumnet_relay_obligation_votes =
[](void*, const std::vector<service_nodes::quorum_vote_t>&) {
need_core_init("quorumnet_relay_obligation_votes"sv);
};
quorumnet_send_blink_proc* quorumnet_send_blink =
[](core&, const std::string&) -> std::future<std::pair<blink_result, std::string>> {
need_core_init("quorumnet_send_blink"sv);
};
quorumnet_pulse_relay_message_to_quorum_proc* quorumnet_pulse_relay_message_to_quorum =
[](void*, pulse::message const&, service_nodes::quorum const&, bool) -> void {
need_core_init("quorumnet_pulse_relay_message_to_quorum"sv);
};
//-----------------------------------------------------------------------------------------------
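// NB: the initializer list below relies on C++ members being initialized in class
// declaration order (which this list presumably mirrors): m_mempool and
// m_service_node_list receive a reference to m_blockchain_storage before it is
// constructed, which is safe only because they just store the reference and do not
// use it during construction.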
core::core() :
m_mempool(m_blockchain_storage),
m_service_node_list(m_blockchain_storage),
m_blockchain_storage(m_mempool, m_service_node_list),
m_quorum_cop(*this),
m_miner(this,
[this](const cryptonote::block& b,
uint64_t height,
unsigned int threads,
crypto::hash& hash) {
hash = cryptonote::get_block_longhash_w_blockchain(
m_nettype, &m_blockchain_storage, b, height, threads);
return true;
}),
m_pprotocol(&m_protocol_stub),
m_starter_message_showed(false),
m_target_blockchain_height(0),
m_last_json_checkpoints_update(0),
m_nettype(network_type::UNDEFINED),
m_last_storage_server_ping(0),
m_last_lokinet_ping(0),
m_pad_transactions(false),
ss_version{0},
lokinet_version{0} {
m_checkpoints_updating.clear();
}
void core::set_cryptonote_protocol(i_cryptonote_protocol* pprotocol) {
if (pprotocol)
m_pprotocol = pprotocol;
else
m_pprotocol = &m_protocol_stub;
}
//-----------------------------------------------------------------------------------------------
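// m_checkpoints_updating is used as a non-blocking re-entrancy guard: if another
// thread is already mid-update, test_and_set() returns true and we simply report
// success instead of waiting.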
bool core::update_checkpoints_from_json_file() {
if (m_checkpoints_updating.test_and_set())
return true;
// load json checkpoints every 10min and verify them with respect to what blocks we already have
bool res = true;
if (time(NULL) - m_last_json_checkpoints_update >= 600) {
res = m_blockchain_storage.update_checkpoints_from_json_file(m_checkpoints_path);
m_last_json_checkpoints_update = time(NULL);
}
m_checkpoints_updating.clear();
// if anything fishy happened getting new checkpoints, bring down the house
if (!res) {
graceful_exit();
}
return res;
}
//-----------------------------------------------------------------------------------
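// A sketch of the shutdown sequencing (an assumption based on the calls below): stop
// the miner first, then cancel any long-running blockchain operation so shutdown is
// not blocked behind, e.g., a large batch of block verification.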
void core::stop() {
m_miner.stop();
m_blockchain_storage.cancel();
}
//-----------------------------------------------------------------------------------
void core::init_options(boost::program_options::options_description& desc) {
command_line::add_arg(desc, arg_data_dir);
command_line::add_arg(desc, arg_test_drop_download);
command_line::add_arg(desc, arg_test_drop_download_height);
command_line::add_arg(desc, arg_testnet_on);
command_line::add_arg(desc, arg_devnet_on);
command_line::add_arg(desc, arg_regtest_on);
command_line::add_arg(desc, arg_keep_fakechain);
command_line::add_arg(desc, arg_fixed_difficulty);
command_line::add_arg(desc, arg_dev_allow_local);
command_line::add_arg(desc, arg_prep_blocks_threads);
command_line::add_arg(desc, arg_fast_block_sync);
command_line::add_arg(desc, arg_show_time_stats);
command_line::add_arg(desc, arg_block_sync_size);
command_line::add_arg(desc, arg_offline);
command_line::add_arg(desc, arg_block_download_max_size);
command_line::add_arg(desc, arg_max_txpool_weight);
command_line::add_arg(desc, arg_service_node);
command_line::add_arg(desc, arg_public_ip);
command_line::add_arg(desc, arg_storage_server_port);
command_line::add_arg(desc, arg_quorumnet_port);
command_line::add_arg(desc, arg_pad_transactions);
command_line::add_arg(desc, arg_block_notify);
#if 0 // TODO(oxen): Pruning not supported because of Service Node List
command_line::add_arg(desc, arg_prune_blockchain);
#endif
command_line::add_arg(desc, arg_reorg_notify);
command_line::add_arg(desc, arg_keep_alt_blocks);
command_line::add_arg(desc, arg_store_quorum_history);
command_line::add_arg(desc, arg_omq_quorumnet_public);
miner::init_options(desc);
BlockchainDB::init_options(desc);
}
//-----------------------------------------------------------------------------------------------
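// Note: m_nettype may already be FAKECHAIN at this point (set externally, e.g. by the
// core tests before init); in that case the --testnet/--devnet flags are ignored here.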
bool core::handle_command_line(const boost::program_options::variables_map& vm) {
if (m_nettype != network_type::FAKECHAIN) {
const bool testnet = command_line::get_arg(vm, arg_testnet_on);
const bool devnet = command_line::get_arg(vm, arg_devnet_on);
m_nettype = testnet ? network_type::TESTNET
: devnet ? network_type::DEVNET
: network_type::MAINNET;
}
m_check_uptime_proof_interval.interval(get_net_config().UPTIME_PROOF_CHECK_INTERVAL);
m_config_folder = fs::u8path(command_line::get_arg(vm, arg_data_dir));
test_drop_download_height(command_line::get_arg(vm, arg_test_drop_download_height));
m_pad_transactions = get_arg(vm, arg_pad_transactions);
m_offline = get_arg(vm, arg_offline);
if (command_line::get_arg(vm, arg_test_drop_download))
test_drop_download();
if (command_line::get_arg(vm, arg_dev_allow_local))
m_service_node_list.debug_allow_local_ips = true;
m_service_node = command_line::get_arg(vm, arg_service_node);
if (m_service_node) {
/// TODO: parse these options early, before we start p2p server etc?
m_quorumnet_port = command_line::get_arg(vm, arg_quorumnet_port);
bool args_okay = true;
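// Accumulate every configuration problem before failing, so the operator sees all
// errors in one pass rather than discovering them one restart at a time.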
if (m_quorumnet_port == 0) {
log::error(
logcat,
"Quorumnet port cannot be 0; please specify a valid port to listen on with: "
"'--{} <port>'",
arg_quorumnet_port.name);
args_okay = false;
}
const std::string pub_ip = command_line::get_arg(vm, arg_public_ip);
if (pub_ip.size()) {
if (!epee::string_tools::get_ip_int32_from_string(m_sn_public_ip, pub_ip)) {
log::error(logcat, "Unable to parse IPv4 public address from: {}", pub_ip);
args_okay = false;
}
if (!epee::net_utils::is_ip_public(m_sn_public_ip)) {
if (m_service_node_list.debug_allow_local_ips) {
log::warning(
logcat,
"Address given for public-ip is not public; allowing it because "
"dev-allow-local-ips was specified. This service node WILL NOT WORK ON "
"THE PUBLIC OXEN NETWORK!");
} else {
log::error(
logcat,
"Address given for public-ip is not public: {}",
epee::string_tools::get_ip_string_from_int32(m_sn_public_ip));
args_okay = false;
}
}
} else {
log::error(
logcat,
"Please specify an IPv4 public address which the service node & storage server "
"is accessible from with: '--{} <ip address>'",
arg_public_ip.name);
args_okay = false;
}
if (!args_okay) {
log::error(
logcat,
"IMPORTANT: One or more required service node-related configuration "
"settings/options were omitted or invalid please fix them and restart oxend.");
return false;
}
}
return true;
}
//-----------------------------------------------------------------------------------------------
uint64_t core::get_current_blockchain_height() const {
return m_blockchain_storage.get_current_blockchain_height();
}
//-----------------------------------------------------------------------------------------------
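// (get_tail_id() returns the hash of the top block and writes its height into
// result.first, so the returned pair is {height, hash} of the current chain tip.)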
std::pair<uint64_t, crypto::hash> core::get_blockchain_top() const {
std::pair<uint64_t, crypto::hash> result;
result.second = m_blockchain_storage.get_tail_id(result.first);
return result;
}
//-----------------------------------------------------------------------------------------------
bool core::get_blocks(
uint64_t start_offset,
size_t count,
std::vector<std::pair<std::string, block>>& blocks,
std::vector<std::string>& txs) const {
return m_blockchain_storage.get_blocks(start_offset, count, blocks, txs);
}
//-----------------------------------------------------------------------------------------------
bool core::get_blocks(
uint64_t start_offset,
size_t count,
std::vector<std::pair<std::string, block>>& blocks) const {
return m_blockchain_storage.get_blocks(start_offset, count, blocks);
}
//-----------------------------------------------------------------------------------------------
bool core::get_blocks(uint64_t start_offset, size_t count, std::vector<block>& blocks) const {
return m_blockchain_storage.get_blocks_only(start_offset, count, blocks);
}
//-----------------------------------------------------------------------------------------------
bool core::get_blocks(
const std::vector<crypto::hash>& block_ids,
std::vector<std::pair<std::string, block>>& blocks,
std::unordered_set<crypto::hash>* missed_bs) const {
return m_blockchain_storage.get_blocks(block_ids, blocks, missed_bs);
}
//-----------------------------------------------------------------------------------------------
bool core::get_transactions(
const std::vector<crypto::hash>& txs_ids,
std::vector<std::string>& txs,
std::unordered_set<crypto::hash>* missed_txs) const {
return m_blockchain_storage.get_transactions_blobs(txs_ids, txs, missed_txs);
}
//-----------------------------------------------------------------------------------------------
bool core::get_split_transactions_blobs(
const std::vector<crypto::hash>& txs_ids,
std::vector<std::tuple<crypto::hash, std::string, crypto::hash, std::string>>& txs,
std::unordered_set<crypto::hash>* missed_txs) const {
return m_blockchain_storage.get_split_transactions_blobs(txs_ids, txs, missed_txs);
}
//-----------------------------------------------------------------------------------------------
bool core::get_transactions(
const std::vector<crypto::hash>& txs_ids,
std::vector<transaction>& txs,
std::unordered_set<crypto::hash>* missed_txs) const {
return m_blockchain_storage.get_transactions(txs_ids, txs, missed_txs);
}
//-----------------------------------------------------------------------------------------------
bool core::get_alternative_blocks(std::vector<block>& blocks) const {
return m_blockchain_storage.get_alternative_blocks(blocks);
}
//-----------------------------------------------------------------------------------------------
size_t core::get_alternative_blocks_count() const {
return m_blockchain_storage.get_alternative_blocks_count();
}
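// Renders a compact age string for the systemd status line, e.g. (given now > 0):
//   time_ago_str(now, 0)         == "never"
//   time_ago_str(now, now - 47)  == "47s"
//   time_ago_str(now, now - 125) == "2m5s"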
static std::string time_ago_str(time_t now, time_t then) {
if (then >= now)
return "now"s;
if (then == 0)
return "never"s;
int seconds = now - then;
if (seconds >= 60)
return std::to_string(seconds / 60) + "m" + std::to_string(seconds % 60) + "s";
return std::to_string(seconds % 60) + "s";
}
// Returns whether this service node is currently active
bool core::is_active_sn() const {
auto info = get_my_sn_info();
return (info && info->is_active());
}
// Returns this service node's info, or nullptr if it is not in the service node list
std::shared_ptr<const service_nodes::service_node_info> core::get_my_sn_info() const {
auto& snl = get_service_node_list();
const auto& pubkey = get_service_keys().pub;
auto states = snl.get_service_node_list_state({pubkey});
if (states.empty())
return nullptr;
return states[0].info;
}
// Returns a string for systemd status notifications such as:
// Height: 1234567, SN: active, proof: 55m12s, storage: 4m48s, lokinet: 47s
std::string core::get_status_string() const {
std::string s;
s.reserve(128);
s += 'v';
s += OXEN_VERSION_STR;
s += "; Height: ";
s += std::to_string(get_blockchain_storage().get_current_blockchain_height());
s += ", SN: ";
if (!service_node())
s += "no";
else {
auto& snl = get_service_node_list();
const auto& pubkey = get_service_keys().pub;
auto states = snl.get_service_node_list_state({pubkey});
if (states.empty())
s += "not registered";
else {
auto& info = *states[0].info;
if (!info.is_fully_funded())
s += "awaiting contr.";
else if (info.is_active())
s += "active";
else if (info.is_decommissioned())
s += "decomm.";
uint64_t last_proof = 0;
snl.access_proof(pubkey, [&](auto& proof) { last_proof = proof.timestamp; });
s += ", proof: ";
time_t now = std::time(nullptr);
s += time_ago_str(now, last_proof);
s += ", storage: ";
s += time_ago_str(now, m_last_storage_server_ping);
s += ", lokinet: ";
s += time_ago_str(now, m_last_lokinet_ping);
}
}
return s;
}
//-----------------------------------------------------------------------------------------------
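// Initialize the core: apply command line options, create or load the service
// keys, then open the blockchain, ONS, and SQLite databases.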
bool core::init(
const boost::program_options::variables_map& vm,
const cryptonote::test_options* test_options,
const GetCheckpointsCallback& get_checkpoints /* = nullptr */) {
start_time = std::time(nullptr);
const bool regtest = command_line::get_arg(vm, arg_regtest_on);
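// Unit tests (test_options) and --regtest both run on an isolated fake chain.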
if (test_options != nullptr || regtest) {
m_nettype = network_type::FAKECHAIN;
}
bool r = handle_command_line(vm);
/// Currently terminating before blockchain is initialized results in a crash
/// during deinitialization... TODO: fix that
CHECK_AND_ASSERT_MES(r, false, "Failed to apply command line options.");
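// Read the database and sync tuning options from the command line.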
std::string db_sync_mode = command_line::get_arg(vm, cryptonote::arg_db_sync_mode);
bool db_salvage = command_line::get_arg(vm, cryptonote::arg_db_salvage) != 0;
bool fast_sync = command_line::get_arg(vm, arg_fast_block_sync) != 0;
uint64_t blocks_threads = command_line::get_arg(vm, arg_prep_blocks_threads);
size_t max_txpool_weight = command_line::get_arg(vm, arg_max_txpool_weight);
bool const prune_blockchain = false; /* command_line::get_arg(vm, arg_prune_blockchain); */
bool keep_alt_blocks = command_line::get_arg(vm, arg_keep_alt_blocks);
bool keep_fakechain = command_line::get_arg(vm, arg_keep_fakechain);
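// Service keys are created/loaded even when not running in --service-node
// mode: the x25519 key is also used to offer encrypted (CURVE) LMQ RPC.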
r = init_service_keys();
CHECK_AND_ASSERT_MES(r, false, "Failed to create or load service keys");
if (m_service_node) {
// Only use our service keys for our service node if we are running in SN mode:
m_service_node_list.set_my_service_node_keys(&m_service_keys);
}
auto folder = m_config_folder;
if (m_nettype == network_type::FAKECHAIN)
folder /= "fake";
// make sure the data directory exists, and try to lock it
if (std::error_code ec;
!fs::is_directory(folder, ec) && !fs::create_directories(folder, ec) && ec) {
log::error(logcat, "Failed to create directory {}: {}", folder, ec.message());
return false;
}
std::unique_ptr<BlockchainDB> db(new_db());
if (!db) {
log::error(logcat, "Failed to initialize a database");
return false;
}
auto ons_db_file_path = folder / "ons.db";
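// ONS was formerly called LNS; prefer an existing lns.db so that older data
// directories keep working.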
if (fs::exists(folder / "lns.db"))
ons_db_file_path = folder / "lns.db";
auto sqlite_db_file_path = folder / "sqlite.db";
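// Fakechain runs keep the SQLite database purely in memory so that nothing
// persists between test runs.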
if (m_nettype == network_type::FAKECHAIN) {
sqlite_db_file_path = ":memory:";
}
auto sqliteDB = std::make_shared<cryptonote::BlockchainSQLite>(m_nettype, sqlite_db_file_path);
folder /= db->get_db_name();
log::info(logcat, "Loading blockchain from folder {} ...", folder);
// default to fast:async:1 unless overridden
blockchain_db_sync_mode sync_mode = db_defaultsync;
bool sync_on_blocks = true;
uint64_t sync_threshold = 1;
if (m_nettype == network_type::FAKECHAIN && !keep_fakechain) {
// reset the db by removing the database file before opening it
if (!db->remove_data_file(folder)) {
log::error(logcat, "Failed to remove data file in {}", folder);
return false;
}
fs::remove(ons_db_file_path);
}
try {
uint64_t db_flags = 0;
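// db_sync_mode is a colon- or space-separated triple:
//   [safe|fast|fastest]:[sync|async]:[n_blocks_per_sync]
// e.g. --db-sync-mode fast:async:250 (illustrative value) defers flushes
// and syncs asynchronously every 250 blocks.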
std::vector<std::string> options;
boost::trim(db_sync_mode);
boost::split(options, db_sync_mode, boost::is_any_of(" :"));
const bool db_sync_mode_is_default =
command_line::is_arg_defaulted(vm, cryptonote::arg_db_sync_mode);
for (const auto& option : options)
log::debug(logcat, "option: {}", option);
// default to fast:async:1
const uint64_t DEFAULT_FLAGS = DBF_FAST;
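// Parse the --db-sync-mode option.  Its value has the form
//   [safe|fast|fastest]:[sync|async]:[n[blocks|bytes]]
// (e.g. --db-sync-mode=fast:async:1000blocks).  "safe" fsyncs every stored
// block; "fast" defers the data flush; "fastest" defers both data and
// meta-data flushes.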
if (options.size() == 0) {
// default to fast:async:1
db_flags = DEFAULT_FLAGS;
}
bool safemode = false;
if (options.size() >= 1) {
if (options[0] == "safe") {
safemode = true;
db_flags = DBF_SAFE;
sync_mode = db_sync_mode_is_default ? db_defaultsync : db_nosync;
} else if (options[0] == "fast") {
db_flags = DBF_FAST;
sync_mode = db_sync_mode_is_default ? db_defaultsync : db_async;
} else if (options[0] == "fastest") {
db_flags = DBF_FASTEST;
sync_threshold = 1000; // default to fastest:async:1000
sync_mode = db_sync_mode_is_default ? db_defaultsync : db_async;
} else
db_flags = DEFAULT_FLAGS;
}
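// Second field: "sync" waits for each deferred flush to finish; "async"
// issues the flush without waiting.  Both are ignored in safe mode, which
// syncs every stored block.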
if (options.size() >= 2 && !safemode) {
if (options[1] == "sync")
sync_mode = db_sync_mode_is_default ? db_defaultsync : db_sync;
else if (options[1] == "async")
sync_mode = db_sync_mode_is_default ? db_defaultsync : db_async;
}
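// Third field: how much data may accumulate between flushes.  "1000" or
// "1000blocks" flushes every 1000 blocks; "250000000bytes" flushes roughly
// every 250 MB of block data.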
if (options.size() >= 3 && !safemode) {
char* endptr;
uint64_t threshold = strtoull(options[2].c_str(), &endptr, 0);
if (*endptr == '\0' || !strcmp(endptr, "blocks")) {
sync_on_blocks = true;
sync_threshold = threshold;
} else if (!strcmp(endptr, "bytes")) {
sync_on_blocks = false;
sync_threshold = threshold;
} else {
log::error(logcat, "Invalid db sync mode: {}", options[2]);
return false;
}
}
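// --db-salvage: pass DBF_SALVAGE to the DB layer so that it attempts to
// salvage a possibly-corrupted database on open.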
if (db_salvage)
db_flags |= DBF_SALVAGE;
db->open(folder, m_nettype, db_flags);
if (!db->m_open)
return false;
} catch (const DB_ERROR& e) {
log::error(logcat, "Error opening database: {}", e.what());
return false;
}
m_blockchain_storage.set_user_options(
blocks_threads, sync_on_blocks, sync_threshold, sync_mode, fast_sync);
// We need this hook to get added before the block hook below, so that it fires first and
// catches the start of a reorg before the block hook fires for the block in the reorg.
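// --reorg-notify spec placeholders (substituted when a reorg is detected):
// %s = height of the split point, %h = new chain height, %n = number of
// blocks on the new chain above the split.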
try {
if (!command_line::is_arg_defaulted(vm, arg_reorg_notify))
m_blockchain_storage.hook_block_post_add(
[this, notify = tools::Notify(command_line::get_arg(vm, arg_reorg_notify))](
const auto& info) {
if (!info.reorg)
return;
auto h = get_current_blockchain_height();
notify.notify(
"%s", info.split_height, "%h", h, "%n", h - info.split_height);
});
} catch (const std::exception& e) {
log::error(logcat, "Failed to parse reorg notify spec");
}
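// --block-notify: %s in the spec is replaced with the hex hash of each newly
// added block.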
try {
if (!command_line::is_arg_defaulted(vm, arg_block_notify))
m_blockchain_storage.hook_block_post_add(
[notify = tools::Notify(command_line::get_arg(vm, arg_block_notify))](
const auto& info) {
notify.notify("%s", tools::type_to_hex(get_block_hash(info.block)));
});
} catch (const std::exception& e) {
log::error(logcat, "Failed to parse block rate notify spec");
}
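// For --regtest, build a hard-fork table that replays every mainnet fork at
// consecutive heights (0, 1, 2, ...) starting now, so a fresh regtest chain
// runs under the newest rules almost immediately.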
cryptonote::test_options regtest_test_options{};
for (auto [it, end] = get_hard_forks(network_type::MAINNET); it != end; it++) {
regtest_test_options.hard_forks.push_back(hard_fork{
it->version,
it->snode_revision,
regtest_test_options.hard_forks.size(),
std::time(nullptr)});
}
// Service Nodes
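// Hooks of the same type fire in the order they are registered, which is why
// the ordering notes among the registrations below matter.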
m_service_node_list.set_quorum_history_storage(
command_line::get_arg(vm, arg_store_quorum_history));
// NOTE: Implicit dependency. Service node list needs to be hooked before checkpoints.
m_blockchain_storage.hook_blockchain_detached(
[this](const auto& info) { m_service_node_list.blockchain_detached(info.height); });
m_blockchain_storage.hook_init([this] { m_service_node_list.init(); });
m_blockchain_storage.hook_validate_miner_tx(
[this](const auto& info) { m_service_node_list.validate_miner_tx(info); });
m_blockchain_storage.hook_alt_block_add(
[this](const auto& info) { m_service_node_list.alt_block_add(info); });
m_blockchain_storage.hook_blockchain_detached([this](const auto& info) {
m_blockchain_storage.sqlite_db()->blockchain_detached(info.height);
});
// NOTE: There is an implicit dependency on service node lists being hooked first!
m_blockchain_storage.hook_init([this] { m_quorum_cop.init(); });
m_blockchain_storage.hook_block_add(
[this](const auto& info) { m_quorum_cop.block_add(info.block, info.txs); });
m_blockchain_storage.hook_blockchain_detached([this](const auto& info) {
m_quorum_cop.blockchain_detached(info.height, info.by_pop_blocks);
});
m_blockchain_storage.hook_block_post_add([this](const auto&) { update_omq_sns(); });
// Checkpoints
m_checkpoints_path = m_config_folder / fs::u8path(JSON_HASH_FILE_NAME);
sqlite3* ons_db = ons::init_oxen_name_system(ons_db_file_path, db->is_read_only());
if (!ons_db)
return false;
init_oxenmq(vm);
const difficulty_type fixed_difficulty = command_line::get_arg(vm, arg_fixed_difficulty);
r = m_blockchain_storage.init(
db.release(),
ons_db,
std::move(sqliteDB),
m_nettype,
m_offline,
regtest ? &regtest_test_options : test_options,
fixed_difficulty,
get_checkpoints);
CHECK_AND_ASSERT_MES(r, false, "Failed to initialize blockchain storage");
r = m_mempool.init(max_txpool_weight);
CHECK_AND_ASSERT_MES(r, false, "Failed to initialize memory pool");
// now that we have a valid m_blockchain_storage, we can clean out any
// transactions in the pool that do not conform to the current fork
m_mempool.validate(m_blockchain_storage.get_network_version());
bool show_time_stats = command_line::get_arg(vm, arg_show_time_stats) != 0;
m_blockchain_storage.set_show_time_stats(show_time_stats);
block_sync_size = command_line::get_arg(vm, arg_block_sync_size);
if (block_sync_size > BLOCKS_SYNCHRONIZING_MAX_COUNT)
log::error(
logcat,
"Error --block-sync-size cannot be greater than {}",
BLOCKS_SYNCHRONIZING_MAX_COUNT);
log::info(logcat, "Loading checkpoints");
CHECK_AND_ASSERT_MES(
update_checkpoints_from_json_file(),
false,
"One or more checkpoints loaded from json conflicted with existing checkpoints.");
r = m_miner.init(vm, m_nettype);
CHECK_AND_ASSERT_MES(r, false, "Failed to initialize miner instance");
if (!keep_alt_blocks && !m_blockchain_storage.get_db().is_read_only())
m_blockchain_storage.get_db().drop_alt_blocks();
if (prune_blockchain) {
// display a message if the blockchain is not pruned yet
if (!m_blockchain_storage.get_blockchain_pruning_seed()) {
log::info(logcat, "Pruning blockchain...");
CHECK_AND_ASSERT_MES(
m_blockchain_storage.prune_blockchain(), false, "Failed to prune blockchain");
} else {
CHECK_AND_ASSERT_MES(
m_blockchain_storage.update_blockchain_pruning(),
false,
"Failed to update blockchain pruning");
}
}
return true;
}
/// Loads a key pair from disk, if it exists, otherwise generates a new key pair and saves it to
/// disk.
///
/// get_pubkey - a function taking (privkey &, pubkey &) that sets the pubkey from the privkey;
/// returns true for success/false for failure
/// generate_pair - a void function taking (privkey &, pubkey &) that sets them to the generated
/// values; can throw on error.
template <typename Privkey, typename Pubkey, typename GetPubkey, typename GeneratePair>
bool init_key(
const fs::path& keypath,
Privkey& privkey,
Pubkey& pubkey,
GetPubkey get_pubkey,
GeneratePair generate_pair) {
std::error_code ec;
if (fs::exists(keypath, ec)) {
std::string keystr;
bool r = tools::slurp_file(keypath, keystr);
CHECK_AND_ASSERT_MES(
r, false, "failed to load service node key from " + keypath.u8string());
CHECK_AND_ASSERT_MES(
keystr.size() == sizeof(privkey),
false,
"service node key file " + keypath.u8string() + " has an invalid size");
// Validate before copying: copying first would read sizeof(privkey) bytes
// out of a buffer that might be shorter than that.
memcpy(&unwrap(unwrap(privkey)), keystr.data(), sizeof(privkey));
memwipe(&keystr[0], keystr.size());
r = get_pubkey(privkey, pubkey);
CHECK_AND_ASSERT_MES(r, false, "failed to generate pubkey from secret key");
} else {
try {
generate_pair(privkey, pubkey);
} catch (const std::exception& e) {
log::error(logcat, "failed to generate keypair {}", e.what());
return false;
}
bool r = tools::dump_file(keypath, tools::view_guts(privkey));
CHECK_AND_ASSERT_MES(r, false, "failed to save service node key to " + keypath.u8string());
fs::permissions(keypath, fs::perms::owner_read, ec);
}
return true;
}
//-----------------------------------------------------------------------------------------------
bool core::init_service_keys() {
auto& keys = m_service_keys;
static_assert(
sizeof(crypto::ed25519_public_key) == crypto_sign_ed25519_PUBLICKEYBYTES &&
sizeof(crypto::ed25519_secret_key) == crypto_sign_ed25519_SECRETKEYBYTES &&
sizeof(crypto::ed25519_signature) == crypto_sign_BYTES &&
sizeof(crypto::x25519_public_key) == crypto_scalarmult_curve25519_BYTES &&
sizeof(crypto::x25519_secret_key) == crypto_scalarmult_curve25519_BYTES,
"Invalid ed25519/x25519 sizes");
// <data>/key_ed25519: Standard ed25519 secret key. We always have this, and generate one if it
// doesn't exist.
//
// As of Loki 8.x, if this exists and `key` doesn't, we use this key for everything. For
// compatibility with earlier versions we also allow `key` to contain a separate monero privkey
// for the SN keypair. (The main difference is that the Monero keypair is unclamped and that it
// only contains the private key value but not the secret key value that we need for full
// Ed25519 signing).
//
if (!init_key(
m_config_folder / "key_ed25519",
keys.key_ed25519,
keys.pub_ed25519,
[](crypto::ed25519_secret_key& sk, crypto::ed25519_public_key& pk) {
crypto_sign_ed25519_sk_to_pk(pk.data(), sk.data());
return true;
},
[](crypto::ed25519_secret_key& sk, crypto::ed25519_public_key& pk) {
crypto_sign_ed25519_keypair(pk.data(), sk.data());
}))
return false;
// Standard x25519 keys generated from the ed25519 keypair, used for encrypted communication
// between SNs
int rc = crypto_sign_ed25519_pk_to_curve25519(keys.pub_x25519.data(), keys.pub_ed25519.data());
CHECK_AND_ASSERT_MES(rc == 0, false, "failed to convert ed25519 pubkey to x25519");
crypto_sign_ed25519_sk_to_curve25519(keys.key_x25519.data(), keys.key_ed25519.data());
// Legacy primary SN key file; we only load this if it exists, otherwise we use `key_ed25519`
// for the primary SN keypair. (This key predates the Ed25519 keys and so is needed for
// backwards compatibility with existing active service nodes.) The legacy key consists of
// *just* the private scalar, not the seed, and so cannot be used for full Ed25519 signatures
// (which rely on the seed for signing).
if (m_service_node) {
if (std::error_code ec; !fs::exists(m_config_folder / "key", ec)) {
epee::wipeable_string privkey_signhash;
privkey_signhash.resize(crypto_hash_sha512_BYTES);
unsigned char* pk_sh_data = reinterpret_cast<unsigned char*>(privkey_signhash.data());
crypto_hash_sha512(
        pk_sh_data,
        keys.key_ed25519.data(),
        32 /* first 32 bytes are the seed to be SHA512 hashed (the last 32 are just the
              pubkey) */);
// Clamp private key (as libsodium does and expects -- see
// https://www.jcraige.com/an-explainer-on-ed25519-clamping if you want the broader
// reasons)
pk_sh_data[0] &= 248;
pk_sh_data[31] &= 63; // (some implementations put 127 here, but with the |64 in the
// next line it is the same thing)
pk_sh_data[31] |= 64;
// Monero crypto requires a (pointless) check that the secret key is a fully reduced
// scalar, i.e. less than the ed25519 group order l, so reduce it mod l to make it happy:
sc_reduce32(pk_sh_data);
std::memcpy(keys.key.data(), pk_sh_data, 32);
if (!crypto::secret_key_to_public_key(keys.key, keys.pub))
throw std::runtime_error{"Failed to derive primary key from ed25519 key"};
if (std::memcmp(keys.pub.data(), keys.pub_ed25519.data(), 32))
throw std::runtime_error{
"Internal error: unexpected primary pubkey and ed25519 pubkey mismatch"};
} else if (!init_key(
m_config_folder / "key",
keys.key,
keys.pub,
crypto::secret_key_to_public_key,
[](crypto::secret_key& key, crypto::public_key& pubkey) {
throw std::runtime_error{
"Internal error: old-style public keys are no longer "
"generated"};
}))
return false;
} else {
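// Not running as a service node: the legacy primary SN keypair is unused, so just zero it.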
keys.key.zero();
keys.pub.zero();
}
if (m_service_node) {
log::info(logcat, fg(fmt::terminal_color::yellow), "Service node public keys:");
log::info(
logcat,
fg(fmt::terminal_color::yellow),
"- primary: {}",
tools::type_to_hex(keys.pub));
log::info(
logcat,
fg(fmt::terminal_color::yellow),
"- ed25519: {}",
tools::type_to_hex(keys.pub_ed25519));
// .snode address is the ed25519 pubkey, encoded with base32z and with .snode appended:
log::info(
logcat,
fg(fmt::terminal_color::yellow),
"- lokinet: {}.snode",
oxenc::to_base32z(tools::view_guts(keys.pub_ed25519)));
log::info(
logcat,
fg(fmt::terminal_color::yellow),
"- x25519: {}",
tools::type_to_hex(keys.pub_x25519));
} else {
// Only print the x25519 version because it's the only thing useful for a non-SN (for
// encrypted OMQ RPC connections).
log::info(
logcat,
fg(fmt::terminal_color::yellow),
"x25519 public key: {}",
tools::type_to_hex(keys.pub_x25519));
}
return true;
}
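// Hedged sketch (illustrative only; `example_snode_address` and `example_x25519_pubkey` are
// hypothetical helpers, not called anywhere): the three identities printed above are all
// derivable from the single Ed25519 pubkey, using the libsodium and oxenc calls already
// included by this file.
namespace {
[[maybe_unused]] std::string example_snode_address(const crypto::ed25519_public_key& pk) {
    // lokinet address: base32z encoding of the raw 32 pubkey bytes, with ".snode" appended
    return oxenc::to_base32z(tools::view_guts(pk)) + ".snode";
}
[[maybe_unused]] bool example_x25519_pubkey(
        const crypto::ed25519_public_key& ed, crypto::x25519_public_key& x) {
    // X25519 pubkey: birational Ed25519 -> Curve25519 map; fails for invalid points
    return crypto_sign_ed25519_pk_to_curve25519(x.data(), ed.data()) == 0;
}
}  // namespace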
oxenmq::AuthLevel core::omq_check_access(const crypto::x25519_public_key& pubkey) const {
auto it = m_omq_auth.find(pubkey);
if (it != m_omq_auth.end())
return it->second;
return oxenmq::AuthLevel::denied;
}
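// Hedged usage sketch (comment only; `omq` and `listen_addr` are hypothetical names): oxenmq
// asks an AllowFunc for the auth level of each incoming connection, and omq_allow (below) is
// the logic such a callback delegates to, e.g. for a public RPC listener:
//
//     omq.listen_curve(listen_addr,
//             [this](std::string_view ip, std::string_view pk, bool /*sn*/) {
//                 return omq_allow(ip, pk, oxenmq::AuthLevel::basic);
//             });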
// Determines the auth level for an incoming OMQ connection from `ip` with remote x25519 pubkey
// `x25519_pubkey_str` (empty for a plaintext connection): we start from `default_auth` and
// upgrade it if the pubkey has been pre-approved (see omq_check_access above).
//
// default_auth should be AuthLevel::denied if only pre-approved connections may connect,
// AuthLevel::basic for public RPC, AuthLevel::admin for a (presumably localhost) unrestricted
// port, and AuthLevel::none for a super restricted mode (generally this is useful when there are
// also SN-restrictions on commands, i.e. for quorumnet).
//
oxenmq::AuthLevel core::omq_allow(
std::string_view ip, std::string_view x25519_pubkey_str, oxenmq::AuthLevel default_auth) {
using namespace oxenmq;
AuthLevel auth = default_auth;
if (x25519_pubkey_str.size() == sizeof(crypto::x25519_public_key)) {
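        // The incoming pubkey string is the raw 32-byte key (not hex); copy it into a typed key: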
crypto::x25519_public_key x25519_pubkey;
std::memcpy(x25519_pubkey.data(), x25519_pubkey_str.data(), x25519_pubkey_str.size());
auto user_auth = omq_check_access(x25519_pubkey);
if (user_auth >= AuthLevel::basic) {
if (user_auth > auth)
auth = user_auth;
log::info(log::Cat("omq"), "Incoming {}-authenticated connection", auth);
}
log::info(
log::Cat("omq"),
"Incoming [{}] curve connection from {}/{}",
auth,
ip,
x25519_pubkey);
} else {
log::info(log::Cat("omq"), "Incoming [{}] plain connection from {}", auth, ip);
}
return auth;
}
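//-----------------------------------------------------------------------------------------------
// Constructs the daemon-wide OxenMQ instance. It is handed the node's x25519 keypair (so that
// incoming connections can be CURVE-encrypted even when not running as a service node), the
// service-node flag, a callback that resolves a remote x25519 pubkey to a connection address via
// the service node list, and a logging bridge into the "omq" log category.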
void core::init_oxenmq(const boost::program_options::variables_map& vm) {
using namespace oxenmq;
log::info(omqlogcat, "Starting oxenmq");
m_omq = std::make_unique<OxenMQ>(
tools::copy_guts(m_service_keys.pub_x25519),
tools::copy_guts(m_service_keys.key_x25519),
m_service_node,
[this](std::string_view x25519_pk) {
return m_service_node_list.remote_lookup(x25519_pk);
},
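            // Logging bridge: OxenMQ itself is constructed at its most verbose level (trace,
            // passed below), and the actual filtering happens here against the current level of
            // the "omq" log category.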
[](LogLevel omqlevel, const char* file, int line, std::string msg) {
auto level = *oxen::logging::parse_level(omqlevel);
if (omqlogcat->should_log(level))
omqlogcat->log({file, line, "omq"}, level, "{}", msg);
},
oxenmq::LogLevel::trace);
// ping.ping: a simple debugging target for pinging the omq listener
m_omq->add_category("ping", Access{AuthLevel::none})
.add_request_command("ping", [](Message& m) {
log::info(log::Cat("omq"), "Received ping from {}", m.conn);
m.send_reply("pong");
});
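    // Illustrative sketch (not part of this file): a remote client could exercise this endpoint
    // with the oxenmq client API along roughly these lines, assuming a plain-text OMQ listener
    // is reachable at the given (hypothetical) address and port:
    //
    //     oxenmq::OxenMQ client;
    //     client.start();
    //     auto conn = client.connect_remote(
    //             oxenmq::address{"tcp://127.0.0.1:22025"},
    //             [](oxenmq::ConnectionID) { /* connected */ },
    //             [](oxenmq::ConnectionID, std::string_view err) { /* connection failed */ });
    //     client.request(conn, "ping.ping", [](bool ok, std::vector<std::string> data) {
    //         // on success, `data` holds the single reply part "pong"
    //     });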
if (m_service_node) {
// Service nodes always listen for quorumnet data on the p2p IP, quorumnet port
std::string listen_ip = vm["p2p-bind-ip"].as<std::string>();
if (listen_ip.empty())
listen_ip = "0.0.0.0";
std::string qnet_listen = "tcp://" + listen_ip + ":" + std::to_string(m_quorumnet_port);
log::info(logcat, "- listening on {} (quorumnet)", qnet_listen);
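            // Curve-encrypted listener: the omq_allow callback assigns each incoming connection
            // an auth level; when arg_omq_quorumnet_public is set, unknown peers get basic
            // access, otherwise they get none (quorumnet is then effectively SN-to-SN only).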
m_omq->listen_curve(
qnet_listen,
[this, public_ = command_line::get_arg(vm, arg_omq_quorumnet_public)](
std::string_view ip, std::string_view pk, bool) {
return omq_allow(ip, pk, public_ ? AuthLevel::basic : AuthLevel::none);
});
m_quorumnet_state = quorumnet_new(*this);
}
quorumnet_init(*this, m_quorumnet_state);
}
RPC overhaul High-level details: This redesigns the RPC layer to make it much easier to work with, decouples it from an embedded HTTP server, and gets the vast majority of the RPC serialization and dispatch code out of a very commonly included header. There is unfortunately rather a lot of interconnected code here that cannot be easily separated out into separate commits. The full details of what happens here are as follows: Major details: - All of the RPC code is now in a `cryptonote::rpc` namespace; this renames quite a bit to be less verbose: e.g. CORE_RPC_STATUS_OK becomes `rpc::STATUS_OK`, and `cryptonote::COMMAND_RPC_SOME_LONG_NAME` becomes `rpc::SOME_LONG_NAME` (or just SOME_LONG_NAME for code already working in the `rpc` namespace). - `core_rpc_server` is now completely decoupled from providing any request protocol: it is now *just* the core RPC call handler. - The HTTP RPC interface now lives in a new rpc/http_server.h; this code handles listening for HTTP requests and dispatching them to core_rpc_server, then sending the results back to the caller. - There is similarly a rpc/lmq_server.h for LMQ RPC code; more details on this (and other LMQ specifics) below. - RPC implementing code now returns the response object and throws when things go wrong which simplifies much of the rpc error handling. They can throw anything; generic exceptions get logged and a generic "internal error" message gets returned to the caller, but there is also an `rpc_error` class to return an error code and message used by some json-rpc commands. - RPC implementing functions now overload `core_rpc_server::invoke` following the pattern: RPC_BLAH_BLAH::response core_rpc_server::invoke(RPC_BLAH_BLAH::request&& req, rpc_context context); This overloading makes the code vastly simpler: all instantiations are now done with a small amount of generic instantiation code in a single .cpp rather than needing to go to hell and back with a nest of epee macros in a core header. - each RPC endpoint is now defined by the RPC types themselves, including its accessible names and permissions, in core_rpc_server_commands_defs.h: - every RPC structure now has a static `names()` function that returns the names by which the end point is accessible. (The first one is the primary, the others are for deprecated aliases). - RPC command wrappers define their permissions and type by inheriting from special tag classes: - rpc::RPC_COMMAND is a basic, admin-only, JSON command, available via JSON RPC. *All* JSON commands are now available via JSON RPC, instead of the previous mix of some being at /foo and others at /json_rpc. (Ones that were previously at /foo are still there for backwards compatibility; see `rpc::LEGACY` below). - rpc::PUBLIC specifies that the command should be available via a restricted RPC connection. - rpc::BINARY specifies that the command is not JSON, but rather is accessible as /name and takes and returns values in the magic epee binary "portable storage" (lol) data format. - rpc::LEGACY specifies that the command should be available via the non-json-rpc interface at `/name` for backwards compatibility (in addition to the JSON-RPC interface). - some epee serialization got unwrapped and de-templatized so that it can be moved into a .cpp file with just declarations in the .h. (This makes a *huge* difference for core_rpc_server_commands_defs.h and for every compilation unit that includes it which previously had to compile all the serialization code and then throw all by one copy away at link time). 
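    // Starts the OxenMQ instance. When running as a service node this also registers the
    // dedicated "pulse" tagged thread and two recurring timers: the pulse main loop (every
    // 500ms, pinned to that thread) and the service-node clock check (every 5s).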
void core::start_oxenmq() {
update_omq_sns(); // Ensure we have SNs set for the current block before starting
if (m_service_node) {
m_pulse_thread_id = m_omq->add_tagged_thread("pulse");
m_omq->add_timer(
[this]() { pulse::main(m_quorumnet_state, *this); },
std::chrono::milliseconds(500),
false,
m_pulse_thread_id);
m_omq->add_timer([this]() { this->check_service_node_time(); }, 5s, false);
}
m_omq->start();
}
//-----------------------------------------------------------------------------------------------
bool core::set_genesis_block(const block& b) {
return m_blockchain_storage.reset_and_set_genesis_block(b);
}
//-----------------------------------------------------------------------------------------------
void core::deinit() {
#ifdef ENABLE_SYSTEMD
sd_notify(0, "STOPPING=1\nSTATUS=Shutting down");
#endif
if (m_quorumnet_state)
quorumnet_delete(m_quorumnet_state);
m_omq.reset();
m_service_node_list.store();
m_miner.stop();
m_mempool.deinit();
m_blockchain_storage.deinit();
}
//-----------------------------------------------------------------------------------------------
void core::test_drop_download() {
m_test_drop_download = false;
}
//-----------------------------------------------------------------------------------------------
void core::test_drop_download_height(uint64_t height) {
m_test_drop_download_height = height;
}
//-----------------------------------------------------------------------------------------------
bool core::get_test_drop_download() const {
return m_test_drop_download;
}
//-----------------------------------------------------------------------------------------------
bool core::get_test_drop_download_height() const {
if (m_test_drop_download_height == 0)
return true;
if (get_blockchain_storage().get_current_blockchain_height() <= m_test_drop_download_height)
return true;
return false;
}
//-----------------------------------------------------------------------------------------------
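    // First stage of incoming tx handling: cheap, stateless checks on a single raw blob.
    // Rejects over-sized or empty blobs, parses the blob into tx_info.tx/tx_hash, and
    // rejects txes recently seen failing the semantics check. tx_info.result is set only
    // on success; later stages skip entries where it is false.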
void core::parse_incoming_tx_pre(tx_verification_batch_info& tx_info) {
if (tx_info.blob->size() > MAX_TX_SIZE) {
log::info(
                logcat, "WRONG TRANSACTION BLOB, too big (size {}), rejected", tx_info.blob->size());
tx_info.tvc.m_verifivation_failed = true;
tx_info.tvc.m_too_big = true;
return;
} else if (tx_info.blob->empty()) {
log::info(logcat, "WRONG TRANSACTION BLOB, blob is empty, rejected");
tx_info.tvc.m_verifivation_failed = true;
return;
}
tx_info.parsed = parse_and_validate_tx_from_blob(*tx_info.blob, tx_info.tx, tx_info.tx_hash);
if (!tx_info.parsed) {
log::info(logcat, "WRONG TRANSACTION BLOB, Failed to parse, rejected");
tx_info.tvc.m_verifivation_failed = true;
return;
}
// std::cout << "!"<< tx.vin.size() << std::endl;
std::lock_guard lock{bad_semantics_txes_lock};
for (int idx = 0; idx < 2; ++idx) {
if (bad_semantics_txes[idx].find(tx_info.tx_hash) != bad_semantics_txes[idx].end()) {
log::info(logcat, "Transaction already seen with bad semantics, rejected");
tx_info.tvc.m_verifivation_failed = true;
return;
}
}
tx_info.result = true;
}
//-----------------------------------------------------------------------------------------------
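    // Records a tx hash that failed the semantics check so that repeat submissions can be
    // rejected cheaply in parse_incoming_tx_pre. Two generations of hashes are kept: once
    // the current set reaches BAD_SEMANTICS_TXES_MAX_SIZE it is rotated into the "old"
    // slot and a fresh set is started, bounding memory without per-entry bookkeeping.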
void core::set_semantics_failed(const crypto::hash& tx_hash) {
        log::info(logcat, "WRONG TRANSACTION BLOB, failed semantics check for tx {}, rejected", tx_hash);
        std::lock_guard lock{bad_semantics_txes_lock};
        bad_semantics_txes[0].insert(tx_hash);
        if (bad_semantics_txes[0].size() >= BAD_SEMANTICS_TXES_MAX_SIZE) {
            std::swap(bad_semantics_txes[0], bad_semantics_txes[1]);
            bad_semantics_txes[0].clear();
        }
}
//-----------------------------------------------------------------------------------------------
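    // "Canonical" here means exactly one aggregate bulletproof whose commitment count (V)
    // is between 1 and TX_BULLETPROOF_MAX_OUTPUTS.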
static bool is_canonical_bulletproof_layout(const std::vector<rct::Bulletproof>& proofs) {
if (proofs.size() != 1)
return false;
const size_t sz = proofs[0].V.size();
if (sz == 0 || sz > TX_BULLETPROOF_MAX_OUTPUTS)
return false;
return true;
}
//-----------------------------------------------------------------------------------------------
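    // Second stage: semantics verification for a whole batch of parsed txes. Per-tx checks
    // run first; RingCT signatures of the bulletproof types are then collected into rvv
    // and batch-verified in one shot, falling back to per-tx verification to identify the
    // offender(s) if the batched check fails.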
void core::parse_incoming_tx_accumulated_batch(
std::vector<tx_verification_batch_info>& tx_info, bool kept_by_block) {
if (kept_by_block && get_blockchain_storage().is_within_compiled_block_hash_area()) {
log::trace(logcat, "Skipping semantics check for txs kept by block in embedded hash area");
return;
}
std::vector<const rct::rctSig*> rvv;
for (size_t n = 0; n < tx_info.size(); ++n) {
if (!tx_info[n].result || tx_info[n].already_have)
continue;
if (!check_tx_semantic(tx_info[n].tx, kept_by_block)) {
set_semantics_failed(tx_info[n].tx_hash);
tx_info[n].tvc.m_verifivation_failed = true;
tx_info[n].result = false;
continue;
}
if (!tx_info[n].tx.is_transfer())
continue;
const rct::rctSig& rv = tx_info[n].tx.rct_signatures;
switch (rv.type) {
case rct::RCTType::Null:
                // the Null type is only legitimate for coinbase txes, which never come
                // through here, so reject
log::error(log::Cat("verify"), "Unexpected Null rctSig type");
set_semantics_failed(tx_info[n].tx_hash);
tx_info[n].tvc.m_verifivation_failed = true;
tx_info[n].result = false;
break;
case rct::RCTType::Simple:
if (!rct::verRctSemanticsSimple(rv)) {
log::error(log::Cat("verify"), "rct signature semantics check failed");
set_semantics_failed(tx_info[n].tx_hash);
tx_info[n].tvc.m_verifivation_failed = true;
tx_info[n].result = false;
break;
}
break;
case rct::RCTType::Full:
if (!rct::verRct(rv, true)) {
log::error(log::Cat("verify"), "rct signature semantics check failed");
set_semantics_failed(tx_info[n].tx_hash);
tx_info[n].tvc.m_verifivation_failed = true;
tx_info[n].result = false;
break;
}
break;
case rct::RCTType::Bulletproof:
case rct::RCTType::Bulletproof2:
case rct::RCTType::CLSAG:
if (!is_canonical_bulletproof_layout(rv.p.bulletproofs)) {
log::error(log::Cat("verify"), "Bulletproof does not have canonical form");
set_semantics_failed(tx_info[n].tx_hash);
tx_info[n].tvc.m_verifivation_failed = true;
tx_info[n].result = false;
break;
}
rvv.push_back(&rv); // delayed batch verification
break;
default:
log::error(log::Cat("verify"), "Unknown rct type: {}", (int)rv.type);
set_semantics_failed(tx_info[n].tx_hash);
tx_info[n].tvc.m_verifivation_failed = true;
tx_info[n].result = false;
break;
}
}
if (!rvv.empty() && !rct::verRctSemanticsSimple(rvv)) {
log::info(
                logcat,
                "At least one transaction in this group has bad semantics, verifying one at a time");
const bool assumed_bad = rvv.size() == 1; // if there's only one tx, it must be the bad one
for (size_t n = 0; n < tx_info.size(); ++n) {
if (!tx_info[n].result || tx_info[n].already_have)
continue;
if (!rct::is_rct_bulletproof(tx_info[n].tx.rct_signatures.type))
continue;
if (assumed_bad || !rct::verRctSemanticsSimple(tx_info[n].tx.rct_signatures)) {
set_semantics_failed(tx_info[n].tx_hash);
tx_info[n].tvc.m_verifivation_failed = true;
tx_info[n].result = false;
}
}
}
}
//-----------------------------------------------------------------------------------------------
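    // Parses a batch of raw tx blobs in parallel on the shared threadpool, marks txes we
    // already have (in the mempool or on the chain), then runs the batched semantics
    // checks above.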
std::vector<cryptonote::tx_verification_batch_info> core::parse_incoming_txs(
const std::vector<std::string>& tx_blobs, const tx_pool_options& opts) {
// Caller needs to do this around both this *and* handle_parsed_txs
// auto lock = incoming_tx_lock();
std::vector<cryptonote::tx_verification_batch_info> tx_info(tx_blobs.size());
tools::threadpool& tpool = tools::threadpool::getInstance();
tools::threadpool::waiter waiter;
for (size_t i = 0; i < tx_blobs.size(); i++) {
tx_info[i].blob = &tx_blobs[i];
tpool.submit(&waiter, [this, &info = tx_info[i]] {
try {
parse_incoming_tx_pre(info);
} catch (const std::exception& e) {
                log::error(log::Cat("verify"), "Exception in parse_incoming_tx_pre: {}", e.what());
info.tvc.m_verifivation_failed = true;
}
});
}
waiter.wait(&tpool);
for (auto& info : tx_info) {
if (!info.result)
continue;
        if (m_mempool.have_tx(info.tx_hash)) {
            log::debug(logcat, "tx {} is already in the mempool", info.tx_hash);
            info.already_have = true;
        } else if (m_blockchain_storage.have_tx(info.tx_hash)) {
            log::debug(logcat, "tx {} is already in the blockchain", info.tx_hash);
            info.already_have = true;
        }
}
parse_incoming_tx_accumulated_batch(tx_info, opts.kept_by_block);
return tx_info;
}
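    // Third stage: feeds successfully parsed txes to the mempool, adding the approved-
    // blink flag to the pool options where applicable. Returns false if any tx in the
    // batch failed here or in an earlier stage (failures propagate via info.result).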
bool core::handle_parsed_txs(
std::vector<tx_verification_batch_info>& parsed_txs,
const tx_pool_options& opts,
uint64_t* blink_rollback_height) {
// Caller needs to do this around both this *and* parse_incoming_txs
// auto lock = incoming_tx_lock();
auto version = m_blockchain_storage.get_network_version();
bool ok = true;
if (blink_rollback_height)
*blink_rollback_height = 0;
tx_pool_options tx_opts;
for (size_t i = 0; i < parsed_txs.size(); i++) {
auto& info = parsed_txs[i];
if (!info.result) {
ok = false; // Propagate failures (so this can be chained with parse_incoming_txs
// without an intermediate check)
continue;
}
if (opts.kept_by_block)
get_blockchain_storage().on_new_tx_from_block(info.tx);
if (info.already_have)
continue; // Not a failure
const size_t weight = get_transaction_weight(info.tx, info.blob->size());
const tx_pool_options* local_opts = &opts;
if (blink_rollback_height && info.approved_blink) {
// If this is an approved blink then pass a copy of the options with the flag added
tx_opts = opts;
tx_opts.approved_blink = true;
local_opts = &tx_opts;
}
if (m_mempool.add_tx(
info.tx,
info.tx_hash,
*info.blob,
weight,
info.tvc,
*local_opts,
version,
blink_rollback_height)) {
log::debug(logcat, "tx added: {}", info.tx_hash);
} else {
ok = false;
if (info.tvc.m_verifivation_failed)
log::error(log::Cat("verify"), "Transaction verification failed: {}", info.tx_hash);
else if (info.tvc.m_verifivation_impossible)
log::error(
log::Cat("verify"),
"Transaction verification impossible: {}",
info.tx_hash);
}
}
return ok;
}
//-----------------------------------------------------------------------------------------------
std::vector<cryptonote::tx_verification_batch_info> core::handle_incoming_txs(
const std::vector<std::string>& tx_blobs, const tx_pool_options& opts) {
auto lock = incoming_tx_lock();
auto parsed = parse_incoming_txs(tx_blobs, opts);
handle_parsed_txs(parsed, opts);
return parsed;
}
//-----------------------------------------------------------------------------------------------
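    // Single-tx convenience wrapper around handle_incoming_txs: success means the blob
    // parsed and verified and the tx is now in the pool (or already was).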
bool core::handle_incoming_tx(
const std::string& tx_blob, tx_verification_context& tvc, const tx_pool_options& opts) {
const std::vector<std::string> tx_blobs{{tx_blob}};
auto parsed = handle_incoming_txs(tx_blobs, opts);
parsed[0].blob = &tx_blob; // Update pointer to the input rather than the copy in case the
// caller wants to use it for some reason
tvc = parsed[0].tvc;
return parsed[0].result && (parsed[0].already_have || tvc.m_added_to_pool);
}
//-----------------------------------------------------------------------------------------------
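    // Takes incoming blink metadata (tx hash, height, quorum positions, signatures),
    // builds blink_tx objects for the entries we actually want, and validates their
    // signatures against the blink quorums. Returns the constructed blinks together with
    // `missing_txs`, the set of referenced txes we still need to fetch.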
std::pair<std::vector<std::shared_ptr<blink_tx>>, std::unordered_set<crypto::hash>>
core::parse_incoming_blinks(const std::vector<serializable_blink_metadata>& blinks) {
std::pair<std::vector<std::shared_ptr<blink_tx>>, std::unordered_set<crypto::hash>> results;
auto& new_blinks = results.first;
auto& missing_txs = results.second;
if (m_blockchain_storage.get_network_version() < feature::BLINK)
return results;
std::vector<uint8_t> want(
blinks.size(), false); // Really bools, but std::vector<bool> is broken.
size_t want_count = 0;
// Step 1: figure out which referenced transactions we want to keep:
// - unknown tx (typically an incoming blink)
// - in mempool without blink sigs (it's possible to get the tx before the blink signatures)
// - in a recent, still-mutable block with blink sigs (can happen when syncing blocks before
// retrieving blink signatures)
{
std::vector<crypto::hash> hashes;
hashes.reserve(blinks.size());
for (auto& bm : blinks)
hashes.emplace_back(bm.tx_hash);
std::unique_lock<Blockchain> lock(m_blockchain_storage);
auto tx_block_heights = m_blockchain_storage.get_transactions_heights(hashes);
auto immutable_height = m_blockchain_storage.get_immutable_height();
auto& db = m_blockchain_storage.get_db();
for (size_t i = 0; i < blinks.size(); i++) {
if (tx_block_heights[i] == 0 /*mempool or unknown*/ ||
tx_block_heights[i] > immutable_height /*mined but not yet immutable*/) {
want[i] = true;
want_count++;
}
}
}
log::debug(
logcat,
"Want {} of {} incoming blink signature sets after filtering out immutable txes",
want_count,
blinks.size());
if (!want_count)
return results;
// Step 2: filter out any transactions for which we already have a blink signature
{
auto mempool_lock = m_mempool.blink_shared_lock();
for (size_t i = 0; i < blinks.size(); i++) {
if (want[i] && m_mempool.has_blink(blinks[i].tx_hash)) {
log::debug(
logcat,
"Ignoring blink data for {}: already have blink signatures",
blinks[i].tx_hash);
want[i] = false; // Already have it, move along
want_count--;
}
}
}
log::debug(
logcat,
"Want {} of {} incoming blink signature sets after filtering out existing blink sigs",
want_count,
blinks.size());
if (!want_count)
return results;
// Step 3: create new blink_tx objects for txes and add the blink signatures. We can do all of
// this without a lock since these are (for now) just local instances.
new_blinks.reserve(want_count);
std::unordered_map<uint64_t, std::shared_ptr<const service_nodes::quorum>> quorum_cache;
for (size_t i = 0; i < blinks.size(); i++) {
if (!want[i])
continue;
auto& bdata = blinks[i];
new_blinks.push_back(std::make_shared<blink_tx>(bdata.height, bdata.tx_hash));
auto& blink = *new_blinks.back();
        // Data structure checks (we have more stringent validity checks later, but if these
        // fail now then there's no point in even trying to do signature validation).
if (bdata.signature.size() !=
bdata.position
.size() || // Each signature must have an associated quorum position
bdata.signature.size() != bdata.quorum.size() || // and quorum index
bdata.signature.size() <
service_nodes::BLINK_MIN_VOTES *
tools::enum_count<blink_tx::subquorum> || // too few signatures for
// possible validity
bdata.signature.size() >
service_nodes::BLINK_SUBQUORUM_SIZE *
tools::enum_count<blink_tx::subquorum> || // too many signatures
blink_tx::quorum_height(bdata.height, blink_tx::subquorum::base) ==
0 || // Height is too early (no blink quorum height)
std::any_of(
bdata.position.begin(),
bdata.position.end(),
[](const auto& p) {
return p >= service_nodes::BLINK_SUBQUORUM_SIZE;
}) || // invalid position
std::any_of(
bdata.quorum.begin(),
bdata.quorum.end(),
[](const auto& qi) {
return qi >= tools::enum_count<blink_tx::subquorum>;
}) // invalid quorum index
) {
log::info(logcat, "Invalid blink tx {}: invalid signature data", bdata.tx_hash);
continue;
}
bool no_quorum = false;
std::array<const std::vector<crypto::public_key>*, tools::enum_count<blink_tx::subquorum>>
validators;
for (uint8_t qi = 0; qi < tools::enum_count<blink_tx::subquorum>; qi++) {
auto q_height = blink.quorum_height(static_cast<blink_tx::subquorum>(qi));
auto& q = quorum_cache[q_height];
if (!q)
q = get_quorum(service_nodes::quorum_type::blink, q_height);
if (!q) {
log::trace(
logcat,
"Don't have a quorum for height {} (yet?), ignoring this blink",
q_height);
no_quorum = true;
break;
}
validators[qi] = &q->validators;
}
if (no_quorum)
continue;
std::vector<std::pair<size_t, std::string>> failures;
for (size_t s = 0; s < bdata.signature.size(); s++) {
try {
blink.add_signature(
static_cast<blink_tx::subquorum>(bdata.quorum[s]),
bdata.position[s],
true /*approved*/,
bdata.signature[s],
validators[bdata.quorum[s]]->at(bdata.position[s]));
} catch (const std::exception& e) {
failures.emplace_back(s, e.what());
}
}
if (blink.approved()) {
log::info(
logcat,
"Blink tx {} blink signatures approved with {} signature validation failures",
bdata.tx_hash,
failures.size());
for (auto& f : failures)
log::debug(
logcat,
"- failure for quorum {}, position {}: {}",
int(bdata.quorum[f.first]),
int(bdata.position[f.first]),
f.second);
} else {
std::string blink_error = "Blink validation failed:";
auto append = std::back_inserter(blink_error);
for (auto& f : failures)
fmt::format_to(
append,
" [{}:{}]: {}",
int(bdata.quorum[f.first]),
int(bdata.position[f.first]),
f.second);
log::info(logcat, "Invalid blink tx {}: {}", bdata.tx_hash, blink_error);
}
}
return results;
}
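    // Adds fully approved blink txes to the mempool's blink store; returns how many were
    // actually added, and wakes up long-poll connections when that count is non-zero.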
int core::add_blinks(const std::vector<std::shared_ptr<blink_tx>>& blinks) {
int added = 0;
if (blinks.empty())
return added;
auto lock = m_mempool.blink_unique_lock();
        for (auto& b : blinks)
            if (b->approved() && m_mempool.add_existing_blink(b))
                added++;
if (added) {
log::info(logcat, "Added blink signatures for {} blinks", added);
long_poll_trigger(m_mempool);
}
return added;
}
//-----------------------------------------------------------------------------------------------
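    // Hands a blink tx off to quorumnet for blink quorum processing; the returned future
    // resolves with the quorum's accept/reject result.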
std::future<std::pair<blink_result, std::string>> core::handle_blink_tx(
const std::string& tx_blob) {
return quorumnet_send_blink(*this, tx_blob);
}
//-----------------------------------------------------------------------------------------------
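    // Stateless semantic validation of a single tx: input/output type checks, money
    // overflow, v1 in/out amount balance, weight limit, duplicate key images and ring
    // members, and the key image subgroup check. Checks requiring chain context (spent
    // key images, ring member validity, etc.) happen later.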
    bool core::check_tx_semantic(const transaction& tx, bool kept_by_block) const {
if (tx.is_transfer()) {
if (tx.vin.empty()) {
log::error(
log::Cat("verify"),
"tx with empty inputs, rejected for tx id= {}",
get_transaction_hash(tx));
return false;
}
} else {
if (tx.vin.size() != 0) {
log::error(
log::Cat("verify"),
"tx type: {} must have 0 inputs, received: {}, rejected for tx id = {}",
tx.type,
tx.vin.size(),
get_transaction_hash(tx));
return false;
}
}
if (!check_inputs_types_supported(tx)) {
log::error(
log::Cat("verify"),
"unsupported input types for tx id= {}",
get_transaction_hash(tx));
return false;
}
if (!check_outs_valid(tx)) {
log::error(
log::Cat("verify"),
"tx with invalid outputs, rejected for tx id= {}",
get_transaction_hash(tx));
return false;
}
if (tx.version >= txversion::v2_ringct) {
if (tx.rct_signatures.outPk.size() != tx.vout.size()) {
log::error(
log::Cat("verify"),
"tx with mismatched vout/outPk count, rejected for tx id= {}",
get_transaction_hash(tx));
return false;
}
}
if (!check_money_overflow(tx)) {
log::error(
log::Cat("verify"),
"tx has money overflow, rejected for tx id= {}",
get_transaction_hash(tx));
return false;
}
if (tx.version == txversion::v1) {
uint64_t amount_in = 0;
get_inputs_money_amount(tx, amount_in);
uint64_t amount_out = get_outs_money_amount(tx);
if (amount_in <= amount_out) {
log::error(
log::Cat("verify"),
"tx with wrong amounts: ins {}, outs {}, rejected for tx id= {}",
amount_in,
amount_out,
get_transaction_hash(tx));
return false;
}
}
        if (!kept_by_block &&
get_transaction_weight(tx) >=
m_blockchain_storage.get_current_cumulative_block_weight_limit() -
COINBASE_BLOB_RESERVED_SIZE) {
log::error(
                log::Cat("verify"),
                "tx is too large ({}), expected no bigger than {}",
get_transaction_weight(tx),
m_blockchain_storage.get_current_cumulative_block_weight_limit() -
COINBASE_BLOB_RESERVED_SIZE);
return false;
}
if (!check_tx_inputs_keyimages_diff(tx)) {
log::error(log::Cat("verify"), "tx uses a single key image more than once");
return false;
}
if (!check_tx_inputs_ring_members_diff(tx)) {
log::error(log::Cat("verify"), "tx uses duplicate ring members");
return false;
}
if (!check_tx_inputs_keyimages_domain(tx)) {
log::error(log::Cat("verify"), "tx uses key image not in the valid domain");
return false;
}
return true;
}
//-----------------------------------------------------------------------------------------------
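    // Periodic clock sanity check, run from a 5s timer when in service node mode: asks a
    // random service node for its timestamp via "quorum.timestamp", records whether our
    // clock agrees within THRESHOLD_SECONDS_OUT_OF_SYNC, and warns if too large a share
    // of recent checks disagreed with us.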
bool core::check_service_node_time() {
if (!is_active_sn()) {
return true;
}
crypto::public_key pubkey = m_service_node_list.get_random_pubkey();
crypto::x25519_public_key x_pkey{0};
constexpr std::array<uint16_t, 3> MIN_TIMESTAMP_VERSION{9, 1, 0};
        std::array<uint16_t, 3> proofversion{};  // value-initialize in case the proof lookup fails
m_service_node_list.access_proof(pubkey, [&](auto& proof) {
x_pkey = proof.pubkey_x25519;
proofversion = proof.proof->version;
});
if (proofversion >= MIN_TIMESTAMP_VERSION && x_pkey) {
m_omq->request(
tools::view_guts(x_pkey),
"quorum.timestamp",
[this, pubkey](bool success, std::vector<std::string> data) {
const time_t local_seconds = time(nullptr);
log::debug(
                            logcat,
                            "Timestamp message received: {}, local time is: {}",
data[0],
local_seconds);
if (success) {
int64_t received_seconds;
if (tools::parse_int(data[0], received_seconds)) {
uint16_t variance;
if (received_seconds > local_seconds + 65535 ||
received_seconds < local_seconds - 65535) {
variance = 65535;
} else {
variance = std::abs(local_seconds - received_seconds);
}
std::lock_guard<std::mutex> lk(m_sn_timestamp_mutex);
// Records the variance into the record of our performance (m_sn_times)
service_nodes::timesync_entry entry{
variance <= service_nodes::THRESHOLD_SECONDS_OUT_OF_SYNC};
m_sn_times.add(entry);
// Counts the number of times we have been out of sync
if (m_sn_times.failures() >
(m_sn_times.size() * service_nodes::MAXIMUM_EXTERNAL_OUT_OF_SYNC /
100)) {
log::warning(logcat, "service node time might be out of sync");
// If we are out of sync record the other service node as in sync
m_service_node_list.record_timesync_status(pubkey, true);
} else {
m_service_node_list.record_timesync_status(
pubkey,
variance <= service_nodes::THRESHOLD_SECONDS_OUT_OF_SYNC);
}
} else {
success = false;
}
}
m_service_node_list.record_timestamp_participation(pubkey, success);
});
}
return true;
}
//-----------------------------------------------------------------------------------------------
bool core::is_key_image_spent(const crypto::key_image& key_image) const {
return m_blockchain_storage.have_tx_keyimg_as_spent(key_image);
}
//-----------------------------------------------------------------------------------------------
bool core::are_key_images_spent(
const std::vector<crypto::key_image>& key_im, std::vector<bool>& spent) const {
spent.clear();
for (auto& ki : key_im) {
spent.push_back(m_blockchain_storage.have_tx_keyimg_as_spent(ki));
}
return true;
}
//-----------------------------------------------------------------------------------------------
size_t core::get_block_sync_size(uint64_t height) const {
return block_sync_size > 0 ? block_sync_size : BLOCKS_SYNCHRONIZING_DEFAULT_COUNT;
}
//-----------------------------------------------------------------------------------------------
bool core::are_key_images_spent_in_pool(
const std::vector<crypto::key_image>& key_im, std::vector<bool>& spent) const {
spent.clear();
return m_mempool.check_for_key_images(key_im, spent);
}
//-----------------------------------------------------------------------------------------------
std::optional<std::tuple<int64_t, int64_t, int64_t>> core::get_coinbase_tx_sum(
uint64_t start_offset, size_t count) {
std::optional<std::tuple<int64_t, int64_t, int64_t>> result{{0, 0, 0}};
if (count == 0)
return result;
auto& [emission_amount, total_fee_amount, burnt_oxen] = *result;
// Caching.
//
// Requesting this value from the beginning of the chain is very slow, so we cache it. That
// still means the first request will be slow, but that's okay. To prevent a bunch of threads
// getting backed up trying to calculate this, we lock out more than one thread building the
// cache at a time if we're requesting a large number of block values at once. Any other thread
// requesting will get a nullopt back.
constexpr uint64_t CACHE_LAG = 30; // We cache the values up to this many blocks ago; we lag so
// that we don't have to worry about small reorgs
constexpr uint64_t CACHE_EXCLUSIVE =
1000; // If we need to load more than this, we block out other threads
// Check if we have a cacheable from-the-beginning result
uint64_t cache_to = 0;
std::chrono::steady_clock::time_point cache_build_started;
if (start_offset == 0) {
uint64_t height = m_blockchain_storage.get_current_blockchain_height();
if (count > height)
count = height;
cache_to = height - std::min(CACHE_LAG, height);
{
std::shared_lock lock{m_coinbase_cache.mutex};
if (m_coinbase_cache.height && count >= m_coinbase_cache.height) {
emission_amount = m_coinbase_cache.emissions;
total_fee_amount = m_coinbase_cache.fees;
burnt_oxen = m_coinbase_cache.burnt;
start_offset = m_coinbase_cache.height + 1;
count -= m_coinbase_cache.height;
}
// else don't change anything; we need a subset of blocks that ends before the cache.
if (cache_to <= m_coinbase_cache.height)
cache_to = 0; // Cache doesn't need updating
}
// If we're loading a lot then acquire an exclusive lock, recheck our variables, and block
// out other threads until we're done. (We don't do this if we're only loading a few
// because even if we have some competing cache updates they don't hurt anything).
if (cache_to > 0 && count > CACHE_EXCLUSIVE) {
std::unique_lock lock{m_coinbase_cache.mutex};
if (m_coinbase_cache.building)
return std::nullopt; // Another thread is already updating the cache
if (m_coinbase_cache.height && m_coinbase_cache.height >= start_offset) {
// Someone else updated the cache while we were acquiring the unique lock, so update
// our variables
if (m_coinbase_cache.height >= start_offset + count) {
// The cache is now *beyond* us, which means we can't use it, so reset
// start/count back to what they were originally.
count += start_offset - 1;
start_offset = 0;
cache_to = 0;
} else {
// The cache is updated and we can still use it, so update our variables.
emission_amount = m_coinbase_cache.emissions;
total_fee_amount = m_coinbase_cache.fees;
burnt_oxen = m_coinbase_cache.burnt;
count -= m_coinbase_cache.height - start_offset + 1;
start_offset = m_coinbase_cache.height + 1;
}
}
if (cache_to > 0 && count > CACHE_EXCLUSIVE) {
cache_build_started = std::chrono::steady_clock::now();
m_coinbase_cache.building = true; // Block out other threads until we're done
log::info(
logcat,
"Starting slow cache build request for get_coinbase_tx_sum({}, {})",
start_offset,
count);
}
}
}
const uint64_t end = start_offset + count - 1;
m_blockchain_storage.for_blocks_range(
start_offset,
end,
[this, &cache_to, &result, &cache_build_started](
uint64_t height, const crypto::hash& hash, const block& b) {
auto& [emission_amount, total_fee_amount, burnt_oxen] = *result;
std::vector<transaction> txs;
auto coinbase_amount = static_cast<int64_t>(get_outs_money_amount(b.miner_tx));
get_transactions(b.tx_hashes, txs);
int64_t tx_fee_amount = 0;
for (const auto& tx : txs) {
tx_fee_amount += static_cast<int64_t>(
get_tx_miner_fee(tx, b.major_version >= feature::FEE_BURNING));
if (b.major_version >= feature::FEE_BURNING) {
burnt_oxen +=
static_cast<int64_t>(get_burned_amount_from_tx_extra(tx.extra));
}
}
emission_amount += coinbase_amount - tx_fee_amount;
total_fee_amount += tx_fee_amount;
if (cache_to && cache_to == height) {
std::unique_lock lock{m_coinbase_cache.mutex};
if (m_coinbase_cache.height < height) {
m_coinbase_cache.height = height;
m_coinbase_cache.emissions = emission_amount;
m_coinbase_cache.fees = total_fee_amount;
m_coinbase_cache.burnt = burnt_oxen;
}
if (m_coinbase_cache.building) {
m_coinbase_cache.building = false;
log::info(
logcat,
"Finishing cache build for get_coinbase_tx_sum in {} s",
std::chrono::duration<double>{
std::chrono::steady_clock::now() - cache_build_started}
.count());
}
cache_to = 0;
}
return true;
});
return result;
}
//-----------------------------------------------------------------------------------------------
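    // A key image may appear at most once within a single tx's inputs.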
bool core::check_tx_inputs_keyimages_diff(const transaction& tx) const {
std::unordered_set<crypto::key_image> ki;
for (const auto& in : tx.vin) {
CHECKED_GET_SPECIFIC_VARIANT(in, txin_to_key, tokey_in, false);
if (!ki.insert(tokey_in.k_image).second)
return false;
}
return true;
}
//-----------------------------------------------------------------------------------------------
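    // Ring member offsets within an input are relative, so a zero offset anywhere after
    // the first entry would reference the same output twice, i.e. a duplicate ring member.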
bool core::check_tx_inputs_ring_members_diff(const transaction& tx) const {
for (const auto& in : tx.vin) {
CHECKED_GET_SPECIFIC_VARIANT(in, txin_to_key, tokey_in, false);
for (size_t n = 1; n < tokey_in.key_offsets.size(); ++n)
if (tokey_in.key_offsets[n] == 0)
return false;
}
return true;
}
//-----------------------------------------------------------------------------------------------
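    // Key images must lie in the prime-order subgroup of Ed25519: multiplying by the group
    // order must yield the identity. A point with a torsion component would otherwise
    // permit several "distinct" key images for the same output.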
bool core::check_tx_inputs_keyimages_domain(const transaction& tx) const {
std::unordered_set<crypto::key_image> ki;
for (const auto& in : tx.vin) {
CHECKED_GET_SPECIFIC_VARIANT(in, txin_to_key, tokey_in, false);
if (!(rct::scalarmultKey(rct::ki2rct(tokey_in.k_image), rct::curveOrder()) ==
rct::identity()))
return false;
}
return true;
}
//-----------------------------------------------------------------------------------------------
size_t core::get_blockchain_total_transactions() const {
return m_blockchain_storage.get_total_transactions();
}
//-----------------------------------------------------------------------------------------------
bool core::relay_txpool_transactions() {
// we attempt to relay txes that should be relayed, but were not
std::vector<std::pair<crypto::hash, std::string>> txs;
if (m_mempool.get_relayable_transactions(txs) && !txs.empty()) {
cryptonote_connection_context fake_context{};
tx_verification_context tvc{};
NOTIFY_NEW_TRANSACTIONS::request r{};
for (auto it = txs.begin(); it != txs.end(); ++it) {
r.txs.push_back(it->second);
}
get_protocol()->relay_transactions(r, fake_context);
m_mempool.set_relayed(txs);
}
return true;
}
//-----------------------------------------------------------------------------------------------
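    // Builds this node's uptime proof and relays it over the p2p layer. As a transition
    // workaround (to be removed after HF19), a legacy-format proof is also sent when the
    // primary and Ed25519 pubkeys differ.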
bool core::submit_uptime_proof() {
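// Only service nodes submit uptime proofs; for everything else this is a successful no-op.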
if (!m_service_node)
return true;
cryptonote_connection_context fake_context{};
auto height = get_current_blockchain_height();
auto proof = m_service_node_list.generate_uptime_proof(
m_sn_public_ip,
storage_https_port(),
storage_omq_port(),
ss_version,
m_quorumnet_port,
lokinet_version);
NOTIFY_BTENCODED_UPTIME_PROOF::request req = proof.generate_request();
bool relayed = get_protocol()->relay_btencoded_uptime_proof(req, fake_context);
// TODO: remove after HF19
if (relayed &&
tools::view_guts(m_service_keys.pub) != tools::view_guts(m_service_keys.pub_ed25519)) {
// Temp workaround: nodes with both pub and ed25519 are failing bt-encoded proofs, so send
// an old-style proof out as well as a workaround.
NOTIFY_UPTIME_PROOF::request req = m_service_node_list.generate_uptime_proof(
m_sn_public_ip, storage_https_port(), storage_omq_port(), m_quorumnet_port);
get_protocol()->relay_uptime_proof(req, fake_context);
}
if (relayed)
log::info(
logcat, "Submitted uptime-proof for Service Node (yours): {}", m_service_keys.pub);
return true;
}
//-----------------------------------------------------------------------------------------------
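// Handles an incoming legacy (pre-bt-encoded) uptime proof: the service node list validates it
// and, if it is accepted and the prover is an active service node, we add the prover's x25519
// pubkey to oxenmq's active-SN set so its connections are recognized as service node connections.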
bool core::handle_uptime_proof(
const NOTIFY_UPTIME_PROOF::request& proof, bool& my_uptime_proof_confirmation) {
crypto::x25519_public_key pkey = {};
bool result =
m_service_node_list.handle_uptime_proof(proof, my_uptime_proof_confirmation, pkey);
if (result && m_service_node_list.is_service_node(proof.pubkey, true /*require_active*/) &&
pkey) {
oxenmq::pubkey_set added;
added.insert(tools::copy_guts(pkey));
m_omq->update_active_sns(added, {} /*removed*/);
}
return result;
}
//-----------------------------------------------------------------------------------------------
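// Handles an incoming bt-encoded uptime proof (the newer proof format): reconstructs the Proof
// object and its two signatures from the request, then validates it and registers the prover's
// x25519 pubkey exactly as in the legacy handler above.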
bool core::handle_btencoded_uptime_proof(
const NOTIFY_BTENCODED_UPTIME_PROOF::request& req, bool& my_uptime_proof_confirmation) {
crypto::x25519_public_key pkey = {};
auto proof = std::make_unique<uptime_proof::Proof>(req.proof);
proof->sig = tools::make_from_guts<crypto::signature>(req.sig);
proof->sig_ed25519 = tools::make_from_guts<crypto::ed25519_signature>(req.ed_sig);
auto pubkey = proof->pubkey;
bool result = m_service_node_list.handle_btencoded_uptime_proof(
std::move(proof), my_uptime_proof_confirmation, pkey);
if (result && m_service_node_list.is_service_node(pubkey, true /*require_active*/) && pkey) {
oxenmq::pubkey_set added;
added.insert(tools::copy_guts(pkey));
m_omq->update_active_sns(added, {} /*removed*/);
}
return result;
}
//-----------------------------------------------------------------------------------------------
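// Marks a just-relayed transaction blob as relayed in the mempool (so we don't re-relay it) and
// returns its hash; returns a null hash if the blob fails to parse.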
crypto::hash core::on_transaction_relayed(const std::string& tx_blob) {
std::vector<std::pair<crypto::hash, std::string>> txs;
cryptonote::transaction tx;
crypto::hash tx_hash;
if (!parse_and_validate_tx_from_blob(tx_blob, tx, tx_hash)) {
log::error(logcat, "Failed to parse relayed transaction");
return crypto::null<crypto::hash>;
}
    txs.push_back(std::make_pair(tx_hash, tx_blob));  // copies: tx_blob is a const reference
m_mempool.set_relayed(txs);
return tx_hash;
}
//-----------------------------------------------------------------------------------------------
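// Relays pending service node votes: votes eligible for quorumnet go out via the quorumnet
// listener (only when running as a service node with quorumnet initialized), while the remaining
// relayable votes are broadcast to peers over p2p.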
bool core::relay_service_node_votes() {
auto height = get_current_blockchain_height();
auto hf_version = get_network_version(m_nettype, height);
auto quorum_votes = m_quorum_cop.get_relayable_votes(height, hf_version, true);
auto p2p_votes = m_quorum_cop.get_relayable_votes(height, hf_version, false);
if (!quorum_votes.empty() && m_quorumnet_state && m_service_node)
quorumnet_relay_obligation_votes(m_quorumnet_state, quorum_votes);
if (!p2p_votes.empty()) {
NOTIFY_NEW_SERVICE_NODE_VOTE::request req{};
req.votes = std::move(p2p_votes);
cryptonote_connection_context fake_context{};
get_protocol()->relay_service_node_votes(req, fake_context);
}
return true;
}
void core::set_service_node_votes_relayed(const std::vector<service_nodes::quorum_vote_t>& votes) {
m_quorum_cop.set_votes_relayed(votes);
}
//-----------------------------------------------------------------------------------------------
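// Thin wrappers that forward miner block template construction to the blockchain storage; the
// second overload below additionally accepts an explicit previous block to build on.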
bool core::create_next_miner_block_template(
block& b,
const account_public_address& adr,
difficulty_type& diffic,
uint64_t& height,
uint64_t& expected_reward,
const std::string& ex_nonce) {
return m_blockchain_storage.create_next_miner_block_template(
b, adr, diffic, height, expected_reward, ex_nonce);
}
//-----------------------------------------------------------------------------------------------
bool core::create_miner_block_template(
block& b,
const crypto::hash* prev_block,
const account_public_address& adr,
difficulty_type& diffic,
uint64_t& height,
uint64_t& expected_reward,
const std::string& ex_nonce) {
return m_blockchain_storage.create_miner_block_template(
b, prev_block, adr, diffic, height, expected_reward, ex_nonce);
}
//-----------------------------------------------------------------------------------------------
bool core::find_blockchain_supplement(
const std::list<crypto::hash>& qblock_ids,
NOTIFY_RESPONSE_CHAIN_ENTRY::request& resp) const {
return m_blockchain_storage.find_blockchain_supplement(qblock_ids, resp);
}
//-----------------------------------------------------------------------------------------------
bool core::find_blockchain_supplement(
const uint64_t req_start_block,
const std::list<crypto::hash>& qblock_ids,
std::vector<std::pair<
std::pair<std::string, crypto::hash>,
std::vector<std::pair<crypto::hash, std::string>>>>& blocks,
uint64_t& total_height,
uint64_t& start_height,
bool pruned,
bool get_miner_tx_hash,
size_t max_count) const {
return m_blockchain_storage.find_blockchain_supplement(
req_start_block,
qblock_ids,
blocks,
total_height,
start_height,
pruned,
get_miner_tx_hash,
max_count);
}
//-----------------------------------------------------------------------------------------------
bool core::get_outs(
const rpc::GET_OUTPUTS_BIN::request& req, rpc::GET_OUTPUTS_BIN::response& res) const {
return m_blockchain_storage.get_outs(req, res);
}
//-----------------------------------------------------------------------------------------------
bool core::get_output_distribution(
uint64_t amount,
uint64_t from_height,
uint64_t to_height,
uint64_t& start_height,
std::vector<uint64_t>& distribution,
uint64_t& base) const {
return m_blockchain_storage.get_output_distribution(
amount, from_height, to_height, start_height, distribution, base);
}
//-----------------------------------------------------------------------------------------------
void core::get_output_blacklist(std::vector<uint64_t>& blacklist) const {
m_blockchain_storage.get_output_blacklist(blacklist);
}
//-----------------------------------------------------------------------------------------------
bool core::get_tx_outputs_gindexs(const crypto::hash& tx_id, std::vector<uint64_t>& indexs) const {
return m_blockchain_storage.get_tx_outputs_gindexs(tx_id, indexs);
}
//-----------------------------------------------------------------------------------------------
bool core::get_tx_outputs_gindexs(
const crypto::hash& tx_id,
size_t n_txes,
std::vector<std::vector<uint64_t>>& indexs) const {
return m_blockchain_storage.get_tx_outputs_gindexs(tx_id, n_txes, indexs);
}
//-----------------------------------------------------------------------------------------------
void core::pause_mine() {
m_miner.pause();
}
//-----------------------------------------------------------------------------------------------
void core::resume_mine() {
m_miner.resume();
}
//-----------------------------------------------------------------------------------------------
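// Builds a block_complete_entry for a locally produced block: serializes the block itself and
// pulls each referenced transaction blob out of the mempool, throwing if any of them is missing.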
block_complete_entry get_block_complete_entry(block& b, tx_memory_pool& pool) {
block_complete_entry bce = {};
bce.block = cryptonote::block_to_blob(b);
for (const auto& tx_hash : b.tx_hashes) {
std::string txblob;
CHECK_AND_ASSERT_THROW_MES(
pool.get_transaction(tx_hash, txblob), "Transaction not found in pool");
bce.txs.push_back(txblob);
}
return bce;
}
//-----------------------------------------------------------------------------------------------
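// Called when this node produces a block (mined or pulse): pauses the miner, pushes the block
// through the normal incoming-block path, and, if it landed on the main chain and was not
// immediately displaced by a reorg, relays it to peers as a fluffy block.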
bool core::handle_block_found(block& b, block_verification_context& bvc) {
bvc = {};
std::vector<block_complete_entry> blocks;
m_miner.pause();
{
OXEN_DEFER {
m_miner.resume();
};
try {
blocks.push_back(get_block_complete_entry(b, m_mempool));
} catch (const std::exception& e) {
return false;
}
std::vector<block> pblocks;
if (!prepare_handle_incoming_blocks(blocks, pblocks)) {
log::error(logcat, "Block found, but failed to prepare to add");
return false;
}
        // add_new_block will verify the block and set bvc.m_verifivation_failed accordingly
add_new_block(b, bvc, nullptr /*checkpoint*/);
cleanup_handle_incoming_blocks(true);
m_miner.on_block_chain_update();
}
if (bvc.m_verifivation_failed) {
bool pulse = cryptonote::block_has_pulse_components(b);
log::error(
log::Cat("verify"),
"{} block failed verification\n{}",
(pulse ? "Pulse" : "Mined"),
cryptonote::obj_to_json_str(b));
return false;
} else if (bvc.m_added_to_main_chain) {
std::unordered_set<crypto::hash> missed_txs;
std::vector<std::string> txs;
m_blockchain_storage.get_transactions_blobs(b.tx_hashes, txs, &missed_txs);
        if (!missed_txs.empty() &&
            m_blockchain_storage.get_block_id_by_height(get_block_height(b)) != get_block_hash(b)) {
            log::info(
                    logcat,
                    "Block found, but a reorganization seems to have occurred immediately "
                    "afterwards; not relaying this block");
return true;
}
        CHECK_AND_ASSERT_MES(
                txs.size() == b.tx_hashes.size() && missed_txs.empty(),
                false,
                "can't find some transactions in found block:"
                        << get_block_hash(b) << " txs.size()=" << txs.size()
                        << ", b.tx_hashes.size()=" << b.tx_hashes.size() << ", missed_txs.size()="
                        << missed_txs.size());
cryptonote_connection_context exclude_context{};
NOTIFY_NEW_FLUFFY_BLOCK::request arg{};
arg.current_blockchain_height = m_blockchain_storage.get_current_blockchain_height();
arg.b = blocks[0];
m_pprotocol->relay_block(arg, exclude_context);
}
return true;
}
//-----------------------------------------------------------------------------------------------
void core::on_synchronized() {
m_miner.on_synchronized();
}
//-----------------------------------------------------------------------------------------------
void core::safesyncmode(const bool onoff) {
m_blockchain_storage.safesyncmode(onoff);
}
//-----------------------------------------------------------------------------------------------
bool core::add_new_block(
const block& b, block_verification_context& bvc, checkpoint_t const* checkpoint) {
bool result = m_blockchain_storage.add_new_block(b, bvc, checkpoint);
if (result)
relay_service_node_votes(); // NOTE: nop if synchronising due to not accepting votes whilst
// syncing
return result;
}
//-----------------------------------------------------------------------------------------------
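// Locks m_incoming_tx_lock and pre-processes a batch of incoming blocks. On success the lock is
// deliberately left held: the caller must finish the batch with cleanup_handle_incoming_blocks(),
// which releases it. On failure the cleanup (and therefore the unlock) happens right here.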
bool core::prepare_handle_incoming_blocks(
const std::vector<block_complete_entry>& blocks_entry, std::vector<block>& blocks) {
m_incoming_tx_lock.lock();
if (!m_blockchain_storage.prepare_handle_incoming_blocks(blocks_entry, blocks)) {
cleanup_handle_incoming_blocks(false);
return false;
}
return true;
}
//-----------------------------------------------------------------------------------------------
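// Completes a batch started by prepare_handle_incoming_blocks(), optionally forcing a database
// sync; the incoming tx lock is always released, even if the underlying cleanup throws.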
bool core::cleanup_handle_incoming_blocks(bool force_sync) {
bool success = false;
try {
success = m_blockchain_storage.cleanup_handle_incoming_blocks(force_sync);
} catch (...) {
}
m_incoming_tx_lock.unlock();
return success;
}
//-----------------------------------------------------------------------------------------------
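// Handles a block arriving from the network: size-checks the blob, parses it if the caller did
// not supply an already-parsed block, hands it to add_new_block(), and optionally refreshes the
// miner's block template when the block extends the main chain.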
bool core::handle_incoming_block(
const std::string& block_blob,
const block* b,
block_verification_context& bvc,
checkpoint_t* checkpoint,
bool update_miner_blocktemplate) {
TRY_ENTRY();
bvc = {};
if (!check_incoming_block_size(block_blob)) {
bvc.m_verifivation_failed = true;
return false;
2014-03-03 23:07:58 +01:00
}
if (((size_t)-1) <= 0xffffffff && block_blob.size() >= 0x3fffffff)
        log::warning(
                logcat, "This block's size is {}, closing in on the 32-bit limit", block_blob.size());
CHECK_AND_ASSERT_MES(
update_checkpoints_from_json_file(),
false,
"One or more checkpoints loaded from json conflicted with existing checkpoints.");
block lb;
if (!b) {
crypto::hash block_hash;
if (!parse_and_validate_block_from_blob(block_blob, lb, block_hash)) {
log::info(logcat, "Failed to parse and validate new block");
bvc.m_verifivation_failed = true;
return false;
}
b = &lb;
2014-03-03 23:07:58 +01:00
}
2019-08-14 06:52:51 +02:00
add_new_block(*b, bvc, checkpoint);
if (update_miner_blocktemplate && bvc.m_added_to_main_chain)
m_miner.on_block_chain_update();
return true;
CATCH_ENTRY_L0("core::handle_incoming_block()", false);
}
//-----------------------------------------------------------------------------------------------
// Used by the RPC server to check the size of an incoming block blob
bool core::check_incoming_block_size(const std::string& block_blob) const {
// note: we assume block weight is always >= block blob size, so we check incoming
// blob size against the block weight limit, which acts as a sanity check without
// having to parse/weigh first; in fact, since the block blob is the block header
// plus the tx hashes, the weight will typically be much larger than the blob size
if (block_blob.size() > m_blockchain_storage.get_current_cumulative_block_weight_limit() +
BLOCK_SIZE_SANITY_LEEWAY) {
        log::info(
                logcat,
                "Wrong block blob: sanity check failed on size {}, rejected",
                block_blob.size());
return false;
}
return true;
}
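// Pushes the current set of active service node x25519 pubkeys into oxenmq so that incoming
// connections authenticated with those keys are recognized as service nodes.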
void core::update_omq_sns() {
// TODO: let callers (e.g. lokinet, ss) subscribe to callbacks when this fires
oxenmq::pubkey_set active_sns;
m_service_node_list.copy_active_x25519_pubkeys(std::inserter(active_sns, active_sns.end()));
m_omq->set_active_sns(std::move(active_sns));
}
//-----------------------------------------------------------------------------------------------
crypto::hash core::get_tail_id() const {
return m_blockchain_storage.get_tail_id();
}
//-----------------------------------------------------------------------------------------------
difficulty_type core::get_block_cumulative_difficulty(uint64_t height) const {
return m_blockchain_storage.get_db().get_block_cumulative_difficulty(height);
}
//-----------------------------------------------------------------------------------------------
bool core::have_block(const crypto::hash& id) const {
return m_blockchain_storage.have_block(id);
}
//-----------------------------------------------------------------------------------------------
crypto::hash core::get_block_id_by_height(uint64_t height) const {
return m_blockchain_storage.get_block_id_by_height(height);
}
//-----------------------------------------------------------------------------------------------
bool core::get_block_by_hash(const crypto::hash& h, block& blk, bool* orphan) const {
return m_blockchain_storage.get_block_by_hash(h, blk, orphan);
}
//-----------------------------------------------------------------------------------------------
bool core::get_block_by_height(uint64_t height, block& blk) const {
return m_blockchain_storage.get_block_by_height(height, blk);
}
//-----------------------------------------------------------------------------------------------
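// Returns true if we have heard from `what` (an external process expected to
// ping us, such as the storage server or lokinet) within `lifetime`; otherwise
// logs a warning describing how stale the last ping is and returns false.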
static bool check_external_ping(
time_t last_ping, std::chrono::seconds lifetime, std::string_view what) {
const std::chrono::seconds elapsed{std::time(nullptr) - last_ping};
if (elapsed > lifetime) {
log::warning(
logcat,
"Have not heard from {} {}",
what,
(!last_ping ? "since starting"
: "since more than " + tools::get_human_readable_timespan(elapsed) +
" ago"));
return false;
}
return true;
}
void core::reset_proof_interval() {
m_check_uptime_proof_interval.reset();
}
//-----------------------------------------------------------------------------------------------
void core::do_uptime_proof_call() {
std::vector<service_nodes::service_node_pubkey_info> const states =
get_service_node_list_state({m_service_keys.pub});
// wait one block before starting uptime proofs (but not on testnet/devnet, where we sometimes
// have mass registrations/deregistrations where the waiting causes problems).
uint64_t delay_blocks = m_nettype == network_type::MAINNET ? 1 : 0;
if (!states.empty() &&
(states[0].info->registration_height + delay_blocks) < get_current_blockchain_height()) {
m_check_uptime_proof_interval.do_call([this]() {
// This timer is not perfectly precise and can leak seconds slightly, so send the uptime
// proof if we are within half a tick of the target time. (Essentially our target proof
// window becomes the first time this triggers in the 59.75-60.25 minute window).
uint64_t next_proof_time = 0;
m_service_node_list.access_proof(
m_service_keys.pub, [&](auto& proof) { next_proof_time = proof.timestamp; });
auto& netconf = get_net_config();
next_proof_time +=
std::chrono::seconds{
netconf.UPTIME_PROOF_FREQUENCY -
netconf.UPTIME_PROOF_CHECK_INTERVAL / 2}
.count();
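            // Illustrative numbers (assumed here, not read from the net config):
            // with UPTIME_PROOF_FREQUENCY = 1h and UPTIME_PROOF_CHECK_INTERVAL =
            // 30s, a proof sent at time T is next due at T + 59m45s, so the first
            // timer tick landing in the 59.75-60.25 minute window sends it, as
            // the comment above describes.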
if ((uint64_t)std::time(nullptr) < next_proof_time)
return;
auto pubkey = m_service_node_list.get_pubkey_from_x25519(m_service_keys.pub_x25519);
if (pubkey && pubkey != m_service_keys.pub &&
m_service_node_list.is_service_node(pubkey, false /*don't require active*/)) {
log::info(
logcat,
fg(fmt::terminal_color::red),
"Failed to submit uptime proof: another service node on the network is "
"using the same ed/x25519 keys as this service node. This typically means "
"both have the same 'key_ed25519' private key file.");
return;
}
{
std::vector<crypto::public_key> sn_pks;
auto sns = m_service_node_list.get_service_node_list_state();
sn_pks.reserve(sns.size());
for (const auto& sni : sns)
sn_pks.push_back(sni.pubkey);
m_service_node_list.for_each_service_node_info_and_proof(
sn_pks.begin(), sn_pks.end(), [&](auto& pk, auto& sni, auto& proof) {
if (pk != m_service_keys.pub &&
proof.proof->public_ip == m_sn_public_ip &&
(proof.proof->qnet_port == m_quorumnet_port ||
(m_nettype != network_type::DEVNET &&
(proof.proof->storage_https_port == storage_https_port() ||
proof.proof->storage_omq_port == storage_omq_port()))))
log::info(
logcat,
fg(fmt::terminal_color::red),
"Another service node ({}) is broadcasting the same public "
"IP and ports as this service node ({}:{}[qnet], "
":{}[SS-HTTP], :{}[SS-OMQ]). This will lead to "
"deregistration of one or both service nodes if not "
"corrected. (Do both service nodes have the correct IP for "
"the service-node-public-ip setting?)",
pk,
epee::string_tools::get_ip_string_from_int32(
m_sn_public_ip),
proof.proof->qnet_port,
proof.proof->storage_https_port,
proof.proof->storage_omq_port);
});
}
if (m_nettype != network_type::DEVNET) {
if (!check_external_ping(
m_last_storage_server_ping,
get_net_config().UPTIME_PROOF_FREQUENCY,
"the storage server")) {
log::info(
logcat,
fg(fmt::terminal_color::red),
"Failed to submit uptime proof: have not heard from the storage server "
"recently. Make sure that it is running! It is required to run "
"alongside the Loki daemon");
return;
}
if (!check_external_ping(
m_last_lokinet_ping,
get_net_config().UPTIME_PROOF_FREQUENCY,
"Lokinet")) {
log::info(
logcat,
fg(fmt::terminal_color::red),
"Failed to submit uptime proof: have not heard from lokinet recently. "
"Make sure that it is running! It is required to run alongside the "
"Loki daemon");
return;
}
}
submit_uptime_proof();
});
} else {
// reset the interval so that we're ready when we register, OR if we get deregistered this
// primes us up for re-registration in the same session
m_check_uptime_proof_interval.reset();
}
}
//-----------------------------------------------------------------------------------------------
bool core::on_idle() {
if (!m_starter_message_showed) {
std::string main_message;
if (m_offline)
main_message =
"The daemon is running offline and will not attempt to sync to the Loki "
"network.";
else
main_message =
"The daemon will start synchronizing with the network. This may take a long "
"time to complete.";
log::info(
logcat,
fg(fmt::terminal_color::yellow),
R"(
**********************************************************************
{}
You can set the level of log detail through the "set_log <level|categories>" command,
where <level> is between 0 (no details) and 4 (very verbose), or use custom category-based levels (e.g. *:WARNING).
Use the "help" command to see the list of available commands.
Use "help <command>" to see a command's documentation.
**********************************************************************
)",
main_message);
m_starter_message_showed = true;
}
m_txpool_auto_relayer.do_call([this] { return relay_txpool_transactions(); });
m_service_node_vote_relayer.do_call([this] { return relay_service_node_votes(); });
m_check_disk_space_interval.do_call([this] { return check_disk_space(); });
m_block_rate_interval.do_call([this] { return check_block_rate(); });
m_sn_proof_cleanup_interval.do_call([&snl = m_service_node_list] {
snl.cleanup_proofs();
return true;
});
std::chrono::seconds lifetime{time(nullptr) - get_start_time()};
if (m_service_node &&
lifetime > get_net_config().UPTIME_PROOF_STARTUP_DELAY) // Give us some time to connect to
// peers before sending uptimes
{
do_uptime_proof_call();
}
m_blockchain_pruning_interval.do_call([this] { return update_blockchain_pruning(); });
m_miner.on_idle();
m_mempool.on_idle();
#ifdef ENABLE_SYSTEMD
m_systemd_notify_interval.do_call(
[this] { sd_notify(0, ("WATCHDOG=1\nSTATUS=" + get_status_string()).c_str()); });
#endif
return true;
}
//-----------------------------------------------------------------------------------------------
bool core::check_disk_space() {
uint64_t free_space = get_free_space();
if (free_space < 1ull * 1024 * 1024 * 1024) // 1 GB
log::warning(
logcat,
fg(fmt::terminal_color::red),
"Free space is below 1 GB on {}",
m_config_folder);
return true;
}
//-----------------------------------------------------------------------------------------------
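// Note: computed in double precision, so the result saturates to +inf once n
// exceeds ~170 (171! > DBL_MAX); the block counts passed in here stay far
// below that.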
double factorial(unsigned int n) {
if (n <= 1)
return 1.0;
double f = n;
while (n-- > 1)
f *= n;
return f;
}
//-----------------------------------------------------------------------------------------------
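// Poisson probability mass function, P(X = k) = lambda^k * e^{-lambda} / k!,
// where k is the observed block count and lambda ("expected") is the number of
// blocks the target block time predicts for the window.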
static double probability1(unsigned int blocks, unsigned int expected) {
// https://www.umass.edu/wsp/resources/poisson/#computing
return pow(expected, blocks) / (factorial(blocks) * exp(expected));
}
//-----------------------------------------------------------------------------------------------
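// Tail probability of the Poisson distribution above: when the window
// under-produced (blocks <= expected) this sums the lower tail P(X <= blocks);
// when it over-produced, the upper tail P(X >= blocks), truncated at 3x the
// expectation where the remaining terms are negligible.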
static double probability(unsigned int blocks, unsigned int expected) {
double p = 0.0;
if (blocks <= expected) {
for (unsigned int b = 0; b <= blocks; ++b)
p += probability1(b, expected);
    } else {  // blocks > expected
for (unsigned int b = blocks; b <= expected * 3 /* close enough */; ++b)
p += probability1(b, expected);
}
return p;
}
//-----------------------------------------------------------------------------------------------
bool core::check_block_rate() {
if (m_offline || m_nettype == network_type::FAKECHAIN ||
m_target_blockchain_height > get_current_blockchain_height() ||
m_target_blockchain_height == 0) {
log::debug(logcat, "Not checking block rate, offline or syncing");
return true;
}
static constexpr double threshold =
1. / ((24h * 10) / TARGET_BLOCK_TIME); // one false positive every 10 days
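    // For example, with a 2-minute TARGET_BLOCK_TIME (an assumed value for
    // illustration) this works out to 1/7200 ~= 1.4e-4: 10 days contain 7200
    // block intervals, so a window whose p-value dips below the threshold by
    // pure chance should show up roughly once in that span.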
static constexpr unsigned int max_blocks_checked = 150;
    const time_t now = time(nullptr);
const std::vector<time_t> timestamps =
m_blockchain_storage.get_last_block_timestamps(max_blocks_checked);
static const unsigned int seconds[] = {5400, 3600, 1800, 1200, 600};
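    // i.e. look-back windows of 90, 60, 30, 20 and 10 minutes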
for (size_t n = 0; n < sizeof(seconds) / sizeof(seconds[0]); ++n) {
unsigned int b = 0;
const time_t time_boundary = now - static_cast<time_t>(seconds[n]);
for (time_t ts : timestamps)
b += ts >= time_boundary;
const double p = probability(b, seconds[n] / tools::to_seconds(TARGET_BLOCK_TIME));
log::debug(
logcat,
"blocks in the last {} minutes: {} (probability {})",
seconds[n] / 60,
b,
p);
if (p < threshold) {
log::warning(
logcat,
"There were {}{} blocks in the last {} minutes, \
there might be large hash rate changes, or we might be partitioned, \
cut off from the Loki network or under attack, or your computer's time is off. \
Or it could be just sheer bad luck.",
b,
(b == max_blocks_checked ? " or more" : ""),
seconds[n] / 60);
break; // no need to look further
}
}
return true;
}
//-----------------------------------------------------------------------------------------------
void core::flush_bad_txs_cache() {
    std::unique_lock lock{bad_semantics_txes_lock};
    for (int idx = 0; idx < 2; ++idx)
        bad_semantics_txes[idx].clear();
}
//-----------------------------------------------------------------------------------------------
void core::flush_invalid_blocks() {
m_blockchain_storage.flush_invalid_blocks();
}
bool core::update_blockchain_pruning() {
return m_blockchain_storage.update_blockchain_pruning();
}
//-----------------------------------------------------------------------------------------------
bool core::check_blockchain_pruning() {
return m_blockchain_storage.check_blockchain_pruning();
}
//-----------------------------------------------------------------------------------------------
void core::set_target_blockchain_height(uint64_t target_blockchain_height) {
m_target_blockchain_height = target_blockchain_height;
}
//-----------------------------------------------------------------------------------------------
uint64_t core::get_target_blockchain_height() const {
return m_target_blockchain_height;
}
//-----------------------------------------------------------------------------------------------
uint64_t core::prevalidate_block_hashes(uint64_t height, const std::vector<crypto::hash>& hashes) {
return get_blockchain_storage().prevalidate_block_hashes(height, hashes);
}
//-----------------------------------------------------------------------------------------------
uint64_t core::get_free_space() const {
return fs::space(m_config_folder).available;
}
//-----------------------------------------------------------------------------------------------
std::shared_ptr<const service_nodes::quorum> core::get_quorum(
service_nodes::quorum_type type,
uint64_t height,
bool include_old,
std::vector<std::shared_ptr<const service_nodes::quorum>>* alt_states) const {
return m_service_node_list.get_quorum(type, height, include_old, alt_states);
}
//-----------------------------------------------------------------------------------------------
bool core::is_service_node(const crypto::public_key& pubkey, bool require_active) const {
return m_service_node_list.is_service_node(pubkey, require_active);
}
//-----------------------------------------------------------------------------------------------
const std::vector<service_nodes::key_image_blacklist_entry>&
core::get_service_node_blacklisted_key_images() const {
return m_service_node_list.get_blacklisted_key_images();
}
//-----------------------------------------------------------------------------------------------
std::vector<service_nodes::service_node_pubkey_info> core::get_service_node_list_state(
const std::vector<crypto::public_key>& service_node_pubkeys) const {
return m_service_node_list.get_service_node_list_state(service_node_pubkeys);
}
//-----------------------------------------------------------------------------------------------
bool core::add_service_node_vote(
const service_nodes::quorum_vote_t& vote, vote_verification_context& vvc) {
return m_quorum_cop.handle_vote(vote, vvc);
}
//-----------------------------------------------------------------------------------------------
uint32_t core::get_blockchain_pruning_seed() const {
return get_blockchain_storage().get_blockchain_pruning_seed();
}
//-----------------------------------------------------------------------------------------------
bool core::prune_blockchain(uint32_t pruning_seed) {
return get_blockchain_storage().prune_blockchain(pruning_seed);
}
//-----------------------------------------------------------------------------------------------
std::time_t core::get_start_time() const {
return start_time;
}
//-----------------------------------------------------------------------------------------------
void core::graceful_exit() {
raise(SIGTERM);
}
} // namespace cryptonote