// Copyright (c) 2014-2019, The Monero Project
// Copyright (c) 2018, The Loki Project
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
//    conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
//    of conditions and the following disclaimer in the documentation and/or other
//    materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
//    used to endorse or promote products derived from this software without specific
//    prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers
#include "include_base_utils.h"
#include "string_tools.h"
using namespace epee;

#include "core_rpc_server.h"
#include "common/command_line.h"
#include "common/updates.h"
#include "common/download.h"
#include "common/loki.h"
#include "common/util.h"
#include "common/perf_timer.h"
#include "cryptonote_basic/cryptonote_format_utils.h"
#include "cryptonote_basic/account.h"
#include "cryptonote_basic/cryptonote_basic_impl.h"
#include "cryptonote_core/tx_sanity_check.h"
#include "misc_language.h"
#include "net/parse.h"
#include "storages/http_abstract_invoke.h"
#include "crypto/hash.h"
#include "rpc/rpc_args.h"
#include "rpc/rpc_handler.h"
#include "core_rpc_server_error_codes.h"
#include "p2p/net_node.h"
#include "version.h"

#undef LOKI_DEFAULT_LOG_CATEGORY
#define LOKI_DEFAULT_LOG_CATEGORY "daemon.rpc"

#define MAX_RESTRICTED_FAKE_OUTS_COUNT 40
#define MAX_RESTRICTED_GLOBAL_FAKE_OUTS_COUNT 5000

namespace
{
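  // Round `value` up to the next multiple of `quantum`, e.g. round_up(7, 5) == 10.
  // Used by the restricted RPC path below to avoid reporting the exact database size.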
  uint64_t round_up(uint64_t value, uint64_t quantum)
  {
    return (value + quantum - 1) / quantum * quantum;
  }
}
namespace cryptonote
{
  //-----------------------------------------------------------------------------------
  void core_rpc_server::init_options(boost::program_options::options_description& desc)
  {
    command_line::add_arg(desc, arg_rpc_bind_port);
    command_line::add_arg(desc, arg_rpc_restricted_bind_port);
    command_line::add_arg(desc, arg_restricted_rpc);
    command_line::add_arg(desc, arg_rpc_ssl);
    command_line::add_arg(desc, arg_rpc_ssl_private_key);
    command_line::add_arg(desc, arg_rpc_ssl_certificate);
    command_line::add_arg(desc, arg_rpc_ssl_ca_certificates);
    command_line::add_arg(desc, arg_rpc_ssl_allowed_fingerprints);
    command_line::add_arg(desc, arg_rpc_ssl_allow_any_cert);
    command_line::add_arg(desc, arg_bootstrap_daemon_address);
    command_line::add_arg(desc, arg_bootstrap_daemon_login);
    cryptonote::rpc_args::init_options(desc);
  }
  //------------------------------------------------------------------------------------------------------------------------------
  core_rpc_server::core_rpc_server(
      core& cr
    , nodetool::node_server<cryptonote::t_cryptonote_protocol_handler<cryptonote::core> >& p2p
    )
    : m_core(cr)
    , m_p2p(p2p)
  {}
  //------------------------------------------------------------------------------------------------------------------------------
  bool core_rpc_server::init(
      const boost::program_options::variables_map& vm
    , const bool restricted
    , const std::string& port
    )
  {
    m_restricted = restricted;
    m_net_server.set_threads_prefix("RPC");
    auto rpc_config = cryptonote::rpc_args::process(vm);
    if (!rpc_config)
      return false;

    m_bootstrap_daemon_address = command_line::get_arg(vm, arg_bootstrap_daemon_address);
    if (!m_bootstrap_daemon_address.empty())
    {
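      // The bootstrap daemon login, when provided, is expected as "username:password";
      // everything before the first ':' is the username, the remainder is the password.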
      const std::string &bootstrap_daemon_login = command_line::get_arg(vm, arg_bootstrap_daemon_login);
      const auto loc = bootstrap_daemon_login.find(':');
      if (!bootstrap_daemon_login.empty() && loc != std::string::npos)
      {
        epee::net_utils::http::login login;
        login.username = bootstrap_daemon_login.substr(0, loc);
        login.password = bootstrap_daemon_login.substr(loc + 1);
        m_http_client.set_server(m_bootstrap_daemon_address, login, epee::net_utils::ssl_support_t::e_ssl_support_autodetect);
      }
      else
      {
        m_http_client.set_server(m_bootstrap_daemon_address, boost::none, epee::net_utils::ssl_support_t::e_ssl_support_autodetect);
      }
      m_should_use_bootstrap_daemon = true;
    }
    else
    {
      m_should_use_bootstrap_daemon = false;
    }
    m_was_bootstrap_ever_used = false;

    boost::optional<epee::net_utils::http::login> http_login{};
2017-02-05 23:48:03 +01:00
    if (rpc_config->login)
      http_login.emplace(std::move(rpc_config->login->username), std::move(rpc_config->login->password).password());

    epee::net_utils::ssl_options_t ssl_options = epee::net_utils::ssl_support_t::e_ssl_support_autodetect;
    if (command_line::get_arg(vm, arg_rpc_ssl_allow_any_cert))
      ssl_options.verification = epee::net_utils::ssl_verification_t::none;
    else
    {
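      // Allowed fingerprints arrive as hex strings on the command line; convert each to
      // raw bytes. If a CA file or any fingerprints were supplied, replace the SSL options
      // with ones that verify peers against them (see the user_certificates check below).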
      std::string ssl_ca_path = command_line::get_arg(vm, arg_rpc_ssl_ca_certificates);
      const std::vector<std::string> ssl_allowed_fingerprint_strings = command_line::get_arg(vm, arg_rpc_ssl_allowed_fingerprints);
      std::vector<std::vector<uint8_t>> ssl_allowed_fingerprints{ ssl_allowed_fingerprint_strings.size() };
      std::transform(ssl_allowed_fingerprint_strings.begin(), ssl_allowed_fingerprint_strings.end(), ssl_allowed_fingerprints.begin(), epee::from_hex::vector);
      if (!ssl_ca_path.empty() || !ssl_allowed_fingerprints.empty())
        ssl_options = epee::net_utils::ssl_options_t{std::move(ssl_allowed_fingerprints), std::move(ssl_ca_path)};
    }
    ssl_options.auth = epee::net_utils::ssl_authentication_t{
      command_line::get_arg(vm, arg_rpc_ssl_private_key), command_line::get_arg(vm, arg_rpc_ssl_certificate)
    };
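    // Note: if no private key/certificate pair is supplied, the epee server side is
    // expected to fall back to a temporary self-signed certificate.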
    // A user-specified CA file or fingerprints implies SSL enabled by default
    if (ssl_options.verification != epee::net_utils::ssl_verification_t::user_certificates || !command_line::is_arg_defaulted(vm, arg_rpc_ssl))
    {
      const std::string ssl = command_line::get_arg(vm, arg_rpc_ssl);
      if (!epee::net_utils::ssl_support_from_string(ssl_options.support, ssl))
      {
        MFATAL("Invalid RPC SSL support: " << ssl);
        return false;
      }
    }
    auto rng = [](size_t len, uint8_t *ptr){ return crypto::rand(len, ptr); };
    return epee::http_server_impl_base<core_rpc_server, connection_context>::init(
      rng, std::move(port), std::move(rpc_config->bind_ip), std::move(rpc_config->access_control_origins), std::move(http_login), std::move(ssl_options)
    );
  }
  //------------------------------------------------------------------------------------------------------------------------------
  bool core_rpc_server::check_core_ready()
  {
    if(!m_p2p.get_payload_object().is_synchronized())
    {
      return false;
    }
    return true;
  }
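  // Convenience guard for RPC handlers: if the core is still syncing, report
  // CORE_RPC_STATUS_BUSY to the caller and bail out of the handler early.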
#define CHECK_CORE_READY() do { if(!check_core_ready()){res.status =  CORE_RPC_STATUS_BUSY;return true;} } while(0)
  //------------------------------------------------------------------------------------------------------------------------------
  bool core_rpc_server::on_get_height(const COMMAND_RPC_GET_HEIGHT::request& req, COMMAND_RPC_GET_HEIGHT::response& res, const connection_context *ctx)
  {
    PERF_TIMER(on_get_height);
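    // As in most handlers below: if a bootstrap daemon is configured and should be used,
    // forward the request to it and return its result instead of answering locally.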
    bool r;
    if (use_bootstrap_daemon_if_necessary<COMMAND_RPC_GET_HEIGHT>(invoke_http_mode::JON, "/getheight", req, res, r))
      return r;

    crypto::hash hash;
    m_core.get_blockchain_top(res.height, hash);
    ++res.height; // block height to chain height
    res.hash = string_tools::pod_to_hex(hash);
    res.status = CORE_RPC_STATUS_OK;
    return true;
  }
  //------------------------------------------------------------------------------------------------------------------------------
  bool core_rpc_server::on_get_info(const COMMAND_RPC_GET_INFO::request& req, COMMAND_RPC_GET_INFO::response& res, const connection_context *ctx)
  {
    PERF_TIMER(on_get_info);
    bool r;
    if (use_bootstrap_daemon_if_necessary<COMMAND_RPC_GET_INFO>(invoke_http_mode::JON, "/getinfo", req, res, r))
    {
      res.bootstrap_daemon_address = m_bootstrap_daemon_address;
      crypto::hash top_hash;
      m_core.get_blockchain_top(res.height_without_bootstrap, top_hash);
      ++res.height_without_bootstrap; // turn top block height into blockchain height
      res.was_bootstrap_ever_used = true;
      return r;
    }

    const bool restricted = m_restricted && ctx;

    crypto::hash top_hash;
    m_core.get_blockchain_top(res.height, top_hash);
    ++res.height; // turn top block height into blockchain height
    res.top_block_hash = string_tools::pod_to_hex(top_hash);
    res.target_height = m_core.get_target_blockchain_height();
    res.difficulty = m_core.get_blockchain_storage().get_difficulty_for_next_block();
    res.target = m_core.get_blockchain_storage().get_difficulty_target();
    res.tx_count = m_core.get_blockchain_storage().get_total_transactions() - res.height; //without coinbase
    res.tx_pool_size = m_core.get_pool_transactions_count();
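    // For restricted (public) RPC, zero out or blur anything that could identify or
    // profile this particular node: connection counts, peer list sizes, uptime, free
    // disk space, bootstrap usage, exact database size and version.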
    res.alt_blocks_count = restricted ? 0 : m_core.get_blockchain_storage().get_alternative_blocks_count();
    uint64_t total_conn = restricted ? 0 : m_p2p.get_public_connections_count();
    res.outgoing_connections_count = restricted ? 0 : m_p2p.get_public_outgoing_connections_count();
    res.incoming_connections_count = restricted ? 0 : (total_conn - res.outgoing_connections_count);
    res.rpc_connections_count = restricted ? 0 : get_connections_count();
    res.white_peerlist_size = restricted ? 0 : m_p2p.get_public_white_peers_count();
    res.grey_peerlist_size = restricted ? 0 : m_p2p.get_public_gray_peers_count();

    cryptonote::network_type nettype = m_core.get_nettype();
    res.mainnet = nettype == MAINNET;
    res.testnet = nettype == TESTNET;
    res.stagenet = nettype == STAGENET;
    res.nettype = nettype == MAINNET ? "mainnet" : nettype == TESTNET ? "testnet" : nettype == STAGENET ? "stagenet" : "fakechain";

    res.cumulative_difficulty = m_core.get_blockchain_storage().get_db().get_block_cumulative_difficulty(res.height - 1);
    res.block_size_limit = res.block_weight_limit = m_core.get_blockchain_storage().get_current_cumulative_block_weight_limit();
    res.block_size_median = res.block_weight_median = m_core.get_blockchain_storage().get_current_cumulative_block_weight_median();
    res.start_time = restricted ? 0 : (uint64_t)m_core.get_start_time();
    res.free_space = restricted ? std::numeric_limits<uint64_t>::max() : m_core.get_free_space();
    res.offline = m_core.offline();
    res.bootstrap_daemon_address = restricted ? "" : m_bootstrap_daemon_address;
    res.height_without_bootstrap = restricted ? 0 : res.height;
    if (restricted)
      res.was_bootstrap_ever_used = false;
    else
    {
      boost::shared_lock<boost::shared_mutex> lock(m_bootstrap_daemon_mutex);
      res.was_bootstrap_ever_used = m_was_bootstrap_ever_used;
    }
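    // Restricted callers only see the database size rounded up to the next 5 GiB, so the
    // exact on-disk size is not exposed.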
    res.database_size = m_core.get_blockchain_storage().get_db().get_database_size();
    if (restricted)
      res.database_size = round_up(res.database_size, 5ull * 1024 * 1024 * 1024);
    res.update_available = restricted ? false : m_core.is_update_available();
    res.version = restricted ? "" : LOKI_VERSION;
    res.status = CORE_RPC_STATUS_OK;
    return true;
  }
  //------------------------------------------------------------------------------------------------------------------------------
  bool core_rpc_server::on_get_net_stats(const COMMAND_RPC_GET_NET_STATS::request& req, COMMAND_RPC_GET_NET_STATS::response& res, const connection_context *ctx)
  {
    PERF_TIMER(on_get_net_stats);
    // No bootstrap daemon check: only ever get stats about the local server
    res.start_time = (uint64_t)m_core.get_start_time();
    {
      CRITICAL_REGION_LOCAL(epee::net_utils::network_throttle_manager::m_lock_get_global_throttle_in);
      epee::net_utils::network_throttle_manager::get_global_throttle_in().get_stats(res.total_packets_in, res.total_bytes_in);
    }
    {
      CRITICAL_REGION_LOCAL(epee::net_utils::network_throttle_manager::m_lock_get_global_throttle_out);
      epee::net_utils::network_throttle_manager::get_global_throttle_out().get_stats(res.total_packets_out, res.total_bytes_out);
    }
    res.status = CORE_RPC_STATUS_OK;
    return true;
  }
  //------------------------------------------------------------------------------------------------------------------------------
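  // Serialization wrapper that writes only a transaction's base (non-prunable) data via
  // serialize_base(); this is the pruned form returned by the transaction RPCs below.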
  class pruned_transaction {
    transaction& tx;
  public:
    pruned_transaction(transaction& tx) : tx(tx) {}
    BEGIN_SERIALIZE_OBJECT()
      bool r = tx.serialize_base(ar);
      if (!r) return false;
    END_SERIALIZE()
  };
  //------------------------------------------------------------------------------------------------------------------------------
  bool core_rpc_server::on_get_blocks(const COMMAND_RPC_GET_BLOCKS_FAST::request& req, COMMAND_RPC_GET_BLOCKS_FAST::response& res, const connection_context *ctx)
  {
    PERF_TIMER(on_get_blocks);
    bool r;
    if (use_bootstrap_daemon_if_necessary<COMMAND_RPC_GET_BLOCKS_FAST>(invoke_http_mode::BIN, "/getblocks.bin", req, res, r))
      return r;

    std::vector<std::pair<std::pair<cryptonote::blobdata, crypto::hash>, std::vector<std::pair<crypto::hash, cryptonote::blobdata> > > > bs;

    if(!m_core.find_blockchain_supplement(req.start_height, req.block_ids, bs, res.current_height, res.start_height, req.prune, !req.no_miner_tx, COMMAND_RPC_GET_BLOCKS_FAST_MAX_COUNT))
    {
      res.status = "Failed";
      return false;
    }

    size_t pruned_size = 0, unpruned_size = 0, ntxes = 0;
    res.blocks.reserve(bs.size());
    res.output_indices.reserve(bs.size());
    for(auto& bd: bs)
    {
      res.blocks.resize(res.blocks.size() + 1);
      res.blocks.back().block = bd.first.first;
      pruned_size += bd.first.first.size();
      unpruned_size += bd.first.first.size();
      res.output_indices.push_back(COMMAND_RPC_GET_BLOCKS_FAST::block_output_indices());
      ntxes += bd.second.size();
      res.output_indices.back().indices.reserve(1 + bd.second.size());
      if (req.no_miner_tx)
        res.output_indices.back().indices.push_back(COMMAND_RPC_GET_BLOCKS_FAST::tx_output_indices());
      res.blocks.back().txs.reserve(bd.second.size());
      for (std::vector<std::pair<crypto::hash, cryptonote::blobdata>>::iterator i = bd.second.begin(); i != bd.second.end(); ++i)
      {
        unpruned_size += i->second.size();
        res.blocks.back().txs.push_back(std::move(i->second));
        i->second.clear();
        i->second.shrink_to_fit();
        pruned_size += res.blocks.back().txs.back().size();
      }
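      // Look up global output indices for this block's transactions: the miner tx first
      // (unless the caller asked to skip it), then each regular tx, matching the layout
      // of res.output_indices.back().indices.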
      const size_t n_txes_to_lookup = bd.second.size() + (req.no_miner_tx ? 0 : 1);
      if (n_txes_to_lookup > 0)
      {
        std::vector<std::vector<uint64_t>> indices;
        bool r = m_core.get_tx_outputs_gindexs(req.no_miner_tx ? bd.second.front().first : bd.first.second, n_txes_to_lookup, indices);
        if (!r)
        {
          res.status = "Failed";
          return false;
        }
        if (indices.size() != n_txes_to_lookup || res.output_indices.back().indices.size() != (req.no_miner_tx ? 1 : 0))
        {
          res.status = "Failed";
          return false;
        }
        for (size_t i = 0; i < indices.size(); ++i)
          res.output_indices.back().indices.push_back({std::move(indices[i])});
      }
    }

    MDEBUG("on_get_blocks: " << bs.size() << " blocks, " << ntxes << " txes, pruned size " << pruned_size << ", unpruned size " << unpruned_size);
    res.status = CORE_RPC_STATUS_OK;
    return true;
  }
  bool core_rpc_server::on_get_alt_blocks_hashes(const COMMAND_RPC_GET_ALT_BLOCKS_HASHES::request& req, COMMAND_RPC_GET_ALT_BLOCKS_HASHES::response& res, const connection_context *ctx)
  {
    PERF_TIMER(on_get_alt_blocks_hashes);
    bool r;
    if (use_bootstrap_daemon_if_necessary<COMMAND_RPC_GET_ALT_BLOCKS_HASHES>(invoke_http_mode::JON, "/get_alt_blocks_hashes", req, res, r))
      return r;

    std::vector<block> blks;
    if(!m_core.get_alternative_blocks(blks))
    {
      res.status = "Failed";
      return false;
    }
    res.blks_hashes.reserve(blks.size());
    for (auto const& blk: blks)
    {
      res.blks_hashes.push_back(epee::string_tools::pod_to_hex(get_block_hash(blk)));
    }
    MDEBUG("on_get_alt_blocks_hashes: " << blks.size() << " blocks");
    res.status = CORE_RPC_STATUS_OK;
    return true;
  }
  //------------------------------------------------------------------------------------------------------------------------------
  bool core_rpc_server::on_get_blocks_by_height(const COMMAND_RPC_GET_BLOCKS_BY_HEIGHT::request& req, COMMAND_RPC_GET_BLOCKS_BY_HEIGHT::response& res, const connection_context *ctx)
  {
    PERF_TIMER(on_get_blocks_by_height);
    bool r;
    if (use_bootstrap_daemon_if_necessary<COMMAND_RPC_GET_BLOCKS_BY_HEIGHT>(invoke_http_mode::BIN, "/getblocks_by_height.bin", req, res, r))
      return r;

    res.status = "Failed";
    res.blocks.clear();
    res.blocks.reserve(req.heights.size());
    for (uint64_t height : req.heights)
    {
      block blk;
      try
      {
        blk = m_core.get_blockchain_storage().get_db().get_block_from_height(height);
      }
      catch (...)
      {
        res.status = "Error retrieving block at height " + std::to_string(height);
        return true;
      }
      std::vector<transaction> txs;
      std::vector<crypto::hash> missed_txs;
      m_core.get_transactions(blk.tx_hashes, txs, missed_txs);
      res.blocks.resize(res.blocks.size() + 1);
      res.blocks.back().block = block_to_blob(blk);
      for (auto& tx : txs)
        res.blocks.back().txs.push_back(tx_to_blob(tx));
    }
    res.status = CORE_RPC_STATUS_OK;
    return true;
  }
  //------------------------------------------------------------------------------------------------------------------------------
  bool core_rpc_server::on_get_hashes(const COMMAND_RPC_GET_HASHES_FAST::request& req, COMMAND_RPC_GET_HASHES_FAST::response& res, const connection_context *ctx)
  {
    PERF_TIMER(on_get_hashes);
    bool r;
    if (use_bootstrap_daemon_if_necessary<COMMAND_RPC_GET_HASHES_FAST>(invoke_http_mode::BIN, "/gethashes.bin", req, res, r))
      return r;

    res.start_height = req.start_height;
    if(!m_core.get_blockchain_storage().find_blockchain_supplement(req.block_ids, res.m_block_ids, res.start_height, res.current_height, false))
    {
      res.status = "Failed";
      return false;
    }
    res.status = CORE_RPC_STATUS_OK;
    return true;
  }
  //------------------------------------------------------------------------------------------------------------------------------
  bool core_rpc_server::on_get_outs_bin(const COMMAND_RPC_GET_OUTPUTS_BIN::request& req, COMMAND_RPC_GET_OUTPUTS_BIN::response& res, const connection_context *ctx)
  {
    PERF_TIMER(on_get_outs_bin);
    bool r;
    if (use_bootstrap_daemon_if_necessary<COMMAND_RPC_GET_OUTPUTS_BIN>(invoke_http_mode::BIN, "/get_outs.bin", req, res, r))
      return r;

    res.status = "Failed";
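    // Restricted RPC callers may only request a bounded number of outputs per call
    // (MAX_RESTRICTED_GLOBAL_FAKE_OUTS_COUNT) to keep the cost of public queries in check.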
    const bool restricted = m_restricted && ctx;
    if (restricted)
    {
      if (req.outputs.size() > MAX_RESTRICTED_GLOBAL_FAKE_OUTS_COUNT)
      {
        res.status = "Too many outs requested";
        return true;
      }
    }
    if(!m_core.get_outs(req, res))
    {
      return true;
    }
    res.status = CORE_RPC_STATUS_OK;
    return true;
  }
  //------------------------------------------------------------------------------------------------------------------------------
  bool core_rpc_server::on_get_outs(const COMMAND_RPC_GET_OUTPUTS::request& req, COMMAND_RPC_GET_OUTPUTS::response& res, const connection_context *ctx)
  {
    PERF_TIMER(on_get_outs);
    bool r;
    if (use_bootstrap_daemon_if_necessary<COMMAND_RPC_GET_OUTPUTS>(invoke_http_mode::JON, "/get_outs", req, res, r))
      return r;

    res.status = "Failed";

    const bool restricted = m_restricted && ctx;
    if (restricted)
    {
      if (req.outputs.size() > MAX_RESTRICTED_GLOBAL_FAKE_OUTS_COUNT)
      {
        res.status = "Too many outs requested";
        return true;
      }
    }
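    // The JSON variant simply wraps the binary handler: build a ..._BIN request, query the
    // core, then hex-encode each returned output's key, mask and txid for the JSON response.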
    cryptonote::COMMAND_RPC_GET_OUTPUTS_BIN::request req_bin;
    req_bin.outputs = req.outputs;
    req_bin.get_txid = req.get_txid;
    cryptonote::COMMAND_RPC_GET_OUTPUTS_BIN::response res_bin;
    if(!m_core.get_outs(req_bin, res_bin))
    {
      return true;
    }
    // convert to text
    for (const auto &i: res_bin.outs)
    {
      res.outs.push_back(cryptonote::COMMAND_RPC_GET_OUTPUTS::outkey());
      cryptonote::COMMAND_RPC_GET_OUTPUTS::outkey &outkey = res.outs.back();
      outkey.key = epee::string_tools::pod_to_hex(i.key);
      outkey.mask = epee::string_tools::pod_to_hex(i.mask);
      outkey.unlocked = i.unlocked;
      outkey.height = i.height;
      outkey.txid = epee::string_tools::pod_to_hex(i.txid);
    }
    res.status = CORE_RPC_STATUS_OK;
    return true;
  }
  //------------------------------------------------------------------------------------------------------------------------------
  bool core_rpc_server::on_get_indexes(const COMMAND_RPC_GET_TX_GLOBAL_OUTPUTS_INDEXES::request& req, COMMAND_RPC_GET_TX_GLOBAL_OUTPUTS_INDEXES::response& res, const connection_context *ctx)
  {
    PERF_TIMER(on_get_indexes);
    bool ok;
    if (use_bootstrap_daemon_if_necessary<COMMAND_RPC_GET_TX_GLOBAL_OUTPUTS_INDEXES>(invoke_http_mode::BIN, "/get_o_indexes.bin", req, res, ok))
      return ok;

    bool r = m_core.get_tx_outputs_gindexs(req.txid, res.o_indexes);
    if(!r)
    {
      res.status = "Failed";
      return true;
    }
    res.status = CORE_RPC_STATUS_OK;
    LOG_PRINT_L2("COMMAND_RPC_GET_TX_GLOBAL_OUTPUTS_INDEXES: [" << res.o_indexes.size() << "]");
    return true;
  }
  //------------------------------------------------------------------------------------------------------------------------------
  bool core_rpc_server::on_get_transactions(const COMMAND_RPC_GET_TRANSACTIONS::request& req, COMMAND_RPC_GET_TRANSACTIONS::response& res, const connection_context *ctx)
  {
    PERF_TIMER(on_get_transactions);
    bool ok;
    if (use_bootstrap_daemon_if_necessary<COMMAND_RPC_GET_TRANSACTIONS>(invoke_http_mode::JON, "/gettransactions", req, res, ok))
      return ok;

    std::vector<crypto::hash> vh;
    for(const auto& tx_hex_str: req.txs_hashes)
    {
      blobdata b;
      if(!string_tools::parse_hexstr_to_binbuff(tx_hex_str, b))
      {
        res.status = "Failed to parse hex representation of transaction hash";
        return true;
      }
      if(b.size() != sizeof(crypto::hash))
      {
        res.status = "Failed, size of data mismatch";
        return true;
      }
      vh.push_back(*reinterpret_cast<const crypto::hash*>(b.data()));
    }
    std::vector<crypto::hash> missed_txs;
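    // Each entry is (tx hash, pruned blob, prunable hash, prunable blob); the prunable
    // blob may be empty when the daemon has pruned that transaction's data.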
    std::vector<std::tuple<crypto::hash, cryptonote::blobdata, crypto::hash, cryptonote::blobdata>> txs;
    bool r = m_core.get_split_transactions_blobs(vh, txs, missed_txs);
    if(!r)
    {
      res.status = "Failed";
      return true;
    }
    LOG_PRINT_L2("Found " << txs.size() << "/" << vh.size() << " transactions on the blockchain");

    // try the pool for any missing txes
    size_t found_in_pool = 0;
    std::unordered_set<crypto::hash> pool_tx_hashes;
    std::unordered_map<crypto::hash, tx_info> per_tx_pool_tx_info;
    if (!missed_txs.empty())
    {
      std::vector<tx_info> pool_tx_info;
      std::vector<spent_key_image_info> pool_key_image_info;
      bool r = m_core.get_pool_transactions_and_spent_keys_info(pool_tx_info, pool_key_image_info);
      if(r)
      {
        // sort to match original request
        std::vector<std::tuple<crypto::hash, cryptonote::blobdata, crypto::hash, cryptonote::blobdata>> sorted_txs;
        std::vector<tx_info>::const_iterator i;
        unsigned txs_processed = 0;
        for (const crypto::hash &h: vh)
        {
          if (std::find(missed_txs.begin(), missed_txs.end(), h) == missed_txs.end())
          {
            if (txs.size() == txs_processed)
            {
              res.status = "Failed: internal error - txs is empty";
              return true;
            }
            // core returns the ones it finds in the right order
            if (std::get<0>(txs[txs_processed]) != h)
            {
              res.status = "Failed: tx hash mismatch";
              return true;
            }
            sorted_txs.push_back(std::move(txs[txs_processed]));
            ++txs_processed;
          }
          else if ((i = std::find_if(pool_tx_info.begin(), pool_tx_info.end(), [h](const tx_info &txi) { return epee::string_tools::pod_to_hex(h) == txi.id_hash; })) != pool_tx_info.end())
          {
            cryptonote::transaction tx;
            if (!cryptonote::parse_and_validate_tx_from_blob(i->tx_blob, tx))
            {
              res.status = "Failed to parse and validate tx from blob";
              return true;
            }
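            // Pool transactions are stored as full blobs, so re-serialize just the base
            // part to produce the pruned blob, and keep the remainder as the prunable blob.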
            std::stringstream ss;
            binary_archive<true> ba(ss);
            bool r = const_cast<cryptonote::transaction&>(tx).serialize_base(ba);
            if (!r)
            {
              res.status = "Failed to serialize transaction base";
              return true;
            }
            const cryptonote::blobdata pruned = ss.str();
            sorted_txs.push_back(std::make_tuple(h, pruned, get_transaction_prunable_hash(tx), std::string(i->tx_blob, pruned.size())));
            missed_txs.erase(std::find(missed_txs.begin(), missed_txs.end(), h));
            pool_tx_hashes.insert(h);
            const std::string hash_string = epee::string_tools::pod_to_hex(h);
            for (const auto &ti: pool_tx_info)
            {
              if (ti.id_hash == hash_string)
              {
                per_tx_pool_tx_info.insert(std::make_pair(h, ti));
                break;
              }
            }
            ++found_in_pool;
          }
        }
        txs = sorted_txs;
      }
      LOG_PRINT_L2("Found " << found_in_pool << "/" << vh.size() << " transactions in the pool");
    }

    std::vector<std::string>::const_iterator txhi = req.txs_hashes.begin();
    std::vector<crypto::hash>::const_iterator vhi = vh.begin();
    for(auto& tx: txs)
    {
      res.txs.push_back(COMMAND_RPC_GET_TRANSACTIONS::entry());
      COMMAND_RPC_GET_TRANSACTIONS::entry &e = res.txs.back();
      crypto::hash tx_hash = *vhi++;
      e.tx_hash = *txhi++;
2018-04-30 00:30:51 +02:00
e . prunable_hash = epee : : string_tools : : pod_to_hex ( std : : get < 2 > ( tx ) ) ;
if ( req . split | | req . prune | | std : : get < 3 > ( tx ) . empty ( ) )
{
2019-03-27 10:41:18 +01:00
// use split form, with pruned and prunable parts (the prunable part filled only when prune=false and the daemon has it), leaving as_hex empty
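// Split form: the full transaction blob is the pruned blob (std::get<1>) followed by
// the prunable blob (std::get<3>); prunable_hash (std::get<2>) is the hash of the
// prunable part. prunable_as_hex is only filled when the daemon still has the
// prunable data and the caller did not ask for pruning.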
2018-04-30 00:30:51 +02:00
e . pruned_as_hex = string_tools : : buff_to_hex_nodelimer ( std : : get < 1 > ( tx ) ) ;
if ( ! req . prune )
e . prunable_as_hex = string_tools : : buff_to_hex_nodelimer ( std : : get < 3 > ( tx ) ) ;
2019-03-27 10:41:18 +01:00
if ( req . decode_as_json )
2018-04-30 00:30:51 +02:00
{
2019-03-27 10:41:18 +01:00
cryptonote : : blobdata tx_data ;
2018-04-30 00:30:51 +02:00
cryptonote : : transaction t ;
2019-03-27 10:41:18 +01:00
if ( req . prune | | std : : get < 3 > ( tx ) . empty ( ) )
2018-04-30 00:30:51 +02:00
{
2019-03-27 10:41:18 +01:00
// decode pruned tx to JSON
tx_data = std : : get < 1 > ( tx ) ;
if ( cryptonote : : parse_and_validate_tx_base_from_blob ( tx_data , t ) )
2018-04-30 00:30:51 +02:00
{
pruned_transaction pruned_tx { t } ;
e . as_json = obj_to_json_str ( pruned_tx ) ;
}
else
2019-03-27 10:41:18 +01:00
{
res . status = " Failed to parse and validate pruned tx from blob " ;
return true ;
}
}
else
{
// decode full tx to JSON
tx_data = std : : get < 1 > ( tx ) + std : : get < 3 > ( tx ) ;
if ( cryptonote : : parse_and_validate_tx_from_blob ( tx_data , t ) )
{
2018-04-30 00:30:51 +02:00
e . as_json = obj_to_json_str ( t ) ;
2019-03-27 10:41:18 +01:00
}
else
{
res . status = " Failed to parse and validate tx from blob " ;
return true ;
}
}
}
}
else
{
// use non-split form, leaving pruned_as_hex and prunable_as_hex empty
cryptonote : : blobdata tx_data = std : : get < 1 > ( tx ) + std : : get < 3 > ( tx ) ;
e . as_hex = string_tools : : buff_to_hex_nodelimer ( tx_data ) ;
if ( req . decode_as_json )
{
cryptonote : : transaction t ;
if ( cryptonote : : parse_and_validate_tx_from_blob ( tx_data , t ) )
{
e . as_json = obj_to_json_str ( t ) ;
}
else
{
res . status = " Failed to parse and validate tx from blob " ;
return true ;
2018-04-30 00:30:51 +02:00
}
}
}
2016-04-03 13:51:28 +02:00
e . in_pool = pool_tx_hashes . find ( tx_hash ) ! = pool_tx_hashes . end ( ) ;
if ( e . in_pool )
{
2017-08-30 04:30:31 +02:00
e . block_height = e . block_timestamp = std : : numeric_limits < uint64_t > : : max ( ) ;
2019-03-13 13:05:50 +01:00
auto it = per_tx_pool_tx_info . find ( tx_hash ) ;
if ( it ! = per_tx_pool_tx_info . end ( ) )
2017-09-22 14:57:20 +02:00
{
2019-03-13 13:05:50 +01:00
e . double_spend_seen = it - > second . double_spend_seen ;
e . relayed = it - > second . relayed ;
2017-09-22 14:57:20 +02:00
}
else
{
2019-03-13 13:05:50 +01:00
MERROR ( " Failed to determine pool info for " < < tx_hash ) ;
2017-09-22 14:57:20 +02:00
e . double_spend_seen = false ;
2019-03-13 13:05:50 +01:00
e . relayed = false ;
2017-09-22 14:57:20 +02:00
}
2016-04-03 13:51:28 +02:00
}
else
{
e . block_height = m_core . get_blockchain_storage ( ) . get_db ( ) . get_tx_block_height ( tx_hash ) ;
2017-08-30 04:30:31 +02:00
e . block_timestamp = m_core . get_blockchain_storage ( ) . get_db ( ) . get_block_timestamp ( e . block_height ) ;
2017-09-22 14:57:20 +02:00
e . double_spend_seen = false ;
2019-03-13 13:05:50 +01:00
e . relayed = false ;
2016-04-03 13:51:28 +02:00
}
// fill up old style responses too, in case an old wallet asks
res . txs_as_hex . push_back ( e . as_hex ) ;
2015-10-13 23:11:52 +02:00
if ( req . decode_as_json )
2016-04-03 13:51:28 +02:00
res . txs_as_json . push_back ( e . as_json ) ;
2016-11-20 15:12:19 +01:00
2016-11-23 19:55:32 +01:00
// output indices too if not in pool
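// (pool transactions are not yet mined, so they have no global output indices)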
if ( pool_tx_hashes . find ( tx_hash ) = = pool_tx_hashes . end ( ) )
2016-11-20 15:12:19 +01:00
{
2016-11-23 19:55:32 +01:00
bool r = m_core . get_tx_outputs_gindexs ( tx_hash , e . output_indices ) ;
if ( ! r )
{
res . status = " Failed " ;
return false ;
}
2016-11-20 15:12:19 +01:00
}
2014-03-03 23:07:58 +01:00
}
2017-01-22 21:38:10 +01:00
for ( const auto & miss_tx : missed_txs )
2014-03-03 23:07:58 +01:00
{
res . missed_tx . push_back ( string_tools : : pod_to_hex ( miss_tx ) ) ;
}
2016-04-03 13:51:28 +02:00
LOG_PRINT_L2 ( res . txs . size ( ) < < " transactions found, " < < res . missed_tx . size ( ) < < " not found " ) ;
2014-03-03 23:07:58 +01:00
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_is_key_image_spent ( const COMMAND_RPC_IS_KEY_IMAGE_SPENT : : request & req , COMMAND_RPC_IS_KEY_IMAGE_SPENT : : response & res , const connection_context * ctx )
2015-08-11 11:49:15 +02:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_is_key_image_spent ) ;
2018-01-20 11:38:14 +01:00
bool ok ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_IS_KEY_IMAGE_SPENT > ( invoke_http_mode : : JON , " /is_key_image_spent " , req , res , ok ) )
return ok ;
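// Example request body (illustrative only), using the field names handled below:
//   { "key_images": [ "<64 hex characters of a key image>" ] }
// Each entry of res.spent_status is one of UNSPENT, SPENT_IN_BLOCKCHAIN or
// SPENT_IN_POOL for the key image at the same index of the request.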
2019-01-11 20:09:39 +01:00
const bool restricted = m_restricted & & ctx ;
const bool request_has_rpc_origin = ctx ! = NULL ;
2015-08-11 11:49:15 +02:00
std : : vector < crypto : : key_image > key_images ;
2017-01-22 21:38:10 +01:00
for ( const auto & ki_hex_str : req . key_images )
2015-08-11 11:49:15 +02:00
{
blobdata b ;
if ( ! string_tools : : parse_hexstr_to_binbuff ( ki_hex_str , b ) )
{
res . status = " Failed to parse hex representation of key image " ;
return true ;
}
if ( b . size ( ) ! = sizeof ( crypto : : key_image ) )
{
res . status = " Failed, size of data mismatch " ;
return true ;
}
key_images . push_back ( * reinterpret_cast < const crypto : : key_image * > ( b . data ( ) ) ) ;
}
2015-08-13 17:33:28 +02:00
std : : vector < bool > spent_status ;
bool r = m_core . are_key_images_spent ( key_images , spent_status ) ;
2015-08-11 11:49:15 +02:00
if ( ! r )
{
res . status = " Failed " ;
return true ;
}
2015-08-13 17:33:28 +02:00
res . spent_status . clear ( ) ;
for ( size_t n = 0 ; n < spent_status . size ( ) ; + + n )
2016-01-05 22:57:43 +01:00
res . spent_status . push_back ( spent_status [ n ] ? COMMAND_RPC_IS_KEY_IMAGE_SPENT : : SPENT_IN_BLOCKCHAIN : COMMAND_RPC_IS_KEY_IMAGE_SPENT : : UNSPENT ) ;
// check the pool too
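// Key images already marked SPENT_IN_BLOCKCHAIN are left untouched; any remaining
// UNSPENT entries that match a key image spent by a pool transaction are upgraded
// to SPENT_IN_POOL below.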
std : : vector < cryptonote : : tx_info > txs ;
std : : vector < cryptonote : : spent_key_image_info > ki ;
2019-01-11 20:09:39 +01:00
r = m_core . get_pool_transactions_and_spent_keys_info ( txs , ki , ! request_has_rpc_origin | | ! restricted ) ;
2016-01-05 22:57:43 +01:00
if ( ! r )
{
res . status = " Failed " ;
return true ;
}
for ( std : : vector < cryptonote : : spent_key_image_info > : : const_iterator i = ki . begin ( ) ; i ! = ki . end ( ) ; + + i )
{
crypto : : hash hash ;
crypto : : key_image spent_key_image ;
if ( parse_hash256 ( i - > id_hash , hash ) )
{
memcpy ( & spent_key_image , & hash , sizeof ( hash ) ) ; // a bit dodgy; there should be a dedicated parse function for this somewhere
for ( size_t n = 0 ; n < res . spent_status . size ( ) ; + + n )
{
if ( res . spent_status [ n ] = = COMMAND_RPC_IS_KEY_IMAGE_SPENT : : UNSPENT )
{
if ( key_images [ n ] = = spent_key_image )
{
res . spent_status [ n ] = COMMAND_RPC_IS_KEY_IMAGE_SPENT : : SPENT_IN_POOL ;
break ;
}
}
}
}
}
2015-08-11 11:49:15 +02:00
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_send_raw_tx ( const COMMAND_RPC_SEND_RAW_TX : : request & req , COMMAND_RPC_SEND_RAW_TX : : response & res , const connection_context * ctx )
2014-03-03 23:07:58 +01:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_send_raw_tx ) ;
2018-01-20 11:38:14 +01:00
bool ok ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_SEND_RAW_TX > ( invoke_http_mode : : JON , " /sendrawtransaction " , req , res , ok ) )
return ok ;
2014-04-02 18:00:17 +02:00
CHECK_CORE_READY ( ) ;
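// Flow: decode the hex blob, optionally run the sanity checks, hand the blob to the
// core for verification, then relay it to peers via NOTIFY_NEW_TRANSACTIONS unless
// verification fails or req.do_not_relay is set.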
2014-03-03 23:07:58 +01:00
std : : string tx_blob ;
if ( ! string_tools : : parse_hexstr_to_binbuff ( req . tx_as_hex , tx_blob ) )
{
LOG_PRINT_L0 ( " [on_send_raw_tx]: Failed to parse tx from hexbuff: " < < req . tx_as_hex ) ;
res . status = " Failed " ;
return true ;
}
2019-04-12 22:20:20 +02:00
if ( req . do_sanity_checks & & ! cryptonote : : tx_sanity_check ( m_core . get_blockchain_storage ( ) , tx_blob ) )
{
res . status = " Failed " ;
res . reason = " Sanity check failed " ;
res . sanity_check_failed = true ;
return true ;
}
2014-03-03 23:07:58 +01:00
cryptonote_connection_context fake_context = AUTO_VAL_INIT ( fake_context ) ;
tx_verification_context tvc = AUTO_VAL_INIT ( tvc ) ;
2017-01-14 14:01:21 +01:00
if ( ! m_core . handle_incoming_tx ( tx_blob , tvc , false , false , req . do_not_relay ) | | tvc . m_verifivation_failed )
2014-03-03 23:07:58 +01:00
{
2018-07-18 04:42:47 +02:00
const vote_verification_context & vvc = tvc . m_vote_ctx ;
2019-01-18 01:27:03 +01:00
res . status = " Failed " ;
std : : string reason = print_tx_verification_context ( tvc ) ;
reason + = print_vote_verification_context ( vvc ) ;
res . tvc = tvc ;
2017-09-16 11:31:49 +02:00
const std : : string punctuation = res . reason . empty ( ) ? " " : " : " ;
if ( tvc . m_verifivation_failed )
{
2018-12-30 02:32:20 +01:00
LOG_PRINT_L0 ( " [on_send_raw_tx]: tx verification failed " < < punctuation < < reason ) ;
2017-09-16 11:31:49 +02:00
}
else
{
2018-12-30 02:32:20 +01:00
LOG_PRINT_L0 ( " [on_send_raw_tx]: Failed to process tx " < < punctuation < < reason ) ;
2017-09-16 11:31:49 +02:00
}
2014-03-03 23:07:58 +01:00
return true ;
}
2017-01-14 14:01:21 +01:00
if ( ! tvc . m_should_be_relayed )
2014-03-03 23:07:58 +01:00
{
LOG_PRINT_L0 ( " [on_send_raw_tx]: tx accepted, but not relayed " ) ;
2016-03-27 13:35:36 +02:00
res . reason = " Not relayed " ;
res . not_relayed = true ;
res . status = CORE_RPC_STATUS_OK ;
2014-03-03 23:07:58 +01:00
return true ;
}
NOTIFY_NEW_TRANSACTIONS : : request r ;
r . txs . push_back ( tx_blob ) ;
m_core . get_protocol ( ) - > relay_transactions ( r , fake_context ) ;
2018-07-18 04:42:47 +02:00
2014-03-03 23:07:58 +01:00
//TODO: make sure that tx has reached other nodes here, probably wait to receive reflections from other nodes
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_start_mining ( const COMMAND_RPC_START_MINING : : request & req , COMMAND_RPC_START_MINING : : response & res , const connection_context * ctx )
2014-03-03 23:07:58 +01:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_start_mining ) ;
2014-03-20 12:46:11 +01:00
CHECK_CORE_READY ( ) ;
2017-02-19 03:42:10 +01:00
cryptonote : : address_parse_info info ;
2018-11-16 06:31:11 +01:00
if ( ! get_account_address_from_str ( info , m_core . get_nettype ( ) , req . miner_address ) )
2014-03-03 23:07:58 +01:00
{
res . status = " Failed, wrong address " ;
2015-05-29 00:13:32 +02:00
LOG_PRINT_L0 ( res . status ) ;
2014-03-03 23:07:58 +01:00
return true ;
}
2017-02-19 03:42:10 +01:00
if ( info . is_subaddress )
{
res . status = " Mining to subaddress isn't supported yet " ;
LOG_PRINT_L0 ( res . status ) ;
return true ;
}
2014-03-03 23:07:58 +01:00
2017-01-26 20:31:56 +01:00
unsigned int concurrency_count = boost : : thread : : hardware_concurrency ( ) * 4 ;
// if we couldn't detect threads, set it to a ridiculously high number
if ( concurrency_count = = 0 )
{
concurrency_count = 257 ;
}
// if more threads are requested than the allowed maximum
// (four times the detected hardware concurrency), we fail and log that.
if ( req . threads_count > concurrency_count )
{
res . status = " Failed, too many threads relative to CPU cores. " ;
LOG_PRINT_L0 ( res . status ) ;
return true ;
}
2014-04-30 22:50:06 +02:00
boost : : thread : : attributes attrs ;
attrs . set_stack_size ( THREAD_STACK_SIZE ) ;
2018-09-09 17:26:50 +02:00
cryptonote : : miner & miner = m_core . get_miner ( ) ;
if ( miner . is_mining ( ) )
{
res . status = " Already mining " ;
return true ;
}
if ( ! miner . start ( info . address , static_cast < size_t > ( req . threads_count ) , attrs , req . do_background_mining , req . ignore_battery ) )
2014-03-03 23:07:58 +01:00
{
res . status = " Failed, mining not started " ;
2015-05-29 00:13:32 +02:00
LOG_PRINT_L0 ( res . status ) ;
2014-03-03 23:07:58 +01:00
return true ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_stop_mining ( const COMMAND_RPC_STOP_MINING : : request & req , COMMAND_RPC_STOP_MINING : : response & res , const connection_context * ctx )
2014-03-03 23:07:58 +01:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_stop_mining ) ;
2018-10-28 14:50:33 +01:00
cryptonote : : miner & miner = m_core . get_miner ( ) ;
if ( ! miner . is_mining ( ) )
{
res . status = " Mining never started " ;
LOG_PRINT_L0 ( res . status ) ;
return true ;
}
if ( ! miner . stop ( ) )
2014-03-03 23:07:58 +01:00
{
res . status = " Failed, mining not stopped " ;
2015-05-29 00:13:32 +02:00
LOG_PRINT_L0 ( res . status ) ;
2014-03-03 23:07:58 +01:00
return true ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_mining_status ( const COMMAND_RPC_MINING_STATUS : : request & req , COMMAND_RPC_MINING_STATUS : : response & res , const connection_context * ctx )
2014-05-25 21:36:12 +02:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_mining_status ) ;
2014-05-25 21:36:12 +02:00
const miner & lMiner = m_core . get_miner ( ) ;
res . active = lMiner . is_mining ( ) ;
2017-02-08 22:17:50 +01:00
res . is_background_mining_enabled = lMiner . get_is_background_mining_enabled ( ) ;
2019-04-12 07:06:49 +02:00
res . block_target = DIFFICULTY_TARGET_V2 ;
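// block_target is the nominal time between blocks, in seconds (DIFFICULTY_TARGET_V2).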
2019-04-12 10:37:56 +02:00
res . difficulty = m_core . get_blockchain_storage ( ) . get_difficulty_for_next_block ( ) ;
2014-05-25 21:36:12 +02:00
if ( lMiner . is_mining ( ) ) {
res . speed = lMiner . get_speed ( ) ;
res . threads_count = lMiner . get_threads_count ( ) ;
2019-02-22 21:17:45 +01:00
res . block_reward = lMiner . get_block_reward ( ) ;
}
const account_public_address & lMiningAdr = lMiner . get_mining_address ( ) ;
res . address = get_account_address_as_str ( nettype ( ) , false , lMiningAdr ) ;
const uint8_t major_version = m_core . get_blockchain_storage ( ) . get_current_hard_fork_version ( ) ;
2019-04-12 07:06:49 +02:00
if ( major_version > = network_version_7 & & major_version < = network_version_10_bulletproofs )
res . pow_algorithm = " Cryptonight Heavy (Variant 2) " ;
else
res . pow_algorithm = " Cryptonight Turtle Light (Variant 2) " ;
2019-02-22 21:17:45 +01:00
if ( res . is_background_mining_enabled )
{
res . bg_idle_threshold = lMiner . get_idle_threshold ( ) ;
res . bg_min_idle_seconds = lMiner . get_min_idle_seconds ( ) ;
res . bg_ignore_battery = lMiner . get_ignore_battery ( ) ;
res . bg_target = lMiner . get_mining_target ( ) ;
2014-05-25 21:36:12 +02:00
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_save_bc ( const COMMAND_RPC_SAVE_BC : : request & req , COMMAND_RPC_SAVE_BC : : response & res , const connection_context * ctx )
2014-05-16 00:21:43 +02:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_save_bc ) ;
2014-05-16 00:21:43 +02:00
if ( ! m_core . get_blockchain_storage ( ) . store_blockchain ( ) )
{
2018-03-01 12:36:19 +01:00
res . status = " Error while storing blockchain " ;
2014-05-16 00:21:43 +02:00
return true ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_get_peer_list ( const COMMAND_RPC_GET_PEER_LIST : : request & req , COMMAND_RPC_GET_PEER_LIST : : response & res , const connection_context * ctx )
2015-02-05 10:11:20 +01:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_get_peer_list ) ;
2018-12-05 23:25:27 +01:00
std : : vector < nodetool : : peerlist_entry > white_list ;
std : : vector < nodetool : : peerlist_entry > gray_list ;
2019-04-09 10:07:13 +02:00
m_p2p . get_public_peerlist ( gray_list , white_list ) ;
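// The white list holds peers that were recently seen and responded; the gray list
// holds known peers that have not been verified recently.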
2015-05-28 15:07:31 +02:00
2018-12-05 23:25:27 +01:00
res . white_list . reserve ( white_list . size ( ) ) ;
2015-02-05 10:11:20 +01:00
for ( auto & entry : white_list )
{
2019-04-09 10:07:13 +02:00
if ( entry . adr . get_type_id ( ) = = epee : : net_utils : : ipv4_network_address : : get_type_id ( ) )
2017-05-27 12:35:54 +02:00
res . white_list . emplace_back ( entry . id , entry . adr . as < epee : : net_utils : : ipv4_network_address > ( ) . ip ( ) ,
2019-02-24 09:47:49 +01:00
entry . adr . as < epee : : net_utils : : ipv4_network_address > ( ) . port ( ) , entry . last_seen , entry . pruning_seed , entry . rpc_port ) ;
2017-05-27 12:35:54 +02:00
else
2019-02-24 09:47:49 +01:00
res . white_list . emplace_back ( entry . id , entry . adr . str ( ) , entry . last_seen , entry . pruning_seed , entry . rpc_port ) ;
2015-02-05 10:11:20 +01:00
}
2018-12-05 23:25:27 +01:00
res . gray_list . reserve ( gray_list . size ( ) ) ;
2015-02-05 10:11:20 +01:00
for ( auto & entry : gray_list )
{
2019-04-09 10:07:13 +02:00
if ( entry . adr . get_type_id ( ) = = epee : : net_utils : : ipv4_network_address : : get_type_id ( ) )
2017-05-27 12:35:54 +02:00
res . gray_list . emplace_back ( entry . id , entry . adr . as < epee : : net_utils : : ipv4_network_address > ( ) . ip ( ) ,
2019-02-24 09:47:49 +01:00
entry . adr . as < epee : : net_utils : : ipv4_network_address > ( ) . port ( ) , entry . last_seen , entry . pruning_seed , entry . rpc_port ) ;
2017-05-27 12:35:54 +02:00
else
2019-02-24 09:47:49 +01:00
res . gray_list . emplace_back ( entry . id , entry . adr . str ( ) , entry . last_seen , entry . pruning_seed , entry . rpc_port ) ;
2015-02-05 10:11:20 +01:00
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_set_log_hash_rate ( const COMMAND_RPC_SET_LOG_HASH_RATE : : request & req , COMMAND_RPC_SET_LOG_HASH_RATE : : response & res , const connection_context * ctx )
2015-02-05 10:11:20 +01:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_set_log_hash_rate ) ;
2015-02-05 10:11:20 +01:00
if ( m_core . get_miner ( ) . is_mining ( ) )
{
m_core . get_miner ( ) . do_print_hashrate ( req . visible ) ;
res . status = CORE_RPC_STATUS_OK ;
}
else
{
res . status = CORE_RPC_STATUS_NOT_MINING ;
}
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_set_log_level ( const COMMAND_RPC_SET_LOG_LEVEL : : request & req , COMMAND_RPC_SET_LOG_LEVEL : : response & res , const connection_context * ctx )
2015-02-05 10:11:20 +01:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_set_log_level ) ;
2017-01-01 17:34:23 +01:00
if ( req . level < 0 | | req . level > 4 )
2015-02-05 10:11:20 +01:00
{
res . status = " Error: log level not valid " ;
2017-01-01 17:34:23 +01:00
return true ;
2015-02-05 10:11:20 +01:00
}
2017-01-01 17:34:23 +01:00
mlog_set_log_level ( req . level ) ;
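// Levels 0-4 follow the epee convention, 0 being the least and 4 the most verbose.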
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_set_log_categories ( const COMMAND_RPC_SET_LOG_CATEGORIES : : request & req , COMMAND_RPC_SET_LOG_CATEGORIES : : response & res , const connection_context * ctx )
2017-01-01 17:34:23 +01:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_set_log_categories ) ;
2017-02-12 12:37:09 +01:00
mlog_set_log ( req . categories . c_str ( ) ) ;
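// req.categories accepts either an epee-style level ("0".."4") or a category string
// such as "*:WARNING,net.p2p:DEBUG"; the effective configuration is echoed back in
// res.categories below.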
2017-09-22 18:54:58 +02:00
res . categories = mlog_get_categories ( ) ;
2017-01-01 17:34:23 +01:00
res . status = CORE_RPC_STATUS_OK ;
2015-02-05 10:11:20 +01:00
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_get_transaction_pool ( const COMMAND_RPC_GET_TRANSACTION_POOL : : request & req , COMMAND_RPC_GET_TRANSACTION_POOL : : response & res , const connection_context * ctx )
2015-02-05 10:11:20 +01:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_get_transaction_pool ) ;
2018-01-20 11:38:14 +01:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_TRANSACTION_POOL > ( invoke_http_mode : : JON , " /get_transaction_pool " , req , res , r ) )
return r ;
2019-01-11 20:09:39 +01:00
const bool restricted = m_restricted & & ctx ;
const bool request_has_rpc_origin = ctx ! = NULL ;
m_core . get_pool_transactions_and_spent_keys_info ( res . transactions , res . spent_key_images , ! request_has_rpc_origin | | ! restricted ) ;
2018-10-18 01:01:56 +02:00
for ( tx_info & txi : res . transactions )
txi . tx_blob = epee : : string_tools : : buff_to_hex_nodelimer ( txi . tx_blob ) ;
2015-02-05 10:11:20 +01:00
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_get_transaction_pool_hashes_bin ( const COMMAND_RPC_GET_TRANSACTION_POOL_HASHES_BIN : : request & req , COMMAND_RPC_GET_TRANSACTION_POOL_HASHES_BIN : : response & res , const connection_context * ctx )
2017-03-22 19:03:23 +01:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_get_transaction_pool_hashes ) ;
2018-01-20 11:38:14 +01:00
bool r ;
2018-06-20 13:48:10 +02:00
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_TRANSACTION_POOL_HASHES_BIN > ( invoke_http_mode : : JON , " /get_transaction_pool_hashes.bin " , req , res , r ) )
2018-01-20 11:38:14 +01:00
return r ;
2019-01-11 20:09:39 +01:00
const bool restricted = m_restricted & & ctx ;
const bool request_has_rpc_origin = ctx ! = NULL ;
m_core . get_pool_transaction_hashes ( res . tx_hashes , ! request_has_rpc_origin | | ! restricted ) ;
2017-03-22 19:03:23 +01:00
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_get_transaction_pool_hashes ( const COMMAND_RPC_GET_TRANSACTION_POOL_HASHES : : request & req , COMMAND_RPC_GET_TRANSACTION_POOL_HASHES : : response & res , const connection_context * ctx )
2018-06-20 13:48:10 +02:00
{
PERF_TIMER ( on_get_transaction_pool_hashes ) ;
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_TRANSACTION_POOL_HASHES > ( invoke_http_mode : : JON , " /get_transaction_pool_hashes " , req , res , r ) )
return r ;
2019-01-11 20:09:39 +01:00
const bool restricted = m_restricted & & ctx ;
const bool request_has_rpc_origin = ctx ! = NULL ;
2018-06-20 13:48:10 +02:00
std : : vector < crypto : : hash > tx_hashes ;
2019-01-11 20:09:39 +01:00
m_core . get_pool_transaction_hashes ( tx_hashes , ! request_has_rpc_origin | | ! restricted ) ;
2018-06-20 13:48:10 +02:00
res . tx_hashes . reserve ( tx_hashes . size ( ) ) ;
for ( const crypto : : hash & tx_hash : tx_hashes )
res . tx_hashes . push_back ( epee : : string_tools : : pod_to_hex ( tx_hash ) ) ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_get_transaction_pool_stats ( const COMMAND_RPC_GET_TRANSACTION_POOL_STATS : : request & req , COMMAND_RPC_GET_TRANSACTION_POOL_STATS : : response & res , const connection_context * ctx )
2017-05-31 20:11:56 +02:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_get_transaction_pool_stats ) ;
2018-01-20 11:38:14 +01:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_TRANSACTION_POOL_STATS > ( invoke_http_mode : : JON , " /get_transaction_pool_stats " , req , res , r ) )
return r ;
2019-01-11 20:09:39 +01:00
const bool restricted = m_restricted & & ctx ;
const bool request_has_rpc_origin = ctx ! = NULL ;
m_core . get_pool_transaction_stats ( res . pool_stats , ! request_has_rpc_origin | | ! restricted ) ;
2017-05-31 20:11:56 +02:00
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_stop_daemon ( const COMMAND_RPC_STOP_DAEMON : : request & req , COMMAND_RPC_STOP_DAEMON : : response & res , const connection_context * ctx )
2015-02-05 10:11:20 +01:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_stop_daemon ) ;
2015-02-05 10:11:20 +01:00
// FIXME: replace back to original m_p2p.send_stop_signal() after
// investigating why that isn't working quite right.
2015-02-05 11:38:49 +01:00
m_p2p . send_stop_signal ( ) ;
2015-02-05 10:11:20 +01:00
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-02-14 02:12:57 +01:00
//
// Loki
//
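// Returns the global output indices blacklisted for ring selection (introduced with
// infinite staking); wallets are expected to skip these outputs when picking decoys.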
bool core_rpc_server : : on_get_output_blacklist_bin ( const COMMAND_RPC_GET_OUTPUT_BLACKLIST : : request & req , COMMAND_RPC_GET_OUTPUT_BLACKLIST : : response & res , const connection_context * ctx )
{
PERF_TIMER ( on_get_output_blacklist_bin ) ;
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_OUTPUT_BLACKLIST > ( invoke_http_mode : : BIN , " /get_output_blacklist.bin " , req , res , r ) )
return r ;
res . status = " Failed " ;
try
{
m_core . get_output_blacklist ( res . blacklist ) ;
}
catch ( const std : : exception & e )
{
res . status = " Failed to get output blacklist " ;
return false ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_getblockcount ( const COMMAND_RPC_GETBLOCKCOUNT : : request & req , COMMAND_RPC_GETBLOCKCOUNT : : response & res , const connection_context * ctx )
2014-03-03 23:07:58 +01:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_getblockcount ) ;
2018-01-20 11:38:14 +01:00
{
boost : : shared_lock < boost : : shared_mutex > lock ( m_bootstrap_daemon_mutex ) ;
if ( m_should_use_bootstrap_daemon )
{
res . status = " This command is unsupported for bootstrap daemon " ;
return false ;
}
}
2014-03-20 12:46:11 +01:00
res . count = m_core . get_current_blockchain_height ( ) ;
res . status = CORE_RPC_STATUS_OK ;
2014-03-03 23:07:58 +01:00
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_getblockhash ( const COMMAND_RPC_GETBLOCKHASH : : request & req , COMMAND_RPC_GETBLOCKHASH : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2014-03-03 23:07:58 +01:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_getblockhash ) ;
2018-01-20 11:38:14 +01:00
{
boost : : shared_lock < boost : : shared_mutex > lock ( m_bootstrap_daemon_mutex ) ;
if ( m_should_use_bootstrap_daemon )
{
res = " This command is unsupported for bootstrap daemon " ;
return false ;
}
}
2014-03-03 23:07:58 +01:00
if ( req . size ( ) ! = 1 )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_PARAM ;
error_resp . message = " Wrong parameters, expected height " ;
return false ;
}
uint64_t h = req [ 0 ] ;
if ( m_core . get_current_blockchain_height ( ) < = h )
{
error_resp . code = CORE_RPC_ERROR_CODE_TOO_BIG_HEIGHT ;
2018-11-03 00:31:31 +01:00
error_resp . message = std : : string ( " Requested block height: " ) + std : : to_string ( h ) + " greater than current top block height: " + std : : to_string ( m_core . get_current_blockchain_height ( ) - 1 ) ;
2014-03-03 23:07:58 +01:00
return false ;
}
res = string_tools : : pod_to_hex ( m_core . get_block_id_by_height ( h ) ) ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2014-10-06 11:27:34 +02:00
// equivalent of strstr, but with arbitrary bytes (ie, NULs)
// This does not differentiate between "not found" and "found at offset 0"
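// The caller below (on_getblocktemplate) treats a return value of 0 as "not found";
// that is acceptable here because the pattern it searches for (the tx pub key inside
// a serialized block) cannot realistically start at offset 0 of the blob.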
2018-11-23 14:11:40 +01:00
size_t slow_memmem ( const void * start_buff , size_t buflen , const void * pat , size_t patlen )
2014-03-03 23:07:58 +01:00
{
2014-10-06 11:27:34 +02:00
const void * buf = start_buff ;
const void * end = ( const char * ) buf + buflen ;
if ( patlen > buflen | | patlen = = 0 ) return 0 ;
while ( buflen > 0 & & ( buf = memchr ( buf , ( ( const char * ) pat ) [ 0 ] , buflen - patlen + 1 ) ) )
2014-03-03 23:07:58 +01:00
{
if ( memcmp ( buf , pat , patlen ) = = 0 )
2014-10-06 11:27:34 +02:00
return ( const char * ) buf - ( const char * ) start_buff ;
buf = ( const char * ) buf + 1 ;
buflen = ( const char * ) end - ( const char * ) buf ;
2014-03-03 23:07:58 +01:00
}
return 0 ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_getblocktemplate ( const COMMAND_RPC_GETBLOCKTEMPLATE : : request & req , COMMAND_RPC_GETBLOCKTEMPLATE : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2014-03-03 23:07:58 +01:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_getblocktemplate ) ;
2018-01-20 11:38:14 +01:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GETBLOCKTEMPLATE > ( invoke_http_mode : : JON_RPC , " getblocktemplate " , req , res , r ) )
return r ;
2014-03-20 12:46:11 +01:00
if ( ! check_core_ready ( ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_CORE_BUSY ;
error_resp . message = " Core is busy " ;
return false ;
}
2014-03-03 23:07:58 +01:00
if ( req . reserve_size > 255 )
{
error_resp . code = CORE_RPC_ERROR_CODE_TOO_BIG_RESERVE_SIZE ;
2018-03-01 12:36:19 +01:00
error_resp . message = " Too big reserved size, maximum 255 " ;
2014-03-03 23:07:58 +01:00
return false ;
}
2017-02-19 03:42:10 +01:00
cryptonote : : address_parse_info info ;
2014-03-03 23:07:58 +01:00
2018-11-16 06:31:11 +01:00
if ( ! req . wallet_address . size ( ) | | ! cryptonote : : get_account_address_from_str ( info , m_core . get_nettype ( ) , req . wallet_address ) )
2014-03-03 23:07:58 +01:00
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_WALLET_ADDRESS ;
error_resp . message = " Failed to parse wallet address " ;
return false ;
}
2017-02-19 03:42:10 +01:00
if ( info . is_subaddress )
{
error_resp . code = CORE_RPC_ERROR_CODE_MINING_TO_SUBADDRESS ;
error_resp . message = " Mining to subaddress is not supported yet " ;
return false ;
}
2014-03-03 23:07:58 +01:00
2018-11-19 18:55:53 +01:00
block b ;
2014-03-03 23:07:58 +01:00
cryptonote : : blobdata blob_reserve ;
blob_reserve . resize ( req . reserve_size , 0 ) ;
2019-01-31 11:44:08 +01:00
cryptonote : : difficulty_type wdiff ;
2019-03-23 17:20:08 +01:00
crypto : : hash prev_block ;
if ( ! req . prev_block . empty ( ) )
{
if ( ! epee : : string_tools : : hex_to_pod ( req . prev_block , prev_block ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Invalid prev_block " ;
return false ;
}
}
if ( ! m_core . get_block_template ( b , req . prev_block . empty ( ) ? NULL : & prev_block , info . address , wdiff , res . height , res . expected_reward , blob_reserve ) )
2014-03-03 23:07:58 +01:00
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: failed to create block template " ;
LOG_ERROR ( " Failed to create block template " ) ;
return false ;
}
blobdata block_blob = t_serializable_object_to_blob ( b ) ;
2014-05-03 18:19:43 +02:00
crypto : : public_key tx_pub_key = cryptonote : : get_tx_pub_key_from_extra ( b . miner_tx ) ;
2017-10-10 16:47:08 +02:00
if ( tx_pub_key = = crypto : : null_pkey )
2014-03-03 23:07:58 +01:00
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: failed to create block template " ;
2018-10-05 04:12:53 +02:00
LOG_ERROR ( " Failed to get tx pub key in coinbase extra " ) ;
2014-03-03 23:07:58 +01:00
return false ;
}
res . reserved_offset = slow_memmem ( ( void * ) block_blob . data ( ) , block_blob . size ( ) , & tx_pub_key , sizeof ( tx_pub_key ) ) ;
if ( ! res . reserved_offset )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: failed to create block template " ;
LOG_ERROR ( " Failed to find tx pub key in blockblob " ) ;
return false ;
}
2019-04-12 05:28:54 +02:00
if ( req . reserve_size )
res . reserved_offset + = sizeof ( tx_pub_key ) + 2 ; //2 bytes: tag for TX_EXTRA_NONCE(1 byte), counter in TX_EXTRA_NONCE(1 byte)
else
res . reserved_offset = 0 ;
2014-03-03 23:07:58 +01:00
if ( res . reserved_offset + req . reserve_size > block_blob . size ( ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: failed to create block template " ;
LOG_ERROR ( " Failed to calculate offset for " ) ;
return false ;
}
2016-03-30 03:50:51 +02:00
blobdata hashing_blob = get_block_hashing_blob ( b ) ;
2015-01-06 17:37:10 +01:00
res . prev_hash = string_tools : : pod_to_hex ( b . prev_id ) ;
2014-03-03 23:07:58 +01:00
res . blocktemplate_blob = string_tools : : buff_to_hex_nodelimer ( block_blob ) ;
2016-03-30 03:50:51 +02:00
res . blockhashing_blob = string_tools : : buff_to_hex_nodelimer ( hashing_blob ) ;
2014-05-25 19:06:40 +02:00
res . status = CORE_RPC_STATUS_OK ;
2014-03-03 23:07:58 +01:00
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_submitblock ( const COMMAND_RPC_SUBMITBLOCK : : request & req , COMMAND_RPC_SUBMITBLOCK : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2014-03-03 23:07:58 +01:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_submitblock ) ;
2018-01-20 11:38:14 +01:00
{
boost : : shared_lock < boost : : shared_mutex > lock ( m_bootstrap_daemon_mutex ) ;
if ( m_should_use_bootstrap_daemon )
{
res . status = " This command is unsupported for bootstrap daemon " ;
return false ;
}
}
2014-03-20 12:46:11 +01:00
CHECK_CORE_READY ( ) ;
2014-03-03 23:07:58 +01:00
if ( req . size ( ) ! = 1 )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_PARAM ;
error_resp . message = " Wrong param " ;
return false ;
}
blobdata blockblob ;
if ( ! string_tools : : parse_hexstr_to_binbuff ( req [ 0 ] , blockblob ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_BLOCKBLOB ;
error_resp . message = " Wrong block blob " ;
return false ;
}
2019-02-15 01:13:27 +01:00
2014-06-11 16:46:56 +02:00
// Fixing of high orphan issue for most pools
// Thanks Boolberry!
2018-11-19 18:55:53 +01:00
block b ;
2014-06-11 16:46:56 +02:00
if ( ! parse_and_validate_block_from_blob ( blockblob , b ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_BLOCKBLOB ;
error_resp . message = " Wrong block blob " ;
return false ;
}
2014-06-11 23:32:53 +02:00
// Fix from Boolberry neglects to check block
// size, do that with the function below
if ( ! m_core . check_incoming_block_size ( blockblob ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_BLOCKBLOB_SIZE ;
error_resp . message = " Block bloc size is too big, rejecting block " ;
return false ;
}
2019-03-23 17:20:08 +01:00
block_verification_context bvc ;
if ( ! m_core . handle_block_found ( b , bvc ) )
2014-03-03 23:07:58 +01:00
{
error_resp . code = CORE_RPC_ERROR_CODE_BLOCK_NOT_ACCEPTED ;
error_resp . message = " Block not accepted " ;
return false ;
}
2014-05-25 19:06:40 +02:00
res . status = CORE_RPC_STATUS_OK ;
2014-03-03 23:07:58 +01:00
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
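// Regtest-only helper: repeatedly builds a block template for the given wallet
// address, finds a valid nonce with miner::find_nonce_for_given_block, submits
// the block, and chains the next template onto it via template_req.prev_block.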
bool core_rpc_server : : on_generateblocks ( const COMMAND_RPC_GENERATEBLOCKS : : request & req , COMMAND_RPC_GENERATEBLOCKS : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2018-06-14 21:11:49 +02:00
{
PERF_TIMER ( on_generateblocks ) ;
CHECK_CORE_READY ( ) ;
2019-02-15 01:13:27 +01:00
2018-06-14 21:11:49 +02:00
res . status = CORE_RPC_STATUS_OK ;
if ( m_core . get_nettype ( ) ! = FAKECHAIN )
{
error_resp . code = CORE_RPC_ERROR_CODE_REGTEST_REQUIRED ;
2019-02-15 01:13:27 +01:00
error_resp . message = " Regtest required when generating blocks " ;
2018-06-14 21:11:49 +02:00
return false ;
}
COMMAND_RPC_GETBLOCKTEMPLATE : : request template_req ;
COMMAND_RPC_GETBLOCKTEMPLATE : : response template_res ;
COMMAND_RPC_SUBMITBLOCK : : request submit_req ;
COMMAND_RPC_SUBMITBLOCK : : response submit_res ;
template_req . reserve_size = 1 ;
template_req . wallet_address = req . wallet_address ;
2019-03-23 17:20:08 +01:00
template_req . prev_block = req . prev_block ;
2018-06-14 21:11:49 +02:00
submit_req . push_back ( boost : : value_initialized < std : : string > ( ) ) ;
res . height = m_core . get_blockchain_storage ( ) . get_current_blockchain_height ( ) ;
for ( size_t i = 0 ; i < req . amount_of_blocks ; i + + )
{
2019-04-14 11:21:09 +02:00
bool r = on_getblocktemplate ( template_req , template_res , error_resp , ctx ) ;
2018-06-14 21:11:49 +02:00
res . status = template_res . status ;
2019-03-23 17:20:08 +01:00
template_req . prev_block . clear ( ) ;
2018-06-14 21:11:49 +02:00
if ( ! r ) return false ;
blobdata blockblob ;
if ( ! string_tools : : parse_hexstr_to_binbuff ( template_res . blocktemplate_blob , blockblob ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_BLOCKBLOB ;
error_resp . message = " Wrong block blob " ;
return false ;
}
2018-11-19 18:55:53 +01:00
block b ;
2018-06-14 21:11:49 +02:00
if ( ! parse_and_validate_block_from_blob ( blockblob , b ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_BLOCKBLOB ;
error_resp . message = " Wrong block blob " ;
return false ;
}
2019-03-23 17:20:08 +01:00
b . nonce = req . starting_nonce ;
2018-06-14 21:11:49 +02:00
miner : : find_nonce_for_given_block ( b , template_res . difficulty , template_res . height ) ;
submit_req . front ( ) = string_tools : : buff_to_hex_nodelimer ( block_to_blob ( b ) ) ;
2019-01-11 20:09:39 +01:00
r = on_submitblock ( submit_req , submit_res , error_resp , ctx ) ;
2018-06-14 21:11:49 +02:00
res . status = submit_res . status ;
if ( ! r ) return false ;
2019-03-23 17:20:08 +01:00
res . blocks . push_back ( epee : : string_tools : : pod_to_hex ( get_block_hash ( b ) ) ) ;
template_req . prev_block = res . blocks . back ( ) ;
2018-06-14 21:11:49 +02:00
res . height = template_res . height ;
}
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2014-04-09 14:14:35 +02:00
uint64_t core_rpc_server : : get_block_reward ( const block & blk )
{
uint64_t reward = 0 ;
2017-01-22 21:38:10 +01:00
for ( const tx_out & out : blk . miner_tx . vout )
2014-04-09 14:14:35 +02:00
{
reward + = out . amount ;
}
return reward ;
}
//------------------------------------------------------------------------------------------------------------------------------
2018-04-05 16:15:15 +02:00
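// Fills a block_header_response from an in-memory block. The expensive PoW hash
// is only computed when fill_pow_hash is set; callers pass
// req.fill_pow_hash && !restricted so restricted (public) RPC never computes it.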
bool core_rpc_server : : fill_block_header_response ( const block & blk , bool orphan_status , uint64_t height , const crypto : : hash & hash , block_header_response & response , bool fill_pow_hash )
2016-09-29 15:38:12 +02:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( fill_block_header_response ) ;
2016-09-29 15:38:12 +02:00
response . major_version = blk . major_version ;
response . minor_version = blk . minor_version ;
response . timestamp = blk . timestamp ;
response . prev_hash = string_tools : : pod_to_hex ( blk . prev_id ) ;
response . nonce = blk . nonce ;
response . orphan_status = orphan_status ;
response . height = height ;
response . depth = m_core . get_current_blockchain_height ( ) - height - 1 ;
response . hash = string_tools : : pod_to_hex ( hash ) ;
response . difficulty = m_core . get_blockchain_storage ( ) . block_difficulty ( height ) ;
2018-09-13 14:41:36 +02:00
response . cumulative_difficulty = m_core . get_blockchain_storage ( ) . get_db ( ) . get_block_cumulative_difficulty ( height ) ;
2016-09-29 15:38:12 +02:00
response . reward = get_block_reward ( blk ) ;
2018-12-18 07:49:36 +01:00
response . miner_reward = blk . miner_tx . vout [ 0 ] . amount ;
2018-07-18 23:24:53 +02:00
response . block_size = response . block_weight = m_core . get_blockchain_storage ( ) . get_db ( ) . get_block_weight ( height ) ;
2017-01-08 12:14:11 +01:00
response . num_txes = blk . tx_hashes . size ( ) ;
2018-04-05 16:15:15 +02:00
response . pow_hash = fill_pow_hash ? string_tools : : pod_to_hex ( get_block_longhash ( blk , height ) ) : " " ;
2019-01-21 18:18:50 +01:00
response . long_term_weight = m_core . get_blockchain_storage ( ) . get_db ( ) . get_block_long_term_weight ( height ) ;
2019-03-21 01:23:23 +01:00
response . miner_tx_hash = string_tools : : pod_to_hex ( cryptonote : : get_transaction_hash ( blk . miner_tx ) ) ;
2014-04-09 14:14:35 +02:00
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2018-01-20 11:38:14 +01:00
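// Forwards the request to the configured bootstrap daemon while the local daemon
// is still catching up. The decision is re-evaluated every 30 seconds: the
// bootstrap daemon is used only while its reported height is more than 10 blocks
// ahead of the local chain. Any response served this way is flagged
// res.untrusted = true. Returns true when the bootstrap daemon handled the
// request (with its result in r), false when the caller should answer locally.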
template < typename COMMAND_TYPE >
bool core_rpc_server : : use_bootstrap_daemon_if_necessary ( const invoke_http_mode & mode , const std : : string & command_name , const typename COMMAND_TYPE : : request & req , typename COMMAND_TYPE : : response & res , bool & r )
{
res . untrusted = false ;
if ( m_bootstrap_daemon_address . empty ( ) )
return false ;
boost : : unique_lock < boost : : shared_mutex > lock ( m_bootstrap_daemon_mutex ) ;
if ( ! m_should_use_bootstrap_daemon )
{
MINFO ( " The local daemon is fully synced. Not switching back to the bootstrap daemon " ) ;
return false ;
}
auto current_time = std : : chrono : : system_clock : : now ( ) ;
if ( current_time - m_bootstrap_height_check_time > std : : chrono : : seconds ( 30 ) ) // update every 30s
{
m_bootstrap_height_check_time = current_time ;
uint64_t top_height ;
crypto : : hash top_hash ;
m_core . get_blockchain_top ( top_height , top_hash ) ;
+ + top_height ; // turn top block height into blockchain height
// query bootstrap daemon's height
cryptonote : : COMMAND_RPC_GET_HEIGHT : : request getheight_req ;
cryptonote : : COMMAND_RPC_GET_HEIGHT : : response getheight_res ;
bool ok = epee : : net_utils : : invoke_http_json ( " /getheight " , getheight_req , getheight_res , m_http_client ) ;
ok = ok & & getheight_res . status = = CORE_RPC_STATUS_OK ;
m_should_use_bootstrap_daemon = ok & & top_height + 10 < getheight_res . height ;
MINFO ( ( m_should_use_bootstrap_daemon ? " Using " : " Not using " ) < < " the bootstrap daemon (our height: " < < top_height < < " , bootstrap daemon's height: " < < getheight_res . height < < " ) " ) ;
}
if ( ! m_should_use_bootstrap_daemon )
return false ;
if ( mode = = invoke_http_mode : : JON )
{
r = epee : : net_utils : : invoke_http_json ( command_name , req , res , m_http_client ) ;
}
else if ( mode = = invoke_http_mode : : BIN )
{
r = epee : : net_utils : : invoke_http_bin ( command_name , req , res , m_http_client ) ;
}
else if ( mode = = invoke_http_mode : : JON_RPC )
{
epee : : json_rpc : : request < typename COMMAND_TYPE : : request > json_req = AUTO_VAL_INIT ( json_req ) ;
epee : : json_rpc : : response < typename COMMAND_TYPE : : response , std : : string > json_resp = AUTO_VAL_INIT ( json_resp ) ;
json_req . jsonrpc = " 2.0 " ;
json_req . id = epee : : serialization : : storage_entry ( 0 ) ;
json_req . method = command_name ;
json_req . params = req ;
r = net_utils : : invoke_http_json ( " /json_rpc " , json_req , json_resp , m_http_client ) ;
if ( r )
res = json_resp . result ;
}
else
{
MERROR ( " Unknown invoke_http_mode: " < < mode ) ;
return false ;
}
m_was_bootstrap_ever_used = true ;
r = r & & res . status = = CORE_RPC_STATUS_OK ;
res . untrusted = true ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_get_last_block_header ( const COMMAND_RPC_GET_LAST_BLOCK_HEADER : : request & req , COMMAND_RPC_GET_LAST_BLOCK_HEADER : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2014-04-09 14:14:35 +02:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_get_last_block_header ) ;
2018-01-20 11:38:14 +01:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_LAST_BLOCK_HEADER > ( invoke_http_mode : : JON_RPC , " getlastblockheader " , req , res , r ) )
return r ;
2017-10-20 21:49:23 +02:00
CHECK_CORE_READY ( ) ;
2014-04-09 14:14:35 +02:00
uint64_t last_block_height ;
crypto : : hash last_block_hash ;
2017-09-09 13:06:24 +02:00
m_core . get_blockchain_top ( last_block_height , last_block_hash ) ;
2014-04-09 14:14:35 +02:00
block last_block ;
bool have_last_block = m_core . get_block_by_hash ( last_block_hash , last_block ) ;
if ( ! have_last_block )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: can't get last block. " ;
return false ;
}
2019-03-01 17:14:51 +01:00
const bool restricted = m_restricted & & ctx ;
bool response_filled = fill_block_header_response ( last_block , false , last_block_height , last_block_hash , res . block_header , req . fill_pow_hash & & ! restricted ) ;
2016-09-29 15:38:12 +02:00
if ( ! response_filled )
2014-04-09 14:14:35 +02:00
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: can't produce valid response. " ;
return false ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_get_block_header_by_hash ( const COMMAND_RPC_GET_BLOCK_HEADER_BY_HASH : : request & req , COMMAND_RPC_GET_BLOCK_HEADER_BY_HASH : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_get_block_header_by_hash ) ;
2018-01-20 11:38:14 +01:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_BLOCK_HEADER_BY_HASH > ( invoke_http_mode : : JON_RPC , " getblockheaderbyhash " , req , res , r ) )
return r ;
2014-04-09 14:14:35 +02:00
crypto : : hash block_hash ;
bool hash_parsed = parse_hash256 ( req . hash , block_hash ) ;
if ( ! hash_parsed )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_PARAM ;
error_resp . message = " Failed to parse hex representation of block hash. Hex = " + req . hash + ' . ' ;
return false ;
}
block blk ;
2017-01-22 13:20:55 +01:00
bool orphan = false ;
bool have_block = m_core . get_block_by_hash ( block_hash , blk , & orphan ) ;
2014-04-09 14:14:35 +02:00
if ( ! have_block )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: can't get block by hash. Hash = " + req . hash + ' . ' ;
return false ;
}
2017-12-11 23:36:58 +01:00
if ( blk . miner_tx . vin . size ( ) ! = 1 | | blk . miner_tx . vin . front ( ) . type ( ) ! = typeid ( txin_gen ) )
2014-04-09 14:14:35 +02:00
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: coinbase transaction in the block has the wrong type " ;
return false ;
}
uint64_t block_height = boost : : get < txin_gen > ( blk . miner_tx . vin . front ( ) ) . height ;
2019-03-01 17:14:51 +01:00
const bool restricted = m_restricted & & ctx ;
bool response_filled = fill_block_header_response ( blk , orphan , block_height , block_hash , res . block_header , req . fill_pow_hash & & ! restricted ) ;
2016-09-29 15:38:12 +02:00
if ( ! response_filled )
2014-04-09 14:14:35 +02:00
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: can't produce valid response. " ;
return false ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_get_block_headers_range ( const COMMAND_RPC_GET_BLOCK_HEADERS_RANGE : : request & req , COMMAND_RPC_GET_BLOCK_HEADERS_RANGE : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_get_block_headers_range ) ;
2018-01-20 11:38:14 +01:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_BLOCK_HEADERS_RANGE > ( invoke_http_mode : : JON_RPC , " getblockheadersrange " , req , res , r ) )
return r ;
2016-10-02 11:21:21 +02:00
const uint64_t bc_height = m_core . get_current_blockchain_height ( ) ;
if ( req . start_height > = bc_height | | req . end_height > = bc_height | | req . start_height > req . end_height )
{
error_resp . code = CORE_RPC_ERROR_CODE_TOO_BIG_HEIGHT ;
error_resp . message = " Invalid start/end heights. " ;
return false ;
}
for ( uint64_t h = req . start_height ; h < = req . end_height ; + + h )
{
crypto : : hash block_hash = m_core . get_block_id_by_height ( h ) ;
block blk ;
bool have_block = m_core . get_block_by_hash ( block_hash , blk ) ;
if ( ! have_block )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: can't get block by height. Height = " + boost : : lexical_cast < std : : string > ( h ) + " . Hash = " + epee : : string_tools : : pod_to_hex ( block_hash ) + ' . ' ;
return false ;
}
2017-12-11 23:36:58 +01:00
if ( blk . miner_tx . vin . size ( ) ! = 1 | | blk . miner_tx . vin . front ( ) . type ( ) ! = typeid ( txin_gen ) )
2016-10-02 11:21:21 +02:00
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: coinbase transaction in the block has the wrong type " ;
return false ;
}
uint64_t block_height = boost : : get < txin_gen > ( blk . miner_tx . vin . front ( ) ) . height ;
if ( block_height ! = h )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: coinbase transaction in the block has the wrong height " ;
return false ;
}
2016-10-04 13:55:55 +02:00
res . headers . push_back ( block_header_response ( ) ) ;
2019-03-01 17:14:51 +01:00
const bool restricted = m_restricted & & ctx ;
bool response_filled = fill_block_header_response ( blk , false , block_height , block_hash , res . headers . back ( ) , req . fill_pow_hash & & ! restricted ) ;
2017-06-18 10:12:54 +02:00
if ( ! response_filled )
2016-10-02 11:21:21 +02:00
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: can't produce valid response. " ;
return false ;
}
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_get_block_header_by_height ( const COMMAND_RPC_GET_BLOCK_HEADER_BY_HEIGHT : : request & req , COMMAND_RPC_GET_BLOCK_HEADER_BY_HEIGHT : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_get_block_header_by_height ) ;
2018-01-20 11:38:14 +01:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_BLOCK_HEADER_BY_HEIGHT > ( invoke_http_mode : : JON_RPC , " getblockheaderbyheight " , req , res , r ) )
return r ;
2014-04-09 14:14:35 +02:00
if ( m_core . get_current_blockchain_height ( ) < = req . height )
{
error_resp . code = CORE_RPC_ERROR_CODE_TOO_BIG_HEIGHT ;
2018-11-03 00:31:31 +01:00
error_resp . message = std : : string ( " Requested block height: " ) + std : : to_string ( req . height ) + " greater than current top block height: " + std : : to_string ( m_core . get_current_blockchain_height ( ) - 1 ) ;
2014-04-09 14:14:35 +02:00
return false ;
}
crypto : : hash block_hash = m_core . get_block_id_by_height ( req . height ) ;
block blk ;
bool have_block = m_core . get_block_by_hash ( block_hash , blk ) ;
if ( ! have_block )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
2014-04-30 19:52:21 +02:00
error_resp . message = " Internal error: can't get block by height. Height = " + std : : to_string ( req . height ) + ' . ' ;
2014-04-09 14:14:35 +02:00
return false ;
}
2019-03-01 17:14:51 +01:00
const bool restricted = m_restricted & & ctx ;
bool response_filled = fill_block_header_response ( blk , false , req . height , block_hash , res . block_header , req . fill_pow_hash & & ! restricted ) ;
2016-09-29 15:38:12 +02:00
if ( ! response_filled )
2014-04-09 14:14:35 +02:00
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: can't produce valid response. " ;
return false ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_get_block ( const COMMAND_RPC_GET_BLOCK : : request & req , COMMAND_RPC_GET_BLOCK : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_get_block ) ;
2018-01-20 11:38:14 +01:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_BLOCK > ( invoke_http_mode : : JON_RPC , " getblock " , req , res , r ) )
return r ;
2015-10-13 22:37:35 +02:00
crypto : : hash block_hash ;
if ( ! req . hash . empty ( ) )
{
bool hash_parsed = parse_hash256 ( req . hash , block_hash ) ;
if ( ! hash_parsed )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_PARAM ;
error_resp . message = " Failed to parse hex representation of block hash. Hex = " + req . hash + ' . ' ;
return false ;
}
}
else
{
if ( m_core . get_current_blockchain_height ( ) < = req . height )
{
error_resp . code = CORE_RPC_ERROR_CODE_TOO_BIG_HEIGHT ;
2018-11-03 00:31:31 +01:00
error_resp . message = std : : string ( " Requested block height: " ) + std : : to_string ( req . height ) + " greater than current top block height: " + std : : to_string ( m_core . get_current_blockchain_height ( ) - 1 ) ;
2015-10-13 22:37:35 +02:00
return false ;
}
block_hash = m_core . get_block_id_by_height ( req . height ) ;
}
block blk ;
2017-01-22 13:20:55 +01:00
bool orphan = false ;
bool have_block = m_core . get_block_by_hash ( block_hash , blk , & orphan ) ;
2015-10-13 22:37:35 +02:00
if ( ! have_block )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: can't get block by hash. Hash = " + req . hash + ' . ' ;
return false ;
}
2017-12-11 23:36:58 +01:00
if ( blk . miner_tx . vin . size ( ) ! = 1 | | blk . miner_tx . vin . front ( ) . type ( ) ! = typeid ( txin_gen ) )
2015-10-13 22:37:35 +02:00
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: coinbase transaction in the block has the wrong type " ;
return false ;
}
uint64_t block_height = boost : : get < txin_gen > ( blk . miner_tx . vin . front ( ) ) . height ;
2019-03-01 17:14:51 +01:00
const bool restricted = m_restricted & & ctx ;
bool response_filled = fill_block_header_response ( blk , orphan , block_height , block_hash , res . block_header , req . fill_pow_hash & & ! restricted ) ;
2016-09-29 15:38:12 +02:00
if ( ! response_filled )
2015-10-13 22:37:35 +02:00
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: can't produce valid response. " ;
return false ;
}
2017-10-28 16:25:47 +02:00
res . miner_tx_hash = epee : : string_tools : : pod_to_hex ( cryptonote : : get_transaction_hash ( blk . miner_tx ) ) ;
2015-10-13 22:37:35 +02:00
for ( size_t n = 0 ; n < blk . tx_hashes . size ( ) ; + + n )
{
res . tx_hashes . push_back ( epee : : string_tools : : pod_to_hex ( blk . tx_hashes [ n ] ) ) ;
}
2016-06-09 22:48:29 +02:00
res . blob = string_tools : : buff_to_hex_nodelimer ( t_serializable_object_to_blob ( blk ) ) ;
2015-10-13 22:37:35 +02:00
res . json = obj_to_json_str ( blk ) ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_get_connections ( const COMMAND_RPC_GET_CONNECTIONS : : request & req , COMMAND_RPC_GET_CONNECTIONS : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2014-07-19 01:33:03 +02:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_get_connections ) ;
2014-07-19 01:33:03 +02:00
res . connections = m_p2p . get_payload_object ( ) . get_connections ( ) ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_get_info_json ( const COMMAND_RPC_GET_INFO : : request & req , COMMAND_RPC_GET_INFO : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2014-07-22 20:00:10 +02:00
{
2019-03-29 13:56:47 +01:00
return on_get_info ( req , res , ctx ) ;
2014-07-22 20:00:10 +02:00
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_hard_fork_info ( const COMMAND_RPC_HARD_FORK_INFO : : request & req , COMMAND_RPC_HARD_FORK_INFO : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2015-09-19 17:34:29 +02:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_hard_fork_info ) ;
2018-01-20 11:38:14 +01:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_HARD_FORK_INFO > ( invoke_http_mode : : JON_RPC , " hard_fork_info " , req , res , r ) )
return r ;
2015-09-19 17:34:29 +02:00
const Blockchain & blockchain = m_core . get_blockchain_storage ( ) ;
2016-08-12 20:19:25 +02:00
uint8_t version = req . version > 0 ? req . version : blockchain . get_next_hard_fork_version ( ) ;
2015-09-19 17:34:29 +02:00
res . version = blockchain . get_current_hard_fork_version ( ) ;
2015-12-19 15:52:30 +01:00
res . enabled = blockchain . get_hard_fork_voting_info ( version , res . window , res . votes , res . threshold , res . earliest_height , res . voting ) ;
2015-09-19 17:34:29 +02:00
res . state = blockchain . get_hard_fork_state ( ) ;
2015-10-26 11:17:48 +01:00
res . status = CORE_RPC_STATUS_OK ;
2015-09-19 17:34:29 +02:00
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_get_bans ( const COMMAND_RPC_GETBANS : : request & req , COMMAND_RPC_GETBANS : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2015-11-26 01:04:22 +01:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_get_bans ) ;
2015-11-26 01:04:22 +01:00
2016-03-12 14:44:55 +01:00
auto now = time ( nullptr ) ;
2017-05-27 12:35:54 +02:00
std : : map < std : : string , time_t > blocked_hosts = m_p2p . get_blocked_hosts ( ) ;
for ( std : : map < std : : string , time_t > : : const_iterator i = blocked_hosts . begin ( ) ; i ! = blocked_hosts . end ( ) ; + + i )
2015-11-26 01:04:22 +01:00
{
2016-03-12 14:44:55 +01:00
if ( i - > second > now ) {
COMMAND_RPC_GETBANS : : ban b ;
2017-05-27 12:35:54 +02:00
b . host = i - > first ;
b . ip = 0 ;
uint32_t ip ;
if ( epee : : string_tools : : get_ip_int32_from_string ( ip , i - > first ) )
b . ip = ip ;
2016-03-12 14:44:55 +01:00
b . seconds = i - > second - now ;
res . bans . push_back ( b ) ;
}
2015-11-26 01:04:22 +01:00
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_set_bans ( const COMMAND_RPC_SETBANS : : request & req , COMMAND_RPC_SETBANS : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2015-11-26 01:04:22 +01:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_set_bans ) ;
2015-11-26 01:04:22 +01:00
for ( auto i = req . bans . begin ( ) ; i ! = req . bans . end ( ) ; + + i )
{
2017-05-27 12:35:54 +02:00
epee : : net_utils : : network_address na ;
if ( ! i - > host . empty ( ) )
{
2019-04-09 10:07:13 +02:00
auto na_parsed = net : : get_network_address ( i - > host , 0 ) ;
if ( ! na_parsed )
2017-05-27 12:35:54 +02:00
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_PARAM ;
error_resp . message = " Unsupported host type " ;
return false ;
}
2019-04-09 10:07:13 +02:00
na = std : : move ( * na_parsed ) ;
2017-05-27 12:35:54 +02:00
}
else
{
2017-08-25 17:14:46 +02:00
na = epee : : net_utils : : ipv4_network_address { i - > ip , 0 } ;
2017-05-27 12:35:54 +02:00
}
2015-11-26 01:04:22 +01:00
if ( i - > ban )
2017-05-27 12:35:54 +02:00
m_p2p . block_host ( na , i - > seconds ) ;
2015-11-26 01:04:22 +01:00
else
2017-05-27 12:35:54 +02:00
m_p2p . unblock_host ( na ) ;
2015-11-26 01:04:22 +01:00
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_flush_txpool ( const COMMAND_RPC_FLUSH_TRANSACTION_POOL : : request & req , COMMAND_RPC_FLUSH_TRANSACTION_POOL : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2016-01-30 14:28:26 +01:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_flush_txpool ) ;
2016-01-30 14:28:26 +01:00
bool failed = false ;
2018-04-16 01:16:02 +02:00
std : : vector < crypto : : hash > txids ;
2016-01-30 14:28:26 +01:00
if ( req . txids . empty ( ) )
{
2018-04-16 01:16:02 +02:00
std : : vector < transaction > pool_txs ;
2016-01-30 14:28:26 +01:00
bool r = m_core . get_pool_transactions ( pool_txs ) ;
if ( ! r )
{
res . status = " Failed to get txpool contents " ;
return true ;
}
for ( const auto & tx : pool_txs )
{
txids . push_back ( cryptonote : : get_transaction_hash ( tx ) ) ;
}
}
else
{
for ( const auto & str : req . txids )
{
cryptonote : : blobdata txid_data ;
if ( ! epee : : string_tools : : parse_hexstr_to_binbuff ( str , txid_data ) )
{
failed = true ;
}
2017-12-07 22:33:20 +01:00
else
{
crypto : : hash txid = * reinterpret_cast < const crypto : : hash * > ( txid_data . data ( ) ) ;
txids . push_back ( txid ) ;
}
2016-01-30 14:28:26 +01:00
}
}
if ( ! m_core . get_blockchain_storage ( ) . flush_txes_from_pool ( txids ) )
{
2017-12-07 22:33:20 +01:00
res . status = " Failed to remove one or more tx(es) " ;
2016-01-30 14:28:26 +01:00
return false ;
}
if ( failed )
{
2017-12-07 22:33:20 +01:00
if ( txids . empty ( ) )
res . status = " Failed to parse txid " ;
else
res . status = " Failed to parse some of the txids " ;
2016-01-30 14:28:26 +01:00
return false ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_get_output_histogram ( const COMMAND_RPC_GET_OUTPUT_HISTOGRAM : : request & req , COMMAND_RPC_GET_OUTPUT_HISTOGRAM : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2016-03-26 15:30:23 +01:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_get_output_histogram ) ;
2018-01-20 11:38:14 +01:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_OUTPUT_HISTOGRAM > ( invoke_http_mode : : JON_RPC , " get_output_histogram " , req , res , r ) )
return r ;
2016-03-26 15:30:23 +01:00
2016-09-17 16:45:51 +02:00
std : : map < uint64_t , std : : tuple < uint64_t , uint64_t , uint64_t > > histogram ;
2016-03-26 15:30:23 +01:00
try
{
2018-03-22 18:51:58 +01:00
histogram = m_core . get_blockchain_storage ( ) . get_output_histogram ( req . amounts , req . unlocked , req . recent_cutoff , req . min_count ) ;
2016-03-26 15:30:23 +01:00
}
catch ( const std : : exception & e )
{
res . status = " Failed to get output histogram " ;
return true ;
}
res . histogram . clear ( ) ;
res . histogram . reserve ( histogram . size ( ) ) ;
for ( const auto & i : histogram )
{
2016-09-17 16:45:51 +02:00
if ( std : : get < 0 > ( i . second ) > = req . min_count & & ( std : : get < 0 > ( i . second ) < = req . max_count | | req . max_count = = 0 ) )
res . histogram . push_back ( COMMAND_RPC_GET_OUTPUT_HISTOGRAM : : entry ( i . first , std : : get < 0 > ( i . second ) , std : : get < 1 > ( i . second ) , std : : get < 2 > ( i . second ) ) ) ;
2016-03-26 15:30:23 +01:00
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_get_version ( const COMMAND_RPC_GET_VERSION : : request & req , COMMAND_RPC_GET_VERSION : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2016-07-10 17:49:40 +02:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_get_version ) ;
2018-01-20 11:38:14 +01:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_VERSION > ( invoke_http_mode : : JON_RPC , " get_version " , req , res , r ) )
return r ;
2016-07-10 17:49:40 +02:00
res . version = CORE_RPC_VERSION ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_get_coinbase_tx_sum ( const COMMAND_RPC_GET_COINBASE_TX_SUM : : request & req , COMMAND_RPC_GET_COINBASE_TX_SUM : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2016-10-10 21:45:51 +02:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_get_coinbase_tx_sum ) ;
2016-10-11 01:55:18 +02:00
std : : pair < uint64_t , uint64_t > amounts = m_core . get_coinbase_tx_sum ( req . height , req . count ) ;
res . emission_amount = amounts . first ;
res . fee_amount = amounts . second ;
2017-02-14 00:05:33 +01:00
res . status = CORE_RPC_STATUS_OK ;
2016-10-10 21:45:51 +02:00
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_get_base_fee_estimate ( const COMMAND_RPC_GET_BASE_FEE_ESTIMATE : : request & req , COMMAND_RPC_GET_BASE_FEE_ESTIMATE : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2016-10-28 22:19:40 +02:00
{
2018-07-18 23:24:53 +02:00
PERF_TIMER ( on_get_base_fee_estimate ) ;
2018-01-20 11:38:14 +01:00
bool r ;
2018-07-18 23:24:53 +02:00
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_BASE_FEE_ESTIMATE > ( invoke_http_mode : : JON_RPC , " get_fee_estimate " , req , res , r ) )
2018-01-20 11:38:14 +01:00
return r ;
2018-07-18 23:24:53 +02:00
res . fee = m_core . get_blockchain_storage ( ) . get_dynamic_base_fee_estimate ( req . grace_blocks ) ;
res . quantization_mask = Blockchain : : get_fee_quantization_mask ( ) ;
2016-10-28 22:19:40 +02:00
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_get_alternate_chains ( const COMMAND_RPC_GET_ALTERNATE_CHAINS : : request & req , COMMAND_RPC_GET_ALTERNATE_CHAINS : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2016-12-17 12:25:15 +01:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_get_alternate_chains ) ;
2016-12-17 12:25:15 +01:00
try
{
2018-05-20 00:53:05 +02:00
std : : list < std : : pair < Blockchain : : block_extended_info , std : : vector < crypto : : hash > > > chains = m_core . get_blockchain_storage ( ) . get_alternative_chains ( ) ;
2016-12-17 12:25:15 +01:00
for ( const auto & i : chains )
{
2018-05-20 00:53:05 +02:00
res . chains . push_back ( COMMAND_RPC_GET_ALTERNATE_CHAINS : : chain_info { epee : : string_tools : : pod_to_hex ( get_block_hash ( i . first . bl ) ) , i . first . height , i . second . size ( ) , i . first . cumulative_difficulty , { } , std : : string ( ) } ) ;
res . chains . back ( ) . block_hashes . reserve ( i . second . size ( ) ) ;
for ( const crypto : : hash & block_id : i . second )
res . chains . back ( ) . block_hashes . push_back ( epee : : string_tools : : pod_to_hex ( block_id ) ) ;
if ( i . first . height < i . second . size ( ) )
{
res . status = " Error finding alternate chain attachment point " ;
return true ;
}
cryptonote : : block main_chain_parent_block ;
try { main_chain_parent_block = m_core . get_blockchain_storage ( ) . get_db ( ) . get_block_from_height ( i . first . height - i . second . size ( ) ) ; }
catch ( const std : : exception & e ) { res . status = " Error finding alternate chain attachment point " ; return true ; }
res . chains . back ( ) . main_chain_parent_block = epee : : string_tools : : pod_to_hex ( get_block_hash ( main_chain_parent_block ) ) ;
2016-12-17 12:25:15 +01:00
}
res . status = CORE_RPC_STATUS_OK ;
}
catch ( . . . )
{
res . status = " Error retrieving alternate chains " ;
}
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_get_limit ( const COMMAND_RPC_GET_LIMIT : : request & req , COMMAND_RPC_GET_LIMIT : : response & res , const connection_context * ctx )
2017-09-17 21:19:53 +02:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_get_limit ) ;
2018-01-20 11:38:14 +01:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_LIMIT > ( invoke_http_mode : : JON , " /get_limit " , req , res , r ) )
return r ;
2017-09-17 21:19:53 +02:00
res . limit_down = epee : : net_utils : : connection_basic : : get_rate_down_limit ( ) ;
res . limit_up = epee : : net_utils : : connection_basic : : get_rate_up_limit ( ) ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_set_limit ( const COMMAND_RPC_SET_LIMIT : : request & req , COMMAND_RPC_SET_LIMIT : : response & res , const connection_context * ctx )
2017-09-17 21:19:53 +02:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_set_limit ) ;
2017-09-17 21:19:53 +02:00
// -1 = reset to default
// 0 = do not modify
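// e.g. limit_down = 8192 with limit_up = -1 caps the download rate at 8192
// (illustrative value, in the units used by connection_basic's rate limiter)
// and resets the upload rate to nodetool::default_limit_up.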
if ( req . limit_down > 0 )
{
epee : : net_utils : : connection_basic : : set_rate_down_limit ( req . limit_down ) ;
}
else if ( req . limit_down < 0 )
{
if ( req . limit_down ! = - 1 )
{
res . status = " Invalid parameter: limit_down must be -1, 0, or a positive value " ;
return false ;
}
2017-11-26 15:26:17 +01:00
epee : : net_utils : : connection_basic : : set_rate_down_limit ( nodetool : : default_limit_down ) ;
2017-09-17 21:19:53 +02:00
}
if ( req . limit_up > 0 )
{
epee : : net_utils : : connection_basic : : set_rate_up_limit ( req . limit_up ) ;
}
else if ( req . limit_up < 0 )
{
if ( req . limit_up ! = - 1 )
{
res . status = " Invalid parameter: limit_up must be -1, 0, or a positive value " ;
return false ;
}
2017-11-26 15:26:17 +01:00
epee : : net_utils : : connection_basic : : set_rate_up_limit ( nodetool : : default_limit_up ) ;
2017-09-17 21:19:53 +02:00
}
res . limit_down = epee : : net_utils : : connection_basic : : get_rate_down_limit ( ) ;
res . limit_up = epee : : net_utils : : connection_basic : : get_rate_up_limit ( ) ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_out_peers ( const COMMAND_RPC_OUT_PEERS : : request & req , COMMAND_RPC_OUT_PEERS : : response & res , const connection_context * ctx )
2015-04-01 19:00:45 +02:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_out_peers ) ;
2019-04-09 10:07:13 +02:00
m_p2p . change_max_out_public_peers ( req . out_peers ) ;
2017-10-06 09:40:14 +02:00
res . status = CORE_RPC_STATUS_OK ;
return true ;
2015-04-01 19:00:45 +02:00
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_in_peers ( const COMMAND_RPC_IN_PEERS : : request & req , COMMAND_RPC_IN_PEERS : : response & res , const connection_context * ctx )
2018-01-20 22:44:23 +01:00
{
PERF_TIMER ( on_in_peers ) ;
2019-04-09 10:07:13 +02:00
m_p2p . change_max_in_public_peers ( req . in_peers ) ;
2018-01-20 22:44:23 +01:00
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_start_save_graph ( const COMMAND_RPC_START_SAVE_GRAPH : : request & req , COMMAND_RPC_START_SAVE_GRAPH : : response & res , const connection_context * ctx )
2015-04-01 19:00:45 +02:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_start_save_graph ) ;
2015-04-01 19:00:45 +02:00
m_p2p . set_save_graph ( true ) ;
2017-02-14 00:05:33 +01:00
res . status = CORE_RPC_STATUS_OK ;
2015-04-01 19:00:45 +02:00
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_stop_save_graph ( const COMMAND_RPC_STOP_SAVE_GRAPH : : request & req , COMMAND_RPC_STOP_SAVE_GRAPH : : response & res , const connection_context * ctx )
2015-04-01 19:00:45 +02:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_stop_save_graph ) ;
2015-04-01 19:00:45 +02:00
m_p2p . set_save_graph ( false ) ;
2017-02-14 00:05:33 +01:00
res . status = CORE_RPC_STATUS_OK ;
2015-04-01 19:00:45 +02:00
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_update ( const COMMAND_RPC_UPDATE : : request & req , COMMAND_RPC_UPDATE : : response & res , const connection_context * ctx )
2017-02-25 00:16:13 +01:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_update ) ;
2018-04-25 08:13:38 +02:00
static const char software [ ] = " loki " ;
2017-03-04 19:45:33 +01:00
# ifdef BUILD_TAG
static const char buildtag [ ] = BOOST_PP_STRINGIZE ( BUILD_TAG ) ;
2017-09-22 22:48:19 +02:00
static const char subdir [ ] = " cli " ;
2017-02-25 00:16:13 +01:00
# else
static const char buildtag [ ] = " source " ;
2017-09-22 22:48:19 +02:00
static const char subdir [ ] = " source " ;
2017-02-25 00:16:13 +01:00
# endif
if ( req . command ! = " check " & & req . command ! = " download " & & req . command ! = " update " )
{
res . status = std : : string ( " unknown command: ' " ) + req . command + " ' " ;
return true ;
}
std : : string version , hash ;
if ( ! tools : : check_updates ( software , buildtag , version , hash ) )
{
res . status = " Error checking for updates " ;
return true ;
}
2018-04-10 06:49:20 +02:00
if ( tools : : vercmp ( version . c_str ( ) , LOKI_VERSION ) < = 0 )
2017-02-25 00:16:13 +01:00
{
res . update = false ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
res . update = true ;
res . version = version ;
2017-09-22 22:48:19 +02:00
res . user_uri = tools : : get_update_url ( software , subdir , buildtag , version , true ) ;
res . auto_uri = tools : : get_update_url ( software , subdir , buildtag , version , false ) ;
2017-02-25 00:16:13 +01:00
res . hash = hash ;
if ( req . command = = " check " )
{
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
boost : : filesystem : : path path ;
if ( req . path . empty ( ) )
{
std : : string filename ;
const char * slash = strrchr ( res . auto_uri . c_str ( ) , ' / ' ) ;
if ( slash )
filename = slash + 1 ;
else
filename = std : : string ( software ) + " -update- " + version ;
path = epee : : string_tools : : get_current_module_folder ( ) ;
path / = filename ;
}
else
{
path = req . path ;
}
crypto : : hash file_hash ;
if ( ! tools : : sha256sum ( path . string ( ) , file_hash ) | | ( hash ! = epee : : string_tools : : pod_to_hex ( file_hash ) ) )
{
MDEBUG ( " We don't have that file already, downloading " ) ;
if ( ! tools : : download ( path . string ( ) , res . auto_uri ) )
{
MERROR ( " Failed to download " < < res . auto_uri ) ;
return false ;
}
if ( ! tools : : sha256sum ( path . string ( ) , file_hash ) )
{
MERROR ( " Failed to hash " < < path ) ;
return false ;
}
if ( hash ! = epee : : string_tools : : pod_to_hex ( file_hash ) )
{
MERROR ( " Download from " < < res . auto_uri < < " does not match the expected hash " ) ;
return false ;
}
MINFO ( " New version downloaded to " < < path ) ;
}
else
{
MDEBUG ( " We already have " < < path < < " with expected hash " ) ;
}
res . path = path . string ( ) ;
if ( req . command = = " download " )
{
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
res . status = " 'update' not implemented yet " ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_pop_blocks ( const COMMAND_RPC_POP_BLOCKS : : request & req , COMMAND_RPC_POP_BLOCKS : : response & res , const connection_context * ctx )
2018-11-25 22:08:07 +01:00
{
PERF_TIMER ( on_pop_blocks ) ;
m_core . get_blockchain_storage ( ) . pop_blocks ( req . nblocks ) ;
res . height = m_core . get_current_blockchain_height ( ) ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_relay_tx ( const COMMAND_RPC_RELAY_TX : : request & req , COMMAND_RPC_RELAY_TX : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2017-04-02 13:17:35 +02:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_relay_tx ) ;
2017-04-02 13:17:35 +02:00
bool failed = false ;
2017-12-07 22:33:20 +01:00
res . status = " " ;
2017-04-02 13:17:35 +02:00
for ( const auto & str : req . txids )
{
cryptonote : : blobdata txid_data ;
if ( ! epee : : string_tools : : parse_hexstr_to_binbuff ( str , txid_data ) )
{
2017-12-07 22:33:20 +01:00
if ( ! res . status . empty ( ) ) res . status + = " , " ;
res . status + = std : : string ( " invalid transaction id: " ) + str ;
2017-04-02 13:17:35 +02:00
failed = true ;
2017-12-07 22:33:20 +01:00
continue ;
2017-04-02 13:17:35 +02:00
}
crypto : : hash txid = * reinterpret_cast < const crypto : : hash * > ( txid_data . data ( ) ) ;
2017-05-14 15:06:55 +02:00
cryptonote : : blobdata txblob ;
bool r = m_core . get_pool_transaction ( txid , txblob ) ;
2017-04-02 13:17:35 +02:00
if ( r )
{
cryptonote_connection_context fake_context = AUTO_VAL_INIT ( fake_context ) ;
NOTIFY_NEW_TRANSACTIONS : : request r ;
2017-05-14 15:06:55 +02:00
r . txs . push_back ( txblob ) ;
2017-04-02 13:17:35 +02:00
m_core . get_protocol ( ) - > relay_transactions ( r , fake_context ) ;
//TODO: make sure that tx has reached other nodes here, probably wait to receive reflections from other nodes
}
else
{
2017-12-07 22:33:20 +01:00
if ( ! res . status . empty ( ) ) res . status + = " , " ;
res . status + = std : : string ( " transaction not found in pool: " ) + str ;
2017-04-02 13:17:35 +02:00
failed = true ;
2017-12-07 22:33:20 +01:00
continue ;
2017-04-02 13:17:35 +02:00
}
}
if ( failed )
{
return false ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_sync_info ( const COMMAND_RPC_SYNC_INFO : : request & req , COMMAND_RPC_SYNC_INFO : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2017-07-02 23:41:15 +02:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_sync_info ) ;
2017-07-02 23:41:15 +02:00
crypto : : hash top_hash ;
2017-09-09 13:06:24 +02:00
m_core . get_blockchain_top ( res . height , top_hash ) ;
2017-07-02 23:41:15 +02:00
+ + res . height ; // turn top block height into blockchain height
res . target_height = m_core . get_target_blockchain_height ( ) ;
Pruning
The blockchain prunes seven eighths of prunable tx data.
This saves about two thirds of the blockchain size, while
keeping the node useful as a sync source for an eighth
of the blockchain.
No other data is currently pruned.
There are three ways to prune a blockchain:
- run monerod with --prune-blockchain
- run "prune_blockchain" in the monerod console
- run the monero-blockchain-prune utility
The first two will prune in place. Due to how LMDB works, this
will not reduce the blockchain size on disk. Instead, it will
mark parts of the file as free, so that future data will use
that free space, causing the file to not grow until free space
grows scarce.
The third way will create a second database, a pruned copy of
the original one. Since this is a new file, this one will be
smaller than the original one.
Once the database is pruned, it will stay pruned as it syncs.
That is, there is no need to use --prune-blockchain again, etc.
2018-04-30 00:30:51 +02:00
res . next_needed_pruning_seed = m_p2p . get_payload_object ( ) . get_next_needed_pruning_stripe ( ) . second ;
2017-07-02 23:41:15 +02:00
for ( const auto & c : m_p2p . get_payload_object ( ) . get_connections ( ) )
res . peers . push_back ( { c } ) ;
const cryptonote : : block_queue & block_queue = m_p2p . get_payload_object ( ) . get_block_queue ( ) ;
block_queue . foreach ( [ & ] ( const cryptonote : : block_queue : : span & span ) {
2017-11-18 00:52:50 +01:00
const std : : string span_connection_id = epee : : string_tools : : pod_to_hex ( span . connection_id ) ;
2017-07-02 23:41:15 +02:00
uint32_t speed = ( uint32_t ) ( 100.0f * block_queue . get_speed ( span . connection_id ) + 0.5f ) ;
std : : string address = " " ;
for ( const auto & c : m_p2p . get_payload_object ( ) . get_connections ( ) )
2017-11-18 00:52:50 +01:00
if ( c . connection_id = = span_connection_id )
2017-07-02 23:41:15 +02:00
address = c . address ;
2017-11-18 00:52:50 +01:00
res . spans . push_back ( { span . start_block_height , span . nblocks , span_connection_id , ( uint32_t ) ( span . rate + 0.5f ) , speed , span . size , address } ) ;
2017-07-02 23:41:15 +02:00
return true ;
} ) ;
2018-04-30 00:30:51 +02:00
res . overview = block_queue . get_overview ( res . height ) ;
2017-07-02 23:41:15 +02:00
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_get_txpool_backlog ( const COMMAND_RPC_GET_TRANSACTION_POOL_BACKLOG : : request & req , COMMAND_RPC_GET_TRANSACTION_POOL_BACKLOG : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2017-08-26 17:23:31 +02:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_get_txpool_backlog ) ;
2018-01-20 11:38:14 +01:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_TRANSACTION_POOL_BACKLOG > ( invoke_http_mode : : JON_RPC , " get_txpool_backlog " , req , res , r ) )
return r ;
2017-08-26 17:23:31 +02:00
if ( ! m_core . get_txpool_backlog ( res . backlog ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Failed to get txpool backlog " ;
return false ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_get_output_distribution ( const COMMAND_RPC_GET_OUTPUT_DISTRIBUTION : : request & req , COMMAND_RPC_GET_OUTPUT_DISTRIBUTION : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2018-02-19 12:15:15 +01:00
{
2018-04-08 14:23:49 +02:00
PERF_TIMER ( on_get_output_distribution ) ;
2018-05-22 15:46:30 +02:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_OUTPUT_DISTRIBUTION > ( invoke_http_mode : : JON_RPC , " get_output_distribution " , req , res , r ) )
return r ;
2018-02-19 12:15:15 +01:00
try
{
2018-10-19 11:20:03 +02:00
// 0 is placeholder for the whole chain
const uint64_t req_to_height = req . to_height ? req . to_height : ( m_core . get_current_blockchain_height ( ) - 1 ) ;
2018-02-19 12:15:15 +01:00
for ( uint64_t amount : req . amounts )
{
2018-11-06 15:23:26 +01:00
auto data = rpc : : RpcHandler : : get_output_distribution ( [ this ] ( uint64_t amount , uint64_t from , uint64_t to , uint64_t & start_height , std : : vector < uint64_t > & distribution , uint64_t & base ) { return m_core . get_output_distribution ( amount , from , to , start_height , distribution , base ) ; } , amount , req . from_height , req_to_height , req . cumulative ) ;
2018-10-20 04:06:03 +02:00
if ( ! data )
2018-02-19 12:15:15 +01:00
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
2018-10-20 04:06:03 +02:00
error_resp . message = " Failed to get output distribution " ;
2018-02-19 12:15:15 +01:00
return false ;
}
2018-05-31 17:53:56 +02:00
2018-11-08 19:26:59 +01:00
res . distributions . push_back ( { std : : move ( * data ) , amount , " " , req . binary , req . compress } ) ;
2018-02-19 12:15:15 +01:00
}
}
catch ( const std : : exception & e )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Failed to get output distribution " ;
return false ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_get_output_distribution_bin ( const COMMAND_RPC_GET_OUTPUT_DISTRIBUTION : : request & req , COMMAND_RPC_GET_OUTPUT_DISTRIBUTION : : response & res , const connection_context * ctx )
2018-11-08 19:26:59 +01:00
{
PERF_TIMER ( on_get_output_distribution_bin ) ;
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_OUTPUT_DISTRIBUTION > ( invoke_http_mode : : BIN , " /get_output_distribution.bin " , req , res , r ) )
return r ;
res . status = " Failed " ;
if ( ! req . binary )
{
res . status = " Binary only call " ;
return false ;
}
try
{
// 0 is placeholder for the whole chain
const uint64_t req_to_height = req . to_height ? req . to_height : ( m_core . get_current_blockchain_height ( ) - 1 ) ;
for ( uint64_t amount : req . amounts )
{
auto data = rpc : : RpcHandler : : get_output_distribution ( [ this ] ( uint64_t amount , uint64_t from , uint64_t to , uint64_t & start_height , std : : vector < uint64_t > & distribution , uint64_t & base ) { return m_core . get_output_distribution ( amount , from , to , start_height , distribution , base ) ; } , amount , req . from_height , req_to_height , req . cumulative ) ;
if ( ! data )
{
res . status = " Failed to get output distribution " ;
return false ;
}
res . distributions . push_back ( { std : : move ( * data ) , amount , " " , req . binary , req . compress } ) ;
}
}
catch ( const std : : exception & e )
{
res . status = " Failed to get output distribution " ;
return false ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_prune_blockchain ( const COMMAND_RPC_PRUNE_BLOCKCHAIN : : request & req , COMMAND_RPC_PRUNE_BLOCKCHAIN : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2018-04-30 00:30:51 +02:00
{
try
{
if ( ! ( req . check ? m_core . check_blockchain_pruning ( ) : m_core . prune_blockchain ( ) ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = req . check ? " Failed to check blockchain pruning " : " Failed to prune blockchain " ;
return false ;
}
res . pruning_seed = m_core . get_blockchain_pruning_seed ( ) ;
2019-04-16 17:14:18 +02:00
res . pruned = res . pruning_seed ! = 0 ;
2018-04-30 00:30:51 +02:00
}
catch ( const std : : exception & e )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Failed to prune blockchain " ;
return false ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
2015-01-29 23:10:53 +01:00
2018-02-16 12:04:04 +01:00
const command_line : : arg_descriptor < std : : string , false , true , 2 > core_rpc_server : : arg_rpc_bind_port = {
2015-01-29 23:10:53 +01:00
" rpc-bind-port "
, " Port for RPC server "
, std : : to_string ( config : : RPC_DEFAULT_PORT )
2018-02-16 12:04:04 +01:00
, { { & cryptonote : : arg_testnet_on , & cryptonote : : arg_stagenet_on } }
2018-03-27 15:47:57 +02:00
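// if the port was left at its default, substitute the testnet or stagenet
// default RPC port as appropriate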
, [ ] ( std : : array < bool , 2 > testnet_stagenet , bool defaulted , std : : string val ) - > std : : string {
2018-02-16 12:04:04 +01:00
if ( testnet_stagenet [ 0 ] & & defaulted )
2018-01-22 02:49:51 +01:00
return std : : to_string ( config : : testnet : : RPC_DEFAULT_PORT ) ;
2018-02-16 12:04:04 +01:00
else if ( testnet_stagenet [ 1 ] & & defaulted )
return std : : to_string ( config : : stagenet : : RPC_DEFAULT_PORT ) ;
2018-01-22 02:49:51 +01:00
return val ;
}
2015-01-29 23:10:53 +01:00
} ;
2019-01-30 06:59:47 +01:00
const command_line : : arg_descriptor < std : : string > core_rpc_server : : arg_bootstrap_daemon_login = {
" bootstrap-daemon-login "
, " Specify username:password for the bootstrap daemon login "
, " "
} ;
2017-11-16 04:58:11 +01:00
const command_line : : arg_descriptor < std : : string > core_rpc_server : : arg_rpc_restricted_bind_port = {
" rpc-restricted-bind-port "
, " Port for restricted RPC server "
, " "
} ;
2015-11-27 19:24:29 +01:00
const command_line : : arg_descriptor < bool > core_rpc_server : : arg_restricted_rpc = {
" restricted-rpc "
2017-11-08 13:06:41 +01:00
, " Restrict RPC to view only commands and do not return privacy sensitive data in RPC calls "
2015-11-27 19:24:29 +01:00
, false
} ;
2018-01-20 11:38:14 +01:00
epee: add SSL support
RPC connections now have optional transparent SSL.
An optional private key and certificate file can be passed,
using the --{rpc,daemon}-ssl-private-key and
--{rpc,daemon}-ssl-certificate options. Those take as
argument a path to a PEM format private key and
certificate, respectively.
If not given, a temporary self-signed certificate will be used.
SSL can be enabled or disabled using --{rpc,daemon}-ssl, which
accepts autodetect (default), disabled or enabled.
Access can be restricted to particular certificates using the
--rpc-ssl-allowed-certificates option, which takes a list of
paths to PEM encoded certificates. This allows a wallet to
connect only to the daemon it thinks it is connected to,
by forcing SSL and listing the paths to the known good
certificates.
To generate long-term certificates:
openssl genrsa -out /tmp/KEY 4096
openssl req -new -key /tmp/KEY -out /tmp/REQ
openssl x509 -req -days 999999 -sha256 -in /tmp/REQ -signkey /tmp/KEY -out /tmp/CERT
/tmp/KEY is the private key, and /tmp/CERT is the certificate,
both in PEM format. /tmp/REQ can be removed. Adjust the last
command to set the expiration date, etc., as needed. Long-term
certificates don't make a whole lot of sense for monero anyway,
since most servers will run with one-time temporary self-signed
certificates.
SSL support is transparent, so all communication is done on the
existing ports, with SSL autodetection. This means you can start
using an SSL daemon now, but you should not enforce SSL yet or
nothing will talk to you.
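The SSL behaviour is driven entirely by the command-line descriptors defined below.
As a hypothetical illustration (not part of this file; the struct and function names
are made up), initialisation code could read them with command_line::get_arg like this:
// Hypothetical sketch: reading the SSL options declared below during daemon
// initialisation. Only command_line::get_arg and the arg_* descriptors from
// this file are assumed to exist as shown.
#include <string>
#include <boost/program_options/variables_map.hpp>
#include "common/command_line.h"
#include "rpc/core_rpc_server.h"

struct rpc_ssl_config
{
  std::string mode;        // "enabled", "disabled" or "autodetect"
  std::string private_key; // path to a PEM private key, may be empty
  std::string certificate; // path to a PEM certificate, may be empty
};

static rpc_ssl_config read_rpc_ssl_config(const boost::program_options::variables_map &vm)
{
  rpc_ssl_config cfg;
  cfg.mode        = command_line::get_arg(vm, cryptonote::core_rpc_server::arg_rpc_ssl);
  cfg.private_key = command_line::get_arg(vm, cryptonote::core_rpc_server::arg_rpc_ssl_private_key);
  cfg.certificate = command_line::get_arg(vm, cryptonote::core_rpc_server::arg_rpc_ssl_certificate);
  return cfg;
}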
2018-06-15 00:44:48 +02:00
const command_line : : arg_descriptor < std : : string > core_rpc_server : : arg_rpc_ssl = {
" rpc-ssl "
, " Enable SSL on RPC connections: enabled|disabled|autodetect "
, " autodetect "
} ;
const command_line : : arg_descriptor < std : : string > core_rpc_server : : arg_rpc_ssl_private_key = {
" rpc-ssl-private-key "
, " Path to a PEM format private key "
, " "
} ;
const command_line : : arg_descriptor < std : : string > core_rpc_server : : arg_rpc_ssl_certificate = {
" rpc-ssl-certificate "
, " Path to a PEM format certificate "
, " "
} ;
2019-03-12 03:01:03 +01:00
const command_line : : arg_descriptor < std : : string > core_rpc_server : : arg_rpc_ssl_ca_certificates = {
" rpc-ssl-ca-certificates "
, " Path to file containing concatenated PEM format certificate(s) to replace system CA(s). "
2018-06-15 00:44:48 +02:00
} ;
2018-06-15 00:44:48 +02:00
const command_line : : arg_descriptor < std : : vector < std : : string > > core_rpc_server : : arg_rpc_ssl_allowed_fingerprints = {
" rpc-ssl-allowed-fingerprints "
, " List of certificate fingerprints to allow "
} ;
2018-06-15 00:44:48 +02:00
const command_line : : arg_descriptor < bool > core_rpc_server : : arg_rpc_ssl_allow_any_cert = {
" rpc-ssl-allow-any-cert "
2019-03-12 03:01:03 +01:00
, " Allow any peer certificate "
2018-06-15 00:44:48 +02:00
, false
} ;
2018-01-20 11:38:14 +01:00
const command_line : : arg_descriptor < std : : string > core_rpc_server : : arg_bootstrap_daemon_address = {
" bootstrap-daemon-address "
, " URL of a 'bootstrap' remote daemon that the connected wallets can use while this daemon is still not fully synced "
, " "
} ;
2019-01-30 05:18:15 +01:00
//
// Loki
//
bool core_rpc_server : : on_get_quorum_state ( const COMMAND_RPC_GET_QUORUM_STATE : : request & req , COMMAND_RPC_GET_QUORUM_STATE : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
{
PERF_TIMER ( on_get_quorum_state ) ;
bool r ;
2019-05-01 08:01:17 +02:00
const auto uptime_quorum = m_core . get_uptime_quorum ( req . height ) ;
r = ( uptime_quorum ! = nullptr ) ;
2019-01-30 05:18:15 +01:00
if ( r )
{
res . status = CORE_RPC_STATUS_OK ;
2019-05-01 08:01:17 +02:00
res . quorum_nodes . reserve ( uptime_quorum - > quorum_nodes . size ( ) ) ;
res . nodes_to_test . reserve ( uptime_quorum - > nodes_to_test . size ( ) ) ;
2019-01-30 05:18:15 +01:00
2019-05-01 08:01:17 +02:00
for ( const auto & key : uptime_quorum - > quorum_nodes )
2019-01-30 05:18:15 +01:00
res . quorum_nodes . push_back ( epee : : string_tools : : pod_to_hex ( key ) ) ;
2019-05-01 08:01:17 +02:00
for ( const auto & key : uptime_quorum - > nodes_to_test )
2019-01-30 05:18:15 +01:00
res . nodes_to_test . push_back ( epee : : string_tools : : pod_to_hex ( key ) ) ;
}
else
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_PARAM ;
error_resp . message = " Block height: " ;
error_resp . message + = std : : to_string ( req . height ) ;
error_resp . message + = " , returned null hash or failed to derive quorum list " ;
}
return r ;
}
//------------------------------------------------------------------------------------------------------------------------------
bool core_rpc_server : : on_get_quorum_state_batched ( const COMMAND_RPC_GET_QUORUM_STATE_BATCHED : : request & req , COMMAND_RPC_GET_QUORUM_STATE_BATCHED : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
{
PERF_TIMER ( on_get_quorum_state_batched ) ;
const uint64_t cur_height = m_core . get_current_blockchain_height ( ) ;
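// clamp the requested range: only heights within the quorum lifetime window,
// and not beyond the current height, are served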
const uint64_t height_begin = std : : max ( req . height_begin , cur_height - service_nodes : : QUORUM_LIFETIME ) ;
const uint64_t height_end = std : : min ( req . height_end , cur_height ) ;
if ( height_begin > height_end )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_PARAM ;
error_resp . message = " height_end cannot be smaller than height_begin " ;
return true ;
}
boost : : optional < uint64_t > failed_height = boost : : none ;
res . quorum_entries . reserve ( height_end - height_begin + 1 ) ;
for ( auto h = height_begin ; h < = height_end ; + + h )
{
2019-05-01 08:01:17 +02:00
const auto uptime_quorum = m_core . get_uptime_quorum ( h ) ;
2019-01-30 05:18:15 +01:00
2019-05-01 08:01:17 +02:00
if ( ! uptime_quorum ) {
2019-01-30 05:18:15 +01:00
failed_height = h ;
break ;
}
res . quorum_entries . push_back ( { } ) ;
auto & entry = res . quorum_entries . back ( ) ;
entry . height = h ;
2019-05-01 08:01:17 +02:00
entry . quorum_nodes . reserve ( uptime_quorum - > quorum_nodes . size ( ) ) ;
entry . nodes_to_test . reserve ( uptime_quorum - > nodes_to_test . size ( ) ) ;
2019-01-30 05:18:15 +01:00
2019-05-01 08:01:17 +02:00
for ( const auto & key : uptime_quorum - > quorum_nodes )
2019-01-30 05:18:15 +01:00
entry . quorum_nodes . push_back ( epee : : string_tools : : pod_to_hex ( key ) ) ;
2019-05-01 08:01:17 +02:00
for ( const auto & key : uptime_quorum - > nodes_to_test )
2019-01-30 05:18:15 +01:00
entry . nodes_to_test . push_back ( epee : : string_tools : : pod_to_hex ( key ) ) ;
}
if ( failed_height ) {
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_PARAM ;
error_resp . message = " Block height: " ;
error_resp . message + = std : : to_string ( * failed_height ) ;
error_resp . message + = " , returned null hash or failed to derive quorum list " ;
} else {
res . status = CORE_RPC_STATUS_OK ;
}
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 03:28:21 +01:00
bool core_rpc_server : : on_get_service_node_registration_cmd_raw ( const COMMAND_RPC_GET_SERVICE_NODE_REGISTRATION_CMD_RAW : : request & req ,
2019-01-30 05:18:15 +01:00
COMMAND_RPC_GET_SERVICE_NODE_REGISTRATION_CMD_RAW : : response & res ,
epee : : json_rpc : : error & error_resp ,
const connection_context * ctx )
2018-07-31 08:13:03 +02:00
{
2019-01-11 03:28:21 +01:00
PERF_TIMER ( on_get_service_node_registration_cmd_raw ) ;
2018-07-31 08:13:03 +02:00
crypto : : public_key service_node_pubkey ;
crypto : : secret_key service_node_key ;
if ( ! m_core . get_service_node_keys ( service_node_pubkey , service_node_key ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_PARAM ;
error_resp . message = " Daemon has not been started in service node mode, please relaunch with --service-node flag. " ;
return false ;
}
2019-01-11 03:28:21 +01:00
std : : string err_msg ;
2019-03-13 06:35:02 +01:00
int hf_version = m_core . get_hard_fork_version ( m_core . get_current_blockchain_height ( ) ) ;
if ( ! service_nodes : : make_registration_cmd ( m_core . get_nettype ( ) , hf_version , req . staking_requirement , req . args , service_node_pubkey , service_node_key , res . registration_cmd , req . make_friendly , err_msg ) )
2018-07-31 08:13:03 +02:00
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_PARAM ;
error_resp . message = " Failed to make registration command " ;
2019-01-11 03:28:21 +01:00
if ( err_msg ! = " " )
error_resp . message + = " : " + err_msg ;
2018-07-31 08:13:03 +02:00
return false ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 03:28:21 +01:00
bool core_rpc_server : : on_get_service_node_registration_cmd ( const COMMAND_RPC_GET_SERVICE_NODE_REGISTRATION_CMD : : request & req ,
COMMAND_RPC_GET_SERVICE_NODE_REGISTRATION_CMD : : response & res ,
2019-01-30 05:18:15 +01:00
epee : : json_rpc : : error & error_resp ,
const connection_context * ctx )
2019-01-11 03:28:21 +01:00
{
PERF_TIMER ( on_get_service_node_registration_cmd ) ;
std : : vector < std : : string > args ;
2019-03-21 01:03:06 +01:00
uint64_t const curr_height = m_core . get_current_blockchain_height ( ) ;
uint64_t staking_requirement = service_nodes : : get_staking_requirement ( m_core . get_nettype ( ) , curr_height , m_core . get_hard_fork_version ( curr_height ) ) ;
2019-01-11 03:28:21 +01:00
{
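// convert the operator cut, supplied as a percentage string (0-100), into
// portions for the registration arguments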
uint64_t portions_cut ;
if ( ! service_nodes : : get_portions_from_percent_str ( req . operator_cut , portions_cut ) )
{
MERROR ( " Invalid value: " < < req . operator_cut < < " . Should be between [0-100] " ) ;
return false ;
}
args . push_back ( std : : to_string ( portions_cut ) ) ;
}
for ( const auto & contrib : req . contributions )
{
uint64_t num_portions = service_nodes : : get_portions_to_make_amount ( staking_requirement , contrib . amount ) ;
args . push_back ( contrib . address ) ;
args . push_back ( std : : to_string ( num_portions ) ) ;
}
COMMAND_RPC_GET_SERVICE_NODE_REGISTRATION_CMD_RAW : : request req_old ;
COMMAND_RPC_GET_SERVICE_NODE_REGISTRATION_CMD_RAW : : response res_old ;
2019-03-13 06:35:02 +01:00
req_old . staking_requirement = req . staking_requirement ;
2019-01-11 03:28:21 +01:00
req_old . args = std : : move ( args ) ;
req_old . make_friendly = false ;
const bool success = on_get_service_node_registration_cmd_raw ( req_old , res_old , error_resp ) ;
res . status = res_old . status ;
res . registration_cmd = res_old . registration_cmd ;
return success ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-30 06:59:47 +01:00
bool core_rpc_server : : on_get_service_node_blacklisted_key_images ( const COMMAND_RPC_GET_SERVICE_NODE_BLACKLISTED_KEY_IMAGES : : request & req , COMMAND_RPC_GET_SERVICE_NODE_BLACKLISTED_KEY_IMAGES : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
{
PERF_TIMER ( on_get_service_node_blacklisted_key_images ) ;
const std : : vector < service_nodes : : key_image_blacklist_entry > & blacklist = m_core . get_service_node_blacklisted_key_images ( ) ;
res . status = CORE_RPC_STATUS_OK ;
res . blacklist . reserve ( blacklist . size ( ) ) ;
for ( const service_nodes : : key_image_blacklist_entry & entry : blacklist )
{
COMMAND_RPC_GET_SERVICE_NODE_BLACKLISTED_KEY_IMAGES : : entry new_entry = { } ;
new_entry . key_image = epee : : string_tools : : pod_to_hex ( entry . key_image ) ;
new_entry . unlock_height = entry . unlock_height ;
res . blacklist . push_back ( std : : move ( new_entry ) ) ;
}
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-30 05:18:15 +01:00
bool core_rpc_server : : on_get_service_node_key ( const COMMAND_RPC_GET_SERVICE_NODE_KEY : : request & req , COMMAND_RPC_GET_SERVICE_NODE_KEY : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
{
PERF_TIMER ( on_get_service_node_key ) ;
crypto : : public_key pubkey ;
crypto : : secret_key seckey ;
bool result = m_core . get_service_node_keys ( pubkey , seckey ) ;
if ( result )
{
res . service_node_pubkey = string_tools : : pod_to_hex ( pubkey ) ;
}
else
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Daemon queried is not a service node or did not launch with --service-node " ;
return false ;
}
res . status = CORE_RPC_STATUS_OK ;
return result ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-30 06:59:47 +01:00
bool core_rpc_server : : on_get_all_service_nodes_keys ( const COMMAND_RPC_GET_ALL_SERVICE_NODES_KEYS : : request & req , COMMAND_RPC_GET_ALL_SERVICE_NODES_KEYS : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
{
std : : vector < crypto : : public_key > keys ;
m_core . get_all_service_nodes_public_keys ( keys , req . fully_funded_nodes_only ) ;
res . keys . clear ( ) ;
res . keys . resize ( keys . size ( ) ) ;
size_t i = 0 ;
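// each public key is hex-encoded, then converted to base32z before being returned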
for ( const auto & key : keys )
{
std : : string const hex64 = string_tools : : pod_to_hex ( key ) ;
res . keys [ i + + ] = loki : : hex64_to_base32z ( hex64 ) ;
}
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-30 05:18:15 +01:00
bool core_rpc_server : : on_get_service_nodes ( const COMMAND_RPC_GET_SERVICE_NODES : : request & req , COMMAND_RPC_GET_SERVICE_NODES : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2018-08-15 04:59:05 +02:00
{
2018-08-15 06:27:45 +02:00
PERF_TIMER ( on_get_service_nodes ) ;
2018-08-15 04:59:05 +02:00
std : : vector < crypto : : public_key > pubkeys ( req . service_node_pubkeys . size ( ) ) ;
for ( size_t i = 0 ; i < req . service_node_pubkeys . size ( ) ; i + + )
{
if ( ! string_tools : : hex_to_pod ( req . service_node_pubkeys [ i ] , pubkeys [ i ] ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_PARAM ;
error_resp . message = " Could not convert to a public key, arg: " ;
error_resp . message + = std : : to_string ( i ) ;
error_resp . message + = " which is pubkey: " ;
error_resp . message + = req . service_node_pubkeys [ i ] ;
return false ;
}
}
std : : vector < service_nodes : : service_node_pubkey_info > pubkey_info_list = m_core . get_service_node_list_state ( pubkeys ) ;
res . status = CORE_RPC_STATUS_OK ;
res . service_node_states . reserve ( pubkey_info_list . size ( ) ) ;
2019-02-15 01:13:27 +01:00
if ( req . include_json )
{
res . as_json = " { \n } " ;
if ( pubkey_info_list . size ( ) > 0 ) {
res . as_json = cryptonote : : obj_to_json_str ( pubkey_info_list ) ;
}
}
2019-05-01 08:01:17 +02:00
res . height = m_core . get_current_blockchain_height ( ) ;
res . block_hash = string_tools : : pod_to_hex ( m_core . get_block_id_by_height ( res . height - 1 ) ) ;
2019-02-15 01:13:27 +01:00
for ( auto & pubkey_info : pubkey_info_list )
2018-08-15 04:59:05 +02:00
{
2018-08-15 06:27:45 +02:00
COMMAND_RPC_GET_SERVICE_NODES : : response : : entry entry = { } ;
2019-02-15 01:13:27 +01:00
2019-04-01 09:39:27 +02:00
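// the latest uptime proof received for this node supplies the last proof
// timestamp and the node's reported version below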
const auto proof = m_core . get_uptime_proof ( pubkey_info . pubkey ) ;
2018-08-15 04:59:05 +02:00
entry . service_node_pubkey = string_tools : : pod_to_hex ( pubkey_info . pubkey ) ;
2018-08-20 05:12:33 +02:00
entry . registration_height = pubkey_info . info . registration_height ;
2019-01-25 04:15:52 +01:00
entry . requested_unlock_height = pubkey_info . info . requested_unlock_height ;
2018-08-15 04:59:05 +02:00
entry . last_reward_block_height = pubkey_info . info . last_reward_block_height ;
entry . last_reward_transaction_index = pubkey_info . info . last_reward_transaction_index ;
2019-04-01 09:39:27 +02:00
entry . last_uptime_proof = proof . timestamp ;
entry . service_node_version = { proof . version_major , proof . version_minor , proof . version_patch } ;
2018-08-15 04:59:05 +02:00
entry . contributors . reserve ( pubkey_info . info . contributors . size ( ) ) ;
2019-01-25 04:15:52 +01:00
using namespace service_nodes ;
for ( service_node_info : : contributor_t const & contributor : pubkey_info . info . contributors )
2018-08-15 04:59:05 +02:00
{
2019-01-25 04:15:52 +01:00
COMMAND_RPC_GET_SERVICE_NODES : : response : : contributor new_contributor = { } ;
2018-08-15 04:59:05 +02:00
new_contributor . amount = contributor . amount ;
new_contributor . reserved = contributor . reserved ;
2018-11-16 06:31:11 +01:00
new_contributor . address = cryptonote : : get_account_address_as_str ( m_core . get_nettype ( ) , false /*is_subaddress*/ , contributor . address ) ;
2019-01-25 04:15:52 +01:00
new_contributor . locked_contributions . reserve ( contributor . locked_contributions . size ( ) ) ;
for ( service_node_info : : contribution_t const & src : contributor . locked_contributions )
{
COMMAND_RPC_GET_SERVICE_NODES : : response : : contribution dest = { } ;
dest . amount = src . amount ;
dest . key_image = string_tools : : pod_to_hex ( src . key_image ) ;
dest . key_image_pub_key = string_tools : : pod_to_hex ( src . key_image_pub_key ) ;
new_contributor . locked_contributions . push_back ( dest ) ;
}
2018-08-15 04:59:05 +02:00
entry . contributors . push_back ( new_contributor ) ;
}
entry . total_contributed = pubkey_info . info . total_contributed ;
entry . total_reserved = pubkey_info . info . total_reserved ;
entry . staking_requirement = pubkey_info . info . staking_requirement ;
entry . portions_for_operator = pubkey_info . info . portions_for_operator ;
2018-11-16 06:31:11 +01:00
entry . operator_address = cryptonote : : get_account_address_as_str ( m_core . get_nettype ( ) , false /*is_subaddress*/ , pubkey_info . info . operator_address ) ;
2019-05-01 08:01:17 +02:00
entry . swarm_id = pubkey_info . info . swarm_id ;
2018-08-15 04:59:05 +02:00
res . service_node_states . push_back ( entry ) ;
}
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-30 05:18:15 +01:00
bool core_rpc_server : : on_get_all_service_nodes ( const COMMAND_RPC_GET_SERVICE_NODES : : request & req , COMMAND_RPC_GET_SERVICE_NODES : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2018-12-18 07:50:49 +01:00
{
auto req_all = req ;
req_all . service_node_pubkeys . clear ( ) ;
return on_get_service_nodes ( req_all , res , error_resp ) ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-30 05:18:15 +01:00
bool core_rpc_server : : on_get_staking_requirement ( const COMMAND_RPC_GET_STAKING_REQUIREMENT : : request & req , COMMAND_RPC_GET_STAKING_REQUIREMENT : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2018-08-31 09:49:23 +02:00
{
PERF_TIMER ( on_get_staking_requirement ) ;
2019-03-21 01:03:06 +01:00
res . staking_requirement = service_nodes : : get_staking_requirement ( m_core . get_nettype ( ) , req . height , m_core . get_hard_fork_version ( req . height ) ) ;
2018-08-31 09:49:23 +02:00
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
2019-01-25 04:15:52 +01:00
2015-01-29 23:10:53 +01:00
} // namespace cryptonote