// Copyright (c) 2014-2018, The Monero Project
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
//    conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
//    of conditions and the following disclaimer in the documentation and/or other
//    materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
//    used to endorse or promote products derived from this software without specific
//    prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers

#include "include_base_utils.h"
#include "string_tools.h"
using namespace epee;

#include "core_rpc_server.h"
#include "common/command_line.h"
#include "common/updates.h"
#include "common/download.h"
#include "common/util.h"
#include "common/perf_timer.h"
#include "cryptonote_basic/cryptonote_format_utils.h"
#include "cryptonote_basic/account.h"
#include "cryptonote_basic/cryptonote_basic_impl.h"
#include "misc_language.h"
#include "net/parse.h"
#include "storages/http_abstract_invoke.h"
#include "crypto/hash.h"
#include "rpc/rpc_args.h"
#include "rpc/rpc_handler.h"
#include "core_rpc_server_error_codes.h"
#include "p2p/net_node.h"
#include "version.h"

#undef MONERO_DEFAULT_LOG_CATEGORY
#define MONERO_DEFAULT_LOG_CATEGORY "daemon.rpc"

#define MAX_RESTRICTED_FAKE_OUTS_COUNT 40
#define MAX_RESTRICTED_GLOBAL_FAKE_OUTS_COUNT 5000
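// The limits above bound how much per-request output data a restricted (public)
// RPC server will serve: MAX_RESTRICTED_GLOBAL_FAKE_OUTS_COUNT is enforced in
// on_get_outs_bin()/on_get_outs() below ("Too many outs requested"), and
// MAX_RESTRICTED_FAKE_OUTS_COUNT plays the same role for the per-amount decoy
// requests handled elsewhere in this file.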

namespace
{
  void add_reason(std::string &reasons, const char *reason)
  {
    if (!reasons.empty())
      reasons += ", ";
    reasons += reason;
  }
}

namespace cryptonote
{

  //-----------------------------------------------------------------------------------
  void core_rpc_server::init_options(boost::program_options::options_description& desc)
  {
    command_line::add_arg(desc, arg_rpc_bind_port);
    command_line::add_arg(desc, arg_rpc_restricted_bind_port);
    command_line::add_arg(desc, arg_restricted_rpc);
    command_line::add_arg(desc, arg_rpc_ssl);
    command_line::add_arg(desc, arg_rpc_ssl_private_key);
    command_line::add_arg(desc, arg_rpc_ssl_certificate);
    command_line::add_arg(desc, arg_rpc_ssl_allowed_certificates);
    command_line::add_arg(desc, arg_rpc_ssl_allow_any_cert);
    command_line::add_arg(desc, arg_bootstrap_daemon_address);
    command_line::add_arg(desc, arg_bootstrap_daemon_login);
    cryptonote::rpc_args::init_options(desc);
  }
  //------------------------------------------------------------------------------------------------------------------------------
  core_rpc_server::core_rpc_server(
      core& cr
    , nodetool::node_server<cryptonote::t_cryptonote_protocol_handler<cryptonote::core> >& p2p
    )
    : m_core(cr)
    , m_p2p(p2p)
  {}
  //------------------------------------------------------------------------------------------------------------------------------
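  // Parses the RPC-related command line options, optionally points the server at a
  // bootstrap daemon (with HTTP login credentials when "user:password" is given),
  // loads the SSL key/certificate settings, and finally starts the underlying epee
  // HTTP server on the configured bind IP and port.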
  bool core_rpc_server::init(
      const boost::program_options::variables_map& vm
      , const bool restricted
      , const std::string& port
    )
  {
    m_restricted = restricted;
    m_net_server.set_threads_prefix("RPC");
    auto rpc_config = cryptonote::rpc_args::process(vm);
    if (!rpc_config)
      return false;

    m_bootstrap_daemon_address = command_line::get_arg(vm, arg_bootstrap_daemon_address);
    if (!m_bootstrap_daemon_address.empty())
    {
      const std::string &bootstrap_daemon_login = command_line::get_arg(vm, arg_bootstrap_daemon_login);
      const auto loc = bootstrap_daemon_login.find(':');
      if (!bootstrap_daemon_login.empty() && loc != std::string::npos)
      {
        epee::net_utils::http::login login;
        login.username = bootstrap_daemon_login.substr(0, loc);
        login.password = bootstrap_daemon_login.substr(loc + 1);
        m_http_client.set_server(m_bootstrap_daemon_address, login, epee::net_utils::ssl_support_t::e_ssl_support_autodetect);
      }
      else
      {
        m_http_client.set_server(m_bootstrap_daemon_address, boost::none, epee::net_utils::ssl_support_t::e_ssl_support_autodetect);
      }
      m_should_use_bootstrap_daemon = true;
    }
    else
    {
      m_should_use_bootstrap_daemon = false;
    }
    m_was_bootstrap_ever_used = false;

    boost::optional<epee::net_utils::http::login> http_login{};

    if (rpc_config->login)
      http_login.emplace(std::move(rpc_config->login->username), std::move(rpc_config->login->password).password());

    epee::net_utils::ssl_support_t ssl_support;
    const std::string ssl = command_line::get_arg(vm, arg_rpc_ssl);
    if (!epee::net_utils::ssl_support_from_string(ssl_support, ssl))
    {
      MFATAL("Invalid RPC SSL support: " << ssl);
      return false;
    }
    const std::string ssl_private_key = command_line::get_arg(vm, arg_rpc_ssl_private_key);
    const std::string ssl_certificate = command_line::get_arg(vm, arg_rpc_ssl_certificate);
    const std::vector<std::string> ssl_allowed_certificate_paths = command_line::get_arg(vm, arg_rpc_ssl_allowed_certificates);
    std::list<std::string> ssl_allowed_certificates;
    for (const std::string &path: ssl_allowed_certificate_paths)
    {
      ssl_allowed_certificates.push_back({});
      if (!epee::file_io_utils::load_file_to_string(path, ssl_allowed_certificates.back()))
      {
        MERROR("Failed to load certificate: " << path);
        ssl_allowed_certificates.back() = std::string();
      }
    }
    const bool ssl_allow_any_cert = command_line::get_arg(vm, arg_rpc_ssl_allow_any_cert);

    auto rng = [](size_t len, uint8_t *ptr){ return crypto::rand(len, ptr); };
    return epee::http_server_impl_base<core_rpc_server, connection_context>::init(
      rng, std::move(port), std::move(rpc_config->bind_ip), std::move(rpc_config->access_control_origins), std::move(http_login),
      ssl_support, std::make_pair(ssl_private_key, ssl_certificate), ssl_allowed_certificates, ssl_allow_any_cert
    );
  }
  //------------------------------------------------------------------------------------------------------------------------------
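  // The core is treated as "ready" once the p2p payload handler reports that the
  // initial blockchain synchronization has finished. Handlers that need a synced
  // chain wrap themselves in CHECK_CORE_READY() below and answer
  // CORE_RPC_STATUS_BUSY until then.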
  bool core_rpc_server::check_core_ready()
  {
    if(!m_p2p.get_payload_object().is_synchronized())
    {
      return false;
    }
    return true;
  }

#define CHECK_CORE_READY() do { if(!check_core_ready()){res.status = CORE_RPC_STATUS_BUSY;return true;} } while(0)

  //------------------------------------------------------------------------------------------------------------------------------
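  // /getheight: reports the current blockchain height (top block height + 1).
  // Like most handlers below, it first gives use_bootstrap_daemon_if_necessary()
  // a chance to answer the request from the configured bootstrap daemon instead.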
  bool core_rpc_server::on_get_height(const COMMAND_RPC_GET_HEIGHT::request& req, COMMAND_RPC_GET_HEIGHT::response& res, const connection_context *ctx)
  {
    PERF_TIMER(on_get_height);
    bool r;
    if (use_bootstrap_daemon_if_necessary<COMMAND_RPC_GET_HEIGHT>(invoke_http_mode::JON, "/getheight", req, res, r))
      return r;

    res.height = m_core.get_current_blockchain_height();
    res.status = CORE_RPC_STATUS_OK;
    return true;
  }
  //------------------------------------------------------------------------------------------------------------------------------
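  // /getinfo: general node status (heights, difficulty, pool size, connection and
  // peer list counts, network type, ...). In restricted (public) mode, fields that
  // could leak operational details (connection counts, peer list sizes, start
  // time, free disk space, database size, version) are zeroed or blanked out.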
  bool core_rpc_server::on_get_info(const COMMAND_RPC_GET_INFO::request& req, COMMAND_RPC_GET_INFO::response& res, const connection_context *ctx)
  {
    PERF_TIMER(on_get_info);
    bool r;
    if (use_bootstrap_daemon_if_necessary<COMMAND_RPC_GET_INFO>(invoke_http_mode::JON, "/getinfo", req, res, r))
    {
      res.bootstrap_daemon_address = m_bootstrap_daemon_address;
      crypto::hash top_hash;
      m_core.get_blockchain_top(res.height_without_bootstrap, top_hash);
      ++res.height_without_bootstrap; // turn top block height into blockchain height
      res.was_bootstrap_ever_used = true;
      return r;
    }

    const bool restricted = m_restricted && ctx;

    crypto::hash top_hash;
    m_core.get_blockchain_top(res.height, top_hash);
    ++res.height; // turn top block height into blockchain height
    res.top_block_hash = string_tools::pod_to_hex(top_hash);
    res.target_height = m_core.get_target_blockchain_height();
    res.difficulty = m_core.get_blockchain_storage().get_difficulty_for_next_block();
    res.target = m_core.get_blockchain_storage().get_difficulty_target();
    res.tx_count = m_core.get_blockchain_storage().get_total_transactions() - res.height; //without coinbase
    res.tx_pool_size = m_core.get_pool_transactions_count();
    res.alt_blocks_count = restricted ? 0 : m_core.get_blockchain_storage().get_alternative_blocks_count();
    uint64_t total_conn = restricted ? 0 : m_p2p.get_public_connections_count();
    res.outgoing_connections_count = restricted ? 0 : m_p2p.get_public_outgoing_connections_count();
    res.incoming_connections_count = restricted ? 0 : (total_conn - res.outgoing_connections_count);
    res.rpc_connections_count = restricted ? 0 : get_connections_count();
    res.white_peerlist_size = restricted ? 0 : m_p2p.get_public_white_peers_count();
    res.grey_peerlist_size = restricted ? 0 : m_p2p.get_public_gray_peers_count();

    cryptonote::network_type net_type = nettype();
    res.mainnet = net_type == MAINNET;
    res.testnet = net_type == TESTNET;
    res.stagenet = net_type == STAGENET;
    res.nettype = net_type == MAINNET ? "mainnet" : net_type == TESTNET ? "testnet" : net_type == STAGENET ? "stagenet" : "fakechain";

    res.cumulative_difficulty = m_core.get_blockchain_storage().get_db().get_block_cumulative_difficulty(res.height - 1);
    res.block_size_limit = res.block_weight_limit = m_core.get_blockchain_storage().get_current_cumulative_block_weight_limit();
    res.block_size_median = res.block_weight_median = m_core.get_blockchain_storage().get_current_cumulative_block_weight_median();
    res.status = CORE_RPC_STATUS_OK;
    res.start_time = restricted ? 0 : (uint64_t)m_core.get_start_time();
    res.free_space = restricted ? std::numeric_limits<uint64_t>::max() : m_core.get_free_space();
    res.offline = m_core.offline();
    res.bootstrap_daemon_address = restricted ? "" : m_bootstrap_daemon_address;
    res.height_without_bootstrap = restricted ? 0 : res.height;
    if (restricted)
      res.was_bootstrap_ever_used = false;
    else
    {
      boost::shared_lock<boost::shared_mutex> lock(m_bootstrap_daemon_mutex);
      res.was_bootstrap_ever_used = m_was_bootstrap_ever_used;
    }
    res.database_size = restricted ? 0 : m_core.get_blockchain_storage().get_db().get_database_size();
    res.update_available = restricted ? false : m_core.is_update_available();
    res.version = restricted ? "" : MONERO_VERSION;
    return true;
  }
  //------------------------------------------------------------------------------------------------------------------------------
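  // Serialization shim that writes only the base (non-prunable) part of a
  // transaction; on_get_transactions() uses it below to produce the JSON form of a
  // pruned transaction.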
  class pruned_transaction {
    transaction& tx;
  public:
    pruned_transaction(transaction& tx) : tx(tx) {}
    BEGIN_SERIALIZE_OBJECT()
      bool r = tx.serialize_base(ar);
      if (!r) return false;
    END_SERIALIZE()
  };
  //------------------------------------------------------------------------------------------------------------------------------
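  // /getblocks.bin: binary bulk-sync endpoint. Starting from the caller's short
  // chain history it returns up to COMMAND_RPC_GET_BLOCKS_FAST_MAX_COUNT blocks,
  // their transactions (optionally pruned), and the global output indices for
  // each transaction.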
  bool core_rpc_server::on_get_blocks(const COMMAND_RPC_GET_BLOCKS_FAST::request& req, COMMAND_RPC_GET_BLOCKS_FAST::response& res, const connection_context *ctx)
  {
    PERF_TIMER(on_get_blocks);
    bool r;
    if (use_bootstrap_daemon_if_necessary<COMMAND_RPC_GET_BLOCKS_FAST>(invoke_http_mode::BIN, "/getblocks.bin", req, res, r))
      return r;

    std::vector<std::pair<std::pair<cryptonote::blobdata, crypto::hash>, std::vector<std::pair<crypto::hash, cryptonote::blobdata>>>> bs;

    if(!m_core.find_blockchain_supplement(req.start_height, req.block_ids, bs, res.current_height, res.start_height, req.prune, !req.no_miner_tx, COMMAND_RPC_GET_BLOCKS_FAST_MAX_COUNT))
    {
      res.status = "Failed";
      return false;
    }

    size_t pruned_size = 0, unpruned_size = 0, ntxes = 0;
    res.blocks.reserve(bs.size());
    res.output_indices.reserve(bs.size());
    for(auto& bd: bs)
    {
      res.blocks.resize(res.blocks.size() + 1);
      res.blocks.back().block = bd.first.first;
      pruned_size += bd.first.first.size();
      unpruned_size += bd.first.first.size();
      res.output_indices.push_back(COMMAND_RPC_GET_BLOCKS_FAST::block_output_indices());
      ntxes += bd.second.size();
      res.output_indices.back().indices.reserve(1 + bd.second.size());
      if (req.no_miner_tx)
        res.output_indices.back().indices.push_back(COMMAND_RPC_GET_BLOCKS_FAST::tx_output_indices());
      res.blocks.back().txs.reserve(bd.second.size());
      for (std::vector<std::pair<crypto::hash, cryptonote::blobdata>>::iterator i = bd.second.begin(); i != bd.second.end(); ++i)
      {
        unpruned_size += i->second.size();
        res.blocks.back().txs.push_back(std::move(i->second));
        i->second.clear();
        i->second.shrink_to_fit();
        pruned_size += res.blocks.back().txs.back().size();
      }

      const size_t n_txes_to_lookup = bd.second.size() + (req.no_miner_tx ? 0 : 1);
      if (n_txes_to_lookup > 0)
      {
        std::vector<std::vector<uint64_t>> indices;
        bool r = m_core.get_tx_outputs_gindexs(req.no_miner_tx ? bd.second.front().first : bd.first.second, n_txes_to_lookup, indices);
        if (!r)
        {
          res.status = "Failed";
          return false;
        }
        if (indices.size() != n_txes_to_lookup || res.output_indices.back().indices.size() != (req.no_miner_tx ? 1 : 0))
        {
          res.status = "Failed";
          return false;
        }
        for (size_t i = 0; i < indices.size(); ++i)
          res.output_indices.back().indices.push_back({std::move(indices[i])});
      }
    }

    MDEBUG("on_get_blocks: " << bs.size() << " blocks, " << ntxes << " txes, pruned size " << pruned_size << ", unpruned size " << unpruned_size);
    res.status = CORE_RPC_STATUS_OK;
    return true;
  }
  bool core_rpc_server::on_get_alt_blocks_hashes(const COMMAND_RPC_GET_ALT_BLOCKS_HASHES::request& req, COMMAND_RPC_GET_ALT_BLOCKS_HASHES::response& res, const connection_context *ctx)
  {
    PERF_TIMER(on_get_alt_blocks_hashes);
    bool r;
    if (use_bootstrap_daemon_if_necessary<COMMAND_RPC_GET_ALT_BLOCKS_HASHES>(invoke_http_mode::JON, "/get_alt_blocks_hashes", req, res, r))
      return r;

    std::vector<block> blks;

    if(!m_core.get_alternative_blocks(blks))
    {
      res.status = "Failed";
      return false;
    }

    res.blks_hashes.reserve(blks.size());
    for (auto const& blk: blks)
    {
      res.blks_hashes.push_back(epee::string_tools::pod_to_hex(get_block_hash(blk)));
    }

    MDEBUG("on_get_alt_blocks_hashes: " << blks.size() << " blocks");
    res.status = CORE_RPC_STATUS_OK;
    return true;
  }
  //------------------------------------------------------------------------------------------------------------------------------
  bool core_rpc_server::on_get_blocks_by_height(const COMMAND_RPC_GET_BLOCKS_BY_HEIGHT::request& req, COMMAND_RPC_GET_BLOCKS_BY_HEIGHT::response& res, const connection_context *ctx)
  {
    PERF_TIMER(on_get_blocks_by_height);
    bool r;
    if (use_bootstrap_daemon_if_necessary<COMMAND_RPC_GET_BLOCKS_BY_HEIGHT>(invoke_http_mode::BIN, "/getblocks_by_height.bin", req, res, r))
      return r;

    res.status = "Failed";
    res.blocks.clear();
    res.blocks.reserve(req.heights.size());
    for (uint64_t height : req.heights)
    {
      block blk;
      try
      {
        blk = m_core.get_blockchain_storage().get_db().get_block_from_height(height);
      }
      catch (...)
      {
        res.status = "Error retrieving block at height " + std::to_string(height);
        return true;
      }
      std::vector<transaction> txs;
      std::vector<crypto::hash> missed_txs;
      m_core.get_transactions(blk.tx_hashes, txs, missed_txs);
      res.blocks.resize(res.blocks.size() + 1);
      res.blocks.back().block = block_to_blob(blk);
      for (auto& tx : txs)
        res.blocks.back().txs.push_back(tx_to_blob(tx));
    }
    res.status = CORE_RPC_STATUS_OK;
    return true;
  }
  //------------------------------------------------------------------------------------------------------------------------------
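  // /gethashes.bin: returns the span of block hashes that follows the caller's
  // short chain history, reusing the p2p NOTIFY_RESPONSE_CHAIN_ENTRY payload that
  // find_blockchain_supplement() fills in.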
  bool core_rpc_server::on_get_hashes(const COMMAND_RPC_GET_HASHES_FAST::request& req, COMMAND_RPC_GET_HASHES_FAST::response& res, const connection_context *ctx)
  {
    PERF_TIMER(on_get_hashes);
    bool r;
    if (use_bootstrap_daemon_if_necessary<COMMAND_RPC_GET_HASHES_FAST>(invoke_http_mode::BIN, "/gethashes.bin", req, res, r))
      return r;

    NOTIFY_RESPONSE_CHAIN_ENTRY::request resp;
    resp.start_height = req.start_height;
    if(!m_core.find_blockchain_supplement(req.block_ids, resp))
    {
      res.status = "Failed";
      return false;
    }
    res.current_height = resp.total_height;
    res.start_height = resp.start_height;
    res.m_block_ids = std::move(resp.m_block_ids);

    res.status = CORE_RPC_STATUS_OK;
    return true;
  }
  //------------------------------------------------------------------------------------------------------------------------------
  bool core_rpc_server::on_get_outs_bin(const COMMAND_RPC_GET_OUTPUTS_BIN::request& req, COMMAND_RPC_GET_OUTPUTS_BIN::response& res, const connection_context *ctx)
  {
    PERF_TIMER(on_get_outs_bin);
    bool r;
    if (use_bootstrap_daemon_if_necessary<COMMAND_RPC_GET_OUTPUTS_BIN>(invoke_http_mode::BIN, "/get_outs.bin", req, res, r))
      return r;

    res.status = "Failed";

    const bool restricted = m_restricted && ctx;
    if (restricted)
    {
      if (req.outputs.size() > MAX_RESTRICTED_GLOBAL_FAKE_OUTS_COUNT)
      {
        res.status = "Too many outs requested";
        return true;
      }
    }

    if(!m_core.get_outs(req, res))
    {
      return true;
    }

    res.status = CORE_RPC_STATUS_OK;
    return true;
  }
  //------------------------------------------------------------------------------------------------------------------------------
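  // /get_outs: JSON variant of /get_outs.bin. The request is converted to the
  // binary form, answered by the core, and the resulting keys, masks, heights and
  // txids are hex-encoded for the text response. Restricted servers apply the same
  // MAX_RESTRICTED_GLOBAL_FAKE_OUTS_COUNT cap as the binary endpoint.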
  bool core_rpc_server::on_get_outs(const COMMAND_RPC_GET_OUTPUTS::request& req, COMMAND_RPC_GET_OUTPUTS::response& res, const connection_context *ctx)
  {
    PERF_TIMER(on_get_outs);
    bool r;
    if (use_bootstrap_daemon_if_necessary<COMMAND_RPC_GET_OUTPUTS>(invoke_http_mode::JON, "/get_outs", req, res, r))
      return r;

    res.status = "Failed";

    const bool restricted = m_restricted && ctx;
    if (restricted)
    {
      if (req.outputs.size() > MAX_RESTRICTED_GLOBAL_FAKE_OUTS_COUNT)
      {
        res.status = "Too many outs requested";
        return true;
      }
    }

    cryptonote::COMMAND_RPC_GET_OUTPUTS_BIN::request req_bin;
    req_bin.outputs = req.outputs;
    cryptonote::COMMAND_RPC_GET_OUTPUTS_BIN::response res_bin;
    if(!m_core.get_outs(req_bin, res_bin))
    {
      return true;
    }

    // convert to text
    for (const auto &i: res_bin.outs)
    {
      res.outs.push_back(cryptonote::COMMAND_RPC_GET_OUTPUTS::outkey());
      cryptonote::COMMAND_RPC_GET_OUTPUTS::outkey &outkey = res.outs.back();
      outkey.key = epee::string_tools::pod_to_hex(i.key);
      outkey.mask = epee::string_tools::pod_to_hex(i.mask);
      outkey.unlocked = i.unlocked;
      outkey.height = i.height;
      outkey.txid = epee::string_tools::pod_to_hex(i.txid);
    }

    res.status = CORE_RPC_STATUS_OK;
    return true;
  }
  //------------------------------------------------------------------------------------------------------------------------------
  bool core_rpc_server::on_get_indexes(const COMMAND_RPC_GET_TX_GLOBAL_OUTPUTS_INDEXES::request& req, COMMAND_RPC_GET_TX_GLOBAL_OUTPUTS_INDEXES::response& res, const connection_context *ctx)
  {
    PERF_TIMER(on_get_indexes);
    bool ok;
    if (use_bootstrap_daemon_if_necessary<COMMAND_RPC_GET_TX_GLOBAL_OUTPUTS_INDEXES>(invoke_http_mode::BIN, "/get_o_indexes.bin", req, res, ok))
      return ok;

    bool r = m_core.get_tx_outputs_gindexs(req.txid, res.o_indexes);
    if(!r)
    {
      res.status = "Failed";
      return true;
    }
    res.status = CORE_RPC_STATUS_OK;
    LOG_PRINT_L2("COMMAND_RPC_GET_TX_GLOBAL_OUTPUTS_INDEXES: [" << res.o_indexes.size() << "]");
    return true;
  }
  //------------------------------------------------------------------------------------------------------------------------------
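  // /gettransactions: looks the requested hashes up on the blockchain first and
  // then in the transaction pool, returning each transaction as hex (optionally
  // split into pruned and prunable parts, optionally decoded to JSON) together
  // with block height/timestamp, double spend status and output indices for
  // mined transactions.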
  bool core_rpc_server::on_get_transactions(const COMMAND_RPC_GET_TRANSACTIONS::request& req, COMMAND_RPC_GET_TRANSACTIONS::response& res, const connection_context *ctx)
  {
    PERF_TIMER(on_get_transactions);
    bool ok;
    if (use_bootstrap_daemon_if_necessary<COMMAND_RPC_GET_TRANSACTIONS>(invoke_http_mode::JON, "/gettransactions", req, res, ok))
      return ok;

    std::vector<crypto::hash> vh;
    for(const auto& tx_hex_str: req.txs_hashes)
    {
      blobdata b;
      if(!string_tools::parse_hexstr_to_binbuff(tx_hex_str, b))
      {
        res.status = "Failed to parse hex representation of transaction hash";
        return true;
      }
      if(b.size() != sizeof(crypto::hash))
      {
        res.status = "Failed, size of data mismatch";
        return true;
      }
      vh.push_back(*reinterpret_cast<const crypto::hash*>(b.data()));
    }
    std::vector<crypto::hash> missed_txs;
    std::vector<std::tuple<crypto::hash, cryptonote::blobdata, crypto::hash, cryptonote::blobdata>> txs;
    bool r = m_core.get_split_transactions_blobs(vh, txs, missed_txs);
    if(!r)
    {
      res.status = "Failed";
      return true;
    }
    LOG_PRINT_L2("Found " << txs.size() << "/" << vh.size() << " transactions on the blockchain");

    // try the pool for any missing txes
    size_t found_in_pool = 0;
    std::unordered_set<crypto::hash> pool_tx_hashes;
    std::unordered_map<crypto::hash, bool> double_spend_seen;
    if (!missed_txs.empty())
    {
      std::vector<tx_info> pool_tx_info;
      std::vector<spent_key_image_info> pool_key_image_info;
      bool r = m_core.get_pool_transactions_and_spent_keys_info(pool_tx_info, pool_key_image_info);
      if(r)
      {
        // sort to match original request
        std::vector<std::tuple<crypto::hash, cryptonote::blobdata, crypto::hash, cryptonote::blobdata>> sorted_txs;
        std::vector<tx_info>::const_iterator i;
        unsigned txs_processed = 0;
        for (const crypto::hash &h: vh)
        {
          if (std::find(missed_txs.begin(), missed_txs.end(), h) == missed_txs.end())
          {
            if (txs.size() == txs_processed)
            {
              res.status = "Failed: internal error - txs is empty";
              return true;
            }
            // core returns the ones it finds in the right order
            if (std::get<0>(txs[txs_processed]) != h)
            {
              res.status = "Failed: tx hash mismatch";
              return true;
            }
            sorted_txs.push_back(std::move(txs[txs_processed]));
            ++txs_processed;
          }
          else if ((i = std::find_if(pool_tx_info.begin(), pool_tx_info.end(), [h](const tx_info &txi) { return epee::string_tools::pod_to_hex(h) == txi.id_hash; })) != pool_tx_info.end())
          {
            cryptonote::transaction tx;
            if (!cryptonote::parse_and_validate_tx_from_blob(i->tx_blob, tx))
            {
              res.status = "Failed to parse and validate tx from blob";
              return true;
            }
            std::stringstream ss;
            binary_archive<true> ba(ss);
            bool r = const_cast<cryptonote::transaction&>(tx).serialize_base(ba);
            if (!r)
            {
              res.status = "Failed to serialize transaction base";
              return true;
            }
            const cryptonote::blobdata pruned = ss.str();
            sorted_txs.push_back(std::make_tuple(h, pruned, get_transaction_prunable_hash(tx), std::string(i->tx_blob, pruned.size())));
            missed_txs.erase(std::find(missed_txs.begin(), missed_txs.end(), h));
            pool_tx_hashes.insert(h);
            const std::string hash_string = epee::string_tools::pod_to_hex(h);
            for (const auto &ti: pool_tx_info)
            {
              if (ti.id_hash == hash_string)
              {
                double_spend_seen.insert(std::make_pair(h, ti.double_spend_seen));
                break;
              }
            }
            ++found_in_pool;
          }
        }
        txs = sorted_txs;
      }
      LOG_PRINT_L2("Found " << found_in_pool << "/" << vh.size() << " transactions in the pool");
    }

    std::vector<std::string>::const_iterator txhi = req.txs_hashes.begin();
    std::vector<crypto::hash>::const_iterator vhi = vh.begin();
    for(auto& tx: txs)
    {
      res.txs.push_back(COMMAND_RPC_GET_TRANSACTIONS::entry());
      COMMAND_RPC_GET_TRANSACTIONS::entry &e = res.txs.back();

      crypto::hash tx_hash = *vhi++;
      e.tx_hash = *txhi++;
      e.prunable_hash = epee::string_tools::pod_to_hex(std::get<2>(tx));
      if (req.split || req.prune || std::get<3>(tx).empty())
      {
        e.pruned_as_hex = string_tools::buff_to_hex_nodelimer(std::get<1>(tx));
        if (!req.prune)
          e.prunable_as_hex = string_tools::buff_to_hex_nodelimer(std::get<3>(tx));
      }
      else
      {
        cryptonote::blobdata tx_data;
        if (req.prune)
          tx_data = std::get<1>(tx);
        else
          tx_data = std::get<1>(tx) + std::get<3>(tx);
        e.as_hex = string_tools::buff_to_hex_nodelimer(tx_data);
        if (req.decode_as_json && !tx_data.empty())
        {
          cryptonote::transaction t;
          if (cryptonote::parse_and_validate_tx_from_blob(tx_data, t))
          {
            if (req.prune)
            {
              pruned_transaction pruned_tx{t};
              e.as_json = obj_to_json_str(pruned_tx);
            }
            else
              e.as_json = obj_to_json_str(t);
          }
        }
      }
      e.in_pool = pool_tx_hashes.find(tx_hash) != pool_tx_hashes.end();
      if (e.in_pool)
      {
        e.block_height = e.block_timestamp = std::numeric_limits<uint64_t>::max();
        if (double_spend_seen.find(tx_hash) != double_spend_seen.end())
        {
          e.double_spend_seen = double_spend_seen[tx_hash];
        }
        else
        {
          MERROR("Failed to determine double spend status for " << tx_hash);
          e.double_spend_seen = false;
        }
      }
      else
      {
        e.block_height = m_core.get_blockchain_storage().get_db().get_tx_block_height(tx_hash);
        e.block_timestamp = m_core.get_blockchain_storage().get_db().get_block_timestamp(e.block_height);
        e.double_spend_seen = false;
      }

      // fill up old style responses too, in case an old wallet asks
      res.txs_as_hex.push_back(e.as_hex);
      if (req.decode_as_json)
        res.txs_as_json.push_back(e.as_json);

      // output indices too if not in pool
      if (pool_tx_hashes.find(tx_hash) == pool_tx_hashes.end())
      {
        bool r = m_core.get_tx_outputs_gindexs(tx_hash, e.output_indices);
        if (!r)
        {
          res.status = "Failed";
          return false;
        }
      }
    }

    for(const auto& miss_tx: missed_txs)
    {
      res.missed_tx.push_back(string_tools::pod_to_hex(miss_tx));
    }

    LOG_PRINT_L2(res.txs.size() << " transactions found, " << res.missed_tx.size() << " not found");
    res.status = CORE_RPC_STATUS_OK;
    return true;
  }
  //------------------------------------------------------------------------------------------------------------------------------
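  // /is_key_image_spent: reports, for each supplied key image, whether it is spent
  // in the blockchain, spent by a transaction currently in the pool, or unspent.
  // The flag passed to get_pool_transactions_and_spent_keys_info() limits what
  // restricted RPC requests are allowed to see of the pool.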
  bool core_rpc_server::on_is_key_image_spent(const COMMAND_RPC_IS_KEY_IMAGE_SPENT::request& req, COMMAND_RPC_IS_KEY_IMAGE_SPENT::response& res, const connection_context *ctx)
  {
    PERF_TIMER(on_is_key_image_spent);
    bool ok;
    if (use_bootstrap_daemon_if_necessary<COMMAND_RPC_IS_KEY_IMAGE_SPENT>(invoke_http_mode::JON, "/is_key_image_spent", req, res, ok))
      return ok;

    const bool restricted = m_restricted && ctx;
    const bool request_has_rpc_origin = ctx != NULL;
    std::vector<crypto::key_image> key_images;
    for(const auto& ki_hex_str: req.key_images)
    {
      blobdata b;
      if(!string_tools::parse_hexstr_to_binbuff(ki_hex_str, b))
      {
        res.status = "Failed to parse hex representation of key image";
        return true;
      }
      if(b.size() != sizeof(crypto::key_image))
      {
        res.status = "Failed, size of data mismatch";
      }
      key_images.push_back(*reinterpret_cast<const crypto::key_image*>(b.data()));
    }
    std::vector<bool> spent_status;
    bool r = m_core.are_key_images_spent(key_images, spent_status);
    if(!r)
    {
      res.status = "Failed";
      return true;
    }
    res.spent_status.clear();
    for (size_t n = 0; n < spent_status.size(); ++n)
      res.spent_status.push_back(spent_status[n] ? COMMAND_RPC_IS_KEY_IMAGE_SPENT::SPENT_IN_BLOCKCHAIN : COMMAND_RPC_IS_KEY_IMAGE_SPENT::UNSPENT);

    // check the pool too
    std::vector<cryptonote::tx_info> txs;
    std::vector<cryptonote::spent_key_image_info> ki;
    r = m_core.get_pool_transactions_and_spent_keys_info(txs, ki, !request_has_rpc_origin || !restricted);
    if(!r)
    {
      res.status = "Failed";
      return true;
    }
    for (std::vector<cryptonote::spent_key_image_info>::const_iterator i = ki.begin(); i != ki.end(); ++i)
    {
      crypto::hash hash;
      crypto::key_image spent_key_image;
      if (parse_hash256(i->id_hash, hash))
      {
        memcpy(&spent_key_image, &hash, sizeof(hash)); // a bit dodgy, should be other parse functions somewhere
        for (size_t n = 0; n < res.spent_status.size(); ++n)
        {
          if (res.spent_status[n] == COMMAND_RPC_IS_KEY_IMAGE_SPENT::UNSPENT)
          {
            if (key_images[n] == spent_key_image)
            {
              res.spent_status[n] = COMMAND_RPC_IS_KEY_IMAGE_SPENT::SPENT_IN_POOL;
              break;
            }
          }
        }
      }
    }

    res.status = CORE_RPC_STATUS_OK;
    return true;
  }
  //------------------------------------------------------------------------------------------------------------------------------
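  // /sendrawtransaction: hands the hex-encoded transaction to the core for
  // verification and, when accepted and not marked do_not_relay, relays it to the
  // p2p network. On rejection the individual tvc failure flags are copied into the
  // response and summarized into a human-readable reason via add_reason().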
  bool core_rpc_server::on_send_raw_tx(const COMMAND_RPC_SEND_RAW_TX::request& req, COMMAND_RPC_SEND_RAW_TX::response& res, const connection_context *ctx)
  {
    PERF_TIMER(on_send_raw_tx);
    bool ok;
    if (use_bootstrap_daemon_if_necessary<COMMAND_RPC_SEND_RAW_TX>(invoke_http_mode::JON, "/sendrawtransaction", req, res, ok))
      return ok;

    CHECK_CORE_READY();

    std::string tx_blob;
    if(!string_tools::parse_hexstr_to_binbuff(req.tx_as_hex, tx_blob))
    {
      LOG_PRINT_L0("[on_send_raw_tx]: Failed to parse tx from hexbuff: " << req.tx_as_hex);
      res.status = "Failed";
      return true;
    }

    cryptonote_connection_context fake_context = AUTO_VAL_INIT(fake_context);
    tx_verification_context tvc = AUTO_VAL_INIT(tvc);
    if(!m_core.handle_incoming_tx(tx_blob, tvc, false, false, req.do_not_relay) || tvc.m_verifivation_failed)
    {
      res.status = "Failed";
      std::string reason = "";
      if ((res.low_mixin = tvc.m_low_mixin))
        add_reason(reason, "bad ring size");
      if ((res.double_spend = tvc.m_double_spend))
        add_reason(reason, "double spend");
      if ((res.invalid_input = tvc.m_invalid_input))
        add_reason(reason, "invalid input");
      if ((res.invalid_output = tvc.m_invalid_output))
        add_reason(reason, "invalid output");
      if ((res.too_big = tvc.m_too_big))
        add_reason(reason, "too big");
      if ((res.overspend = tvc.m_overspend))
        add_reason(reason, "overspend");
      if ((res.fee_too_low = tvc.m_fee_too_low))
        add_reason(reason, "fee too low");
      if ((res.not_rct = tvc.m_not_rct))
        add_reason(reason, "tx is not ringct");
      const std::string punctuation = reason.empty() ? "" : ": ";
      if (tvc.m_verifivation_failed)
      {
        LOG_PRINT_L0("[on_send_raw_tx]: tx verification failed" << punctuation << reason);
      }
      else
      {
        LOG_PRINT_L0("[on_send_raw_tx]: Failed to process tx" << punctuation << reason);
      }
      return true;
    }

    if(!tvc.m_should_be_relayed)
    {
      LOG_PRINT_L0("[on_send_raw_tx]: tx accepted, but not relayed");
      res.reason = "Not relayed";
      res.not_relayed = true;
      res.status = CORE_RPC_STATUS_OK;
      return true;
    }

    NOTIFY_NEW_TRANSACTIONS::request r;
    r.txs.push_back(tx_blob);
    m_core.get_protocol()->relay_transactions(r, fake_context);
    //TODO: make sure that tx has reached other nodes here, probably wait to receive reflections from other nodes
    res.status = CORE_RPC_STATUS_OK;
    return true;
  }
  //------------------------------------------------------------------------------------------------------------------------------
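  // /start_mining: validates the miner address against the current network type,
  // refuses subaddresses and thread counts above 4x the detected hardware
  // concurrency, and then starts the built-in miner unless it is already running.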
  bool core_rpc_server::on_start_mining(const COMMAND_RPC_START_MINING::request& req, COMMAND_RPC_START_MINING::response& res, const connection_context *ctx)
  {
    PERF_TIMER(on_start_mining);
    CHECK_CORE_READY();
    cryptonote::address_parse_info info;
    if(!get_account_address_from_str(info, nettype(), req.miner_address))
    {
      res.status = "Failed, wrong address";
      LOG_PRINT_L0(res.status);
      return true;
    }
    if (info.is_subaddress)
    {
      res.status = "Mining to subaddress isn't supported yet";
      LOG_PRINT_L0(res.status);
      return true;
    }

    unsigned int concurrency_count = boost::thread::hardware_concurrency() * 4;

    // if we couldn't detect threads, set it to a ridiculously high number
    if(concurrency_count == 0)
    {
      concurrency_count = 257;
    }

    // if there are more threads requested than the hardware supports
    // then we fail and log that.
    if(req.threads_count > concurrency_count)
    {
      res.status = "Failed, too many threads relative to CPU cores.";
      LOG_PRINT_L0(res.status);
      return true;
    }

    boost::thread::attributes attrs;
    attrs.set_stack_size(THREAD_STACK_SIZE);

    cryptonote::miner &miner = m_core.get_miner();
    if (miner.is_mining())
    {
      res.status = "Already mining";
      return true;
    }
    if(!miner.start(info.address, static_cast<size_t>(req.threads_count), attrs, req.do_background_mining, req.ignore_battery))
    {
      res.status = "Failed, mining not started";
      LOG_PRINT_L0(res.status);
      return true;
    }
    res.status = CORE_RPC_STATUS_OK;
    return true;
  }
  //------------------------------------------------------------------------------------------------------------------------------
bool core_rpc_server : : on_stop_mining ( const COMMAND_RPC_STOP_MINING : : request & req , COMMAND_RPC_STOP_MINING : : response & res , const connection_context * ctx )
2014-03-03 23:07:58 +01:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_stop_mining ) ;
2018-10-28 14:50:33 +01:00
cryptonote : : miner & miner = m_core . get_miner ( ) ;
if ( ! miner . is_mining ( ) )
{
res . status = " Mining never started " ;
LOG_PRINT_L0 ( res . status ) ;
return true ;
}
if ( ! miner . stop ( ) )
2014-03-03 23:07:58 +01:00
{
res . status = " Failed, mining not stopped " ;
2015-05-29 00:13:32 +02:00
LOG_PRINT_L0 ( res . status ) ;
return true ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
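// Reports whether the miner is active and whether background mining is enabled; while
// mining, also returns the hashrate, thread count and mining address.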
bool core_rpc_server : : on_mining_status ( const COMMAND_RPC_MINING_STATUS : : request & req , COMMAND_RPC_MINING_STATUS : : response & res , const connection_context * ctx )
{
PERF_TIMER ( on_mining_status ) ;
const miner & lMiner = m_core . get_miner ( ) ;
res . active = lMiner . is_mining ( ) ;
res . is_background_mining_enabled = lMiner . get_is_background_mining_enabled ( ) ;
if ( lMiner . is_mining ( ) ) {
res . speed = lMiner . get_speed ( ) ;
res . threads_count = lMiner . get_threads_count ( ) ;
const account_public_address & lMiningAdr = lMiner . get_mining_address ( ) ;
res . address = get_account_address_as_str ( nettype ( ) , false , lMiningAdr ) ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
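// Flushes the blockchain state to disk via Blockchain::store_blockchain().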
bool core_rpc_server : : on_save_bc ( const COMMAND_RPC_SAVE_BC : : request & req , COMMAND_RPC_SAVE_BC : : response & res , const connection_context * ctx )
{
PERF_TIMER ( on_save_bc ) ;
if ( ! m_core . get_blockchain_storage ( ) . store_blockchain ( ) )
{
res . status = " Error while storing blockchain " ;
return true ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
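// Returns the public white and gray peer lists; IPv4 peers are reported as numeric
// ip/port (plus last-seen time and pruning seed), other address types as strings.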
bool core_rpc_server : : on_get_peer_list ( const COMMAND_RPC_GET_PEER_LIST : : request & req , COMMAND_RPC_GET_PEER_LIST : : response & res , const connection_context * ctx )
{
PERF_TIMER ( on_get_peer_list ) ;
std : : vector < nodetool : : peerlist_entry > white_list ;
std : : vector < nodetool : : peerlist_entry > gray_list ;
m_p2p . get_public_peerlist ( gray_list , white_list ) ;
res . white_list . reserve ( white_list . size ( ) ) ;
for ( auto & entry : white_list )
{
if ( entry . adr . get_type_id ( ) = = epee : : net_utils : : ipv4_network_address : : get_type_id ( ) )
res . white_list . emplace_back ( entry . id , entry . adr . as < epee : : net_utils : : ipv4_network_address > ( ) . ip ( ) ,
entry . adr . as < epee : : net_utils : : ipv4_network_address > ( ) . port ( ) , entry . last_seen , entry . pruning_seed ) ;
else
res . white_list . emplace_back ( entry . id , entry . adr . str ( ) , entry . last_seen , entry . pruning_seed ) ;
}
res . gray_list . reserve ( gray_list . size ( ) ) ;
for ( auto & entry : gray_list )
{
if ( entry . adr . get_type_id ( ) = = epee : : net_utils : : ipv4_network_address : : get_type_id ( ) )
res . gray_list . emplace_back ( entry . id , entry . adr . as < epee : : net_utils : : ipv4_network_address > ( ) . ip ( ) ,
entry . adr . as < epee : : net_utils : : ipv4_network_address > ( ) . port ( ) , entry . last_seen , entry . pruning_seed ) ;
else
res . gray_list . emplace_back ( entry . id , entry . adr . str ( ) , entry . last_seen , entry . pruning_seed ) ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
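// Toggles periodic hashrate logging; only meaningful while the miner is running.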
bool core_rpc_server : : on_set_log_hash_rate ( const COMMAND_RPC_SET_LOG_HASH_RATE : : request & req , COMMAND_RPC_SET_LOG_HASH_RATE : : response & res , const connection_context * ctx )
{
PERF_TIMER ( on_set_log_hash_rate ) ;
if ( m_core . get_miner ( ) . is_mining ( ) )
{
m_core . get_miner ( ) . do_print_hashrate ( req . visible ) ;
res . status = CORE_RPC_STATUS_OK ;
}
else
{
res . status = CORE_RPC_STATUS_NOT_MINING ;
}
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
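// Sets the log verbosity using the epee-style levels 0-4; finer-grained control is
// available through set_log_categories below.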
bool core_rpc_server : : on_set_log_level ( const COMMAND_RPC_SET_LOG_LEVEL : : request & req , COMMAND_RPC_SET_LOG_LEVEL : : response & res , const connection_context * ctx )
{
PERF_TIMER ( on_set_log_level ) ;
if ( req . level < 0 | | req . level > 4 )
{
res . status = " Error: log level not valid " ;
return true ;
}
mlog_set_log_level ( req . level ) ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
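// Applies a category/level list such as "*:WARNING,net.p2p:DEBUG" and echoes back the
// categories that are now active.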
bool core_rpc_server : : on_set_log_categories ( const COMMAND_RPC_SET_LOG_CATEGORIES : : request & req , COMMAND_RPC_SET_LOG_CATEGORIES : : response & res , const connection_context * ctx )
{
PERF_TIMER ( on_set_log_categories ) ;
mlog_set_log ( req . categories . c_str ( ) ) ;
res . categories = mlog_get_categories ( ) ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
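// Returns the full transaction pool (tx info and spent key images); sensitive entries
// are omitted when the server is restricted and the request arrived over RPC.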
bool core_rpc_server : : on_get_transaction_pool ( const COMMAND_RPC_GET_TRANSACTION_POOL : : request & req , COMMAND_RPC_GET_TRANSACTION_POOL : : response & res , const connection_context * ctx )
{
PERF_TIMER ( on_get_transaction_pool ) ;
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_TRANSACTION_POOL > ( invoke_http_mode : : JON , " /get_transaction_pool " , req , res , r ) )
return r ;
const bool restricted = m_restricted & & ctx ;
const bool request_has_rpc_origin = ctx ! = NULL ;
m_core . get_pool_transactions_and_spent_keys_info ( res . transactions , res . spent_key_images , ! request_has_rpc_origin | | ! restricted ) ;
for ( tx_info & txi : res . transactions )
txi . tx_blob = epee : : string_tools : : buff_to_hex_nodelimer ( txi . tx_blob ) ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
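// Binary variant: returns the pool transaction hashes as raw crypto::hash values.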
bool core_rpc_server : : on_get_transaction_pool_hashes_bin ( const COMMAND_RPC_GET_TRANSACTION_POOL_HASHES_BIN : : request & req , COMMAND_RPC_GET_TRANSACTION_POOL_HASHES_BIN : : response & res , const connection_context * ctx )
{
PERF_TIMER ( on_get_transaction_pool_hashes ) ;
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_TRANSACTION_POOL_HASHES_BIN > ( invoke_http_mode : : JON , " /get_transaction_pool_hashes.bin " , req , res , r ) )
return r ;
const bool restricted = m_restricted & & ctx ;
const bool request_has_rpc_origin = ctx ! = NULL ;
m_core . get_pool_transaction_hashes ( res . tx_hashes , ! request_has_rpc_origin | | ! restricted ) ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
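// JSON variant of the above: the same pool hashes, hex-encoded as strings.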
bool core_rpc_server : : on_get_transaction_pool_hashes ( const COMMAND_RPC_GET_TRANSACTION_POOL_HASHES : : request & req , COMMAND_RPC_GET_TRANSACTION_POOL_HASHES : : response & res , const connection_context * ctx )
{
PERF_TIMER ( on_get_transaction_pool_hashes ) ;
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_TRANSACTION_POOL_HASHES > ( invoke_http_mode : : JON , " /get_transaction_pool_hashes " , req , res , r ) )
return r ;
const bool restricted = m_restricted & & ctx ;
const bool request_has_rpc_origin = ctx ! = NULL ;
std : : vector < crypto : : hash > tx_hashes ;
m_core . get_pool_transaction_hashes ( tx_hashes , ! request_has_rpc_origin | | ! restricted ) ;
res . tx_hashes . reserve ( tx_hashes . size ( ) ) ;
for ( const crypto : : hash & tx_hash : tx_hashes )
res . tx_hashes . push_back ( epee : : string_tools : : pod_to_hex ( tx_hash ) ) ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
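// Returns aggregate statistics about the transaction pool, honouring the same
// restricted-access rule as the other pool calls.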
bool core_rpc_server : : on_get_transaction_pool_stats ( const COMMAND_RPC_GET_TRANSACTION_POOL_STATS : : request & req , COMMAND_RPC_GET_TRANSACTION_POOL_STATS : : response & res , const connection_context * ctx )
{
PERF_TIMER ( on_get_transaction_pool_stats ) ;
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_TRANSACTION_POOL_STATS > ( invoke_http_mode : : JON , " /get_transaction_pool_stats " , req , res , r ) )
return r ;
const bool restricted = m_restricted & & ctx ;
const bool request_has_rpc_origin = ctx ! = NULL ;
m_core . get_pool_transaction_stats ( res . pool_stats , ! request_has_rpc_origin | | ! restricted ) ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
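// Requests a clean daemon shutdown by signalling the p2p layer to stop.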
bool core_rpc_server : : on_stop_daemon ( const COMMAND_RPC_STOP_DAEMON : : request & req , COMMAND_RPC_STOP_DAEMON : : response & res , const connection_context * ctx )
{
PERF_TIMER ( on_stop_daemon ) ;
// FIXME: replace back to original m_p2p.send_stop_signal() after
// investigating why that isn't working quite right.
m_p2p . send_stop_signal ( ) ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
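// Returns the current blockchain height (top block height + 1); unsupported while a
// bootstrap daemon is in use.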
bool core_rpc_server : : on_getblockcount ( const COMMAND_RPC_GETBLOCKCOUNT : : request & req , COMMAND_RPC_GETBLOCKCOUNT : : response & res , const connection_context * ctx )
{
PERF_TIMER ( on_getblockcount ) ;
{
boost : : shared_lock < boost : : shared_mutex > lock ( m_bootstrap_daemon_mutex ) ;
if ( m_should_use_bootstrap_daemon )
{
res . status = " This command is unsupported for bootstrap daemon " ;
return false ;
}
}
res . count = m_core . get_current_blockchain_height ( ) ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
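// Returns the hash of the block at the requested height, or an error if the height is
// beyond the current top block.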
bool core_rpc_server : : on_getblockhash ( const COMMAND_RPC_GETBLOCKHASH : : request & req , COMMAND_RPC_GETBLOCKHASH : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
{
PERF_TIMER ( on_getblockhash ) ;
{
boost : : shared_lock < boost : : shared_mutex > lock ( m_bootstrap_daemon_mutex ) ;
if ( m_should_use_bootstrap_daemon )
{
res = " This command is unsupported for bootstrap daemon " ;
return false ;
}
}
if ( req . size ( ) ! = 1 )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_PARAM ;
error_resp . message = " Wrong parameters, expected height " ;
return false ;
}
uint64_t h = req [ 0 ] ;
if ( m_core . get_current_blockchain_height ( ) < = h )
{
error_resp . code = CORE_RPC_ERROR_CODE_TOO_BIG_HEIGHT ;
error_resp . message = std : : string ( " Requested block height: " ) + std : : to_string ( h ) + " greater than current top block height: " + std : : to_string ( m_core . get_current_blockchain_height ( ) - 1 ) ;
return false ;
}
res = string_tools : : pod_to_hex ( m_core . get_block_id_by_height ( h ) ) ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
// equivalent of strstr, but with arbitrary bytes (ie, NULs)
// This does not differentiate between "not found" and "found at offset 0"
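// e.g. slow_memmem(block_blob.data(), block_blob.size(), &tx_pub_key, sizeof(tx_pub_key))
// yields the offset of the coinbase tx pub key inside the block blob, as used by
// on_getblocktemplate() below.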
size_t slow_memmem ( const void * start_buff , size_t buflen , const void * pat , size_t patlen )
{
const void * buf = start_buff ;
const void * end = ( const char * ) buf + buflen ;
if ( patlen > buflen | | patlen = = 0 ) return 0 ;
while ( buflen > 0 & & ( buf = memchr ( buf , ( ( const char * ) pat ) [ 0 ] , buflen - patlen + 1 ) ) )
{
if ( memcmp ( buf , pat , patlen ) = = 0 )
return ( const char * ) buf - ( const char * ) start_buff ;
buf = ( const char * ) buf + 1 ;
buflen = ( const char * ) end - ( const char * ) buf ;
}
return 0 ;
}
//------------------------------------------------------------------------------------------------------------------------------
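// Builds a block template for external miners: validates the reserve size and wallet
// address, asks the core for a template, then locates the reserved extra-nonce space
// (just past the coinbase tx pub key) so the caller knows where to write its data.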
bool core_rpc_server : : on_getblocktemplate ( const COMMAND_RPC_GETBLOCKTEMPLATE : : request & req , COMMAND_RPC_GETBLOCKTEMPLATE : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
{
PERF_TIMER ( on_getblocktemplate ) ;
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GETBLOCKTEMPLATE > ( invoke_http_mode : : JON_RPC , " getblocktemplate " , req , res , r ) )
return r ;
if ( ! check_core_ready ( ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_CORE_BUSY ;
error_resp . message = " Core is busy " ;
return false ;
}
if ( req . reserve_size > 255 )
{
error_resp . code = CORE_RPC_ERROR_CODE_TOO_BIG_RESERVE_SIZE ;
error_resp . message = " Too big reserved size, maximum 255 " ;
return false ;
}
cryptonote : : address_parse_info info ;
if ( ! req . wallet_address . size ( ) | | ! cryptonote : : get_account_address_from_str ( info , nettype ( ) , req . wallet_address ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_WALLET_ADDRESS ;
error_resp . message = " Failed to parse wallet address " ;
return false ;
}
if ( info . is_subaddress )
{
error_resp . code = CORE_RPC_ERROR_CODE_MINING_TO_SUBADDRESS ;
error_resp . message = " Mining to subaddress is not supported yet " ;
return false ;
}
block b ;
cryptonote : : blobdata blob_reserve ;
blob_reserve . resize ( req . reserve_size , 0 ) ;
if ( ! m_core . get_block_template ( b , info . address , res . difficulty , res . height , res . expected_reward , blob_reserve ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: failed to create block template " ;
LOG_ERROR ( " Failed to create block template " ) ;
return false ;
}
blobdata block_blob = t_serializable_object_to_blob ( b ) ;
crypto : : public_key tx_pub_key = cryptonote : : get_tx_pub_key_from_extra ( b . miner_tx ) ;
if ( tx_pub_key = = crypto : : null_pkey )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: failed to create block template " ;
LOG_ERROR ( " Failed to get tx pub key in coinbase extra " ) ;
return false ;
}
res . reserved_offset = slow_memmem ( ( void * ) block_blob . data ( ) , block_blob . size ( ) , & tx_pub_key , sizeof ( tx_pub_key ) ) ;
if ( ! res . reserved_offset )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: failed to create block template " ;
LOG_ERROR ( " Failed to find tx pub key in blockblob " ) ;
return false ;
}
res . reserved_offset + = sizeof ( tx_pub_key ) + 2 ; //2 bytes: tag for TX_EXTRA_NONCE(1 byte), counter in TX_EXTRA_NONCE(1 byte)
if ( res . reserved_offset + req . reserve_size > block_blob . size ( ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: failed to create block template " ;
LOG_ERROR ( " Failed to calculate offset for " ) ;
return false ;
}
blobdata hashing_blob = get_block_hashing_blob ( b ) ;
res . prev_hash = string_tools : : pod_to_hex ( b . prev_id ) ;
res . blocktemplate_blob = string_tools : : buff_to_hex_nodelimer ( block_blob ) ;
res . blockhashing_blob = string_tools : : buff_to_hex_nodelimer ( hashing_blob ) ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
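// Accepts a mined block as a hex blob, re-parses and size-checks it, then hands it to
// the core; returns BLOCK_NOT_ACCEPTED if the core rejects it.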
bool core_rpc_server : : on_submitblock ( const COMMAND_RPC_SUBMITBLOCK : : request & req , COMMAND_RPC_SUBMITBLOCK : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
{
PERF_TIMER ( on_submitblock ) ;
{
boost : : shared_lock < boost : : shared_mutex > lock ( m_bootstrap_daemon_mutex ) ;
if ( m_should_use_bootstrap_daemon )
{
res . status = " This command is unsupported for bootstrap daemon " ;
return false ;
}
}
CHECK_CORE_READY ( ) ;
if ( req . size ( ) ! = 1 )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_PARAM ;
error_resp . message = " Wrong param " ;
return false ;
}
blobdata blockblob ;
if ( ! string_tools : : parse_hexstr_to_binbuff ( req [ 0 ] , blockblob ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_BLOCKBLOB ;
error_resp . message = " Wrong block blob " ;
return false ;
}
// Fixing of high orphan issue for most pools
// Thanks Boolberry!
block b ;
if ( ! parse_and_validate_block_from_blob ( blockblob , b ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_BLOCKBLOB ;
error_resp . message = " Wrong block blob " ;
return false ;
}
// Fix from Boolberry neglects to check block
// size, do that with the function below
if ( ! m_core . check_incoming_block_size ( blockblob ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_BLOCKBLOB_SIZE ;
error_resp . message = " Block bloc size is too big, rejecting block " ;
return false ;
}
if ( ! m_core . handle_block_found ( b ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_BLOCK_NOT_ACCEPTED ;
error_resp . message = " Block not accepted " ;
return false ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
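// Regtest-only helper: repeatedly requests a template, brute-forces a nonce with
// miner::find_nonce_for_given_block() and submits the result, mining req.amount_of_blocks blocks.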
bool core_rpc_server : : on_generateblocks ( const COMMAND_RPC_GENERATEBLOCKS : : request & req , COMMAND_RPC_GENERATEBLOCKS : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
{
PERF_TIMER ( on_generateblocks ) ;
CHECK_CORE_READY ( ) ;
res . status = CORE_RPC_STATUS_OK ;
if ( m_core . get_nettype ( ) ! = FAKECHAIN )
{
error_resp . code = CORE_RPC_ERROR_CODE_REGTEST_REQUIRED ;
error_resp . message = " Regtest required when generating blocks " ;
return false ;
}
COMMAND_RPC_GETBLOCKTEMPLATE : : request template_req ;
COMMAND_RPC_GETBLOCKTEMPLATE : : response template_res ;
COMMAND_RPC_SUBMITBLOCK : : request submit_req ;
COMMAND_RPC_SUBMITBLOCK : : response submit_res ;
template_req . reserve_size = 1 ;
template_req . wallet_address = req . wallet_address ;
submit_req . push_back ( boost : : value_initialized < std : : string > ( ) ) ;
res . height = m_core . get_blockchain_storage ( ) . get_current_blockchain_height ( ) ;
bool r ;
for ( size_t i = 0 ; i < req . amount_of_blocks ; i + + )
{
r = on_getblocktemplate ( template_req , template_res , error_resp , ctx ) ;
res . status = template_res . status ;
if ( ! r ) return false ;
blobdata blockblob ;
if ( ! string_tools : : parse_hexstr_to_binbuff ( template_res . blocktemplate_blob , blockblob ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_BLOCKBLOB ;
error_resp . message = " Wrong block blob " ;
return false ;
}
block b ;
if ( ! parse_and_validate_block_from_blob ( blockblob , b ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_BLOCKBLOB ;
error_resp . message = " Wrong block blob " ;
return false ;
}
miner : : find_nonce_for_given_block ( b , template_res . difficulty , template_res . height ) ;
submit_req . front ( ) = string_tools : : buff_to_hex_nodelimer ( block_to_blob ( b ) ) ;
r = on_submitblock ( submit_req , submit_res , error_resp , ctx ) ;
res . status = submit_res . status ;
if ( ! r ) return false ;
res . height = template_res . height ;
}
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
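// Sums the coinbase outputs of a block (base reward plus collected fees).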
uint64_t core_rpc_server : : get_block_reward ( const block & blk )
{
uint64_t reward = 0 ;
for ( const tx_out & out : blk . miner_tx . vout )
{
reward + = out . amount ;
}
return reward ;
}
//------------------------------------------------------------------------------------------------------------------------------
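// Fills a block_header_response from a block: versions, timestamp, difficulty, reward,
// weight and, when fill_pow_hash is set, the proof-of-work hash.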
bool core_rpc_server : : fill_block_header_response ( const block & blk , bool orphan_status , uint64_t height , const crypto : : hash & hash , block_header_response & response , bool fill_pow_hash )
{
PERF_TIMER ( fill_block_header_response ) ;
response . major_version = blk . major_version ;
response . minor_version = blk . minor_version ;
response . timestamp = blk . timestamp ;
response . prev_hash = string_tools : : pod_to_hex ( blk . prev_id ) ;
response . nonce = blk . nonce ;
response . orphan_status = orphan_status ;
response . height = height ;
response . depth = m_core . get_current_blockchain_height ( ) - height - 1 ;
response . hash = string_tools : : pod_to_hex ( hash ) ;
response . difficulty = m_core . get_blockchain_storage ( ) . block_difficulty ( height ) ;
response . cumulative_difficulty = m_core . get_blockchain_storage ( ) . get_db ( ) . get_block_cumulative_difficulty ( height ) ;
response . reward = get_block_reward ( blk ) ;
response . block_size = response . block_weight = m_core . get_blockchain_storage ( ) . get_db ( ) . get_block_weight ( height ) ;
response . num_txes = blk . tx_hashes . size ( ) ;
response . pow_hash = fill_pow_hash ? string_tools : : pod_to_hex ( get_block_longhash ( blk , height ) ) : " " ;
response . long_term_weight = m_core . get_blockchain_storage ( ) . get_db ( ) . get_block_long_term_weight ( height ) ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
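// Proxies a request to the configured bootstrap daemon while the local daemon is more
// than 10 blocks behind it (the height gap is re-checked at most every 30 seconds);
// responses served this way are flagged as untrusted.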
template < typename COMMAND_TYPE >
bool core_rpc_server : : use_bootstrap_daemon_if_necessary ( const invoke_http_mode & mode , const std : : string & command_name , const typename COMMAND_TYPE : : request & req , typename COMMAND_TYPE : : response & res , bool & r )
{
res . untrusted = false ;
if ( m_bootstrap_daemon_address . empty ( ) )
return false ;
boost : : unique_lock < boost : : shared_mutex > lock ( m_bootstrap_daemon_mutex ) ;
if ( ! m_should_use_bootstrap_daemon )
{
MINFO ( " The local daemon is fully synced. Not switching back to the bootstrap daemon " ) ;
return false ;
}
auto current_time = std : : chrono : : system_clock : : now ( ) ;
if ( current_time - m_bootstrap_height_check_time > std : : chrono : : seconds ( 30 ) ) // update every 30s
{
m_bootstrap_height_check_time = current_time ;
uint64_t top_height ;
crypto : : hash top_hash ;
m_core . get_blockchain_top ( top_height , top_hash ) ;
+ + top_height ; // turn top block height into blockchain height
// query bootstrap daemon's height
cryptonote : : COMMAND_RPC_GET_HEIGHT : : request getheight_req ;
cryptonote : : COMMAND_RPC_GET_HEIGHT : : response getheight_res ;
bool ok = epee : : net_utils : : invoke_http_json ( " /getheight " , getheight_req , getheight_res , m_http_client ) ;
ok = ok & & getheight_res . status = = CORE_RPC_STATUS_OK ;
m_should_use_bootstrap_daemon = ok & & top_height + 10 < getheight_res . height ;
MINFO ( ( m_should_use_bootstrap_daemon ? " Using " : " Not using " ) < < " the bootstrap daemon (our height: " < < top_height < < " , bootstrap daemon's height: " < < getheight_res . height < < " ) " ) ;
}
if ( ! m_should_use_bootstrap_daemon )
return false ;
if ( mode = = invoke_http_mode : : JON )
{
r = epee : : net_utils : : invoke_http_json ( command_name , req , res , m_http_client ) ;
}
else if ( mode = = invoke_http_mode : : BIN )
{
r = epee : : net_utils : : invoke_http_bin ( command_name , req , res , m_http_client ) ;
}
else if ( mode = = invoke_http_mode : : JON_RPC )
{
epee : : json_rpc : : request < typename COMMAND_TYPE : : request > json_req = AUTO_VAL_INIT ( json_req ) ;
epee : : json_rpc : : response < typename COMMAND_TYPE : : response , std : : string > json_resp = AUTO_VAL_INIT ( json_resp ) ;
json_req . jsonrpc = " 2.0 " ;
json_req . id = epee : : serialization : : storage_entry ( 0 ) ;
json_req . method = command_name ;
json_req . params = req ;
r = net_utils : : invoke_http_json ( " /json_rpc " , json_req , json_resp , m_http_client ) ;
if ( r )
res = json_resp . result ;
}
else
{
MERROR ( " Unknown invoke_http_mode: " < < mode ) ;
return false ;
}
m_was_bootstrap_ever_used = true ;
r = r & & res . status = = CORE_RPC_STATUS_OK ;
res . untrusted = true ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
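// Returns the header of the current top block.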
bool core_rpc_server : : on_get_last_block_header ( const COMMAND_RPC_GET_LAST_BLOCK_HEADER : : request & req , COMMAND_RPC_GET_LAST_BLOCK_HEADER : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
{
PERF_TIMER ( on_get_last_block_header ) ;
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_LAST_BLOCK_HEADER > ( invoke_http_mode : : JON_RPC , " getlastblockheader " , req , res , r ) )
return r ;
CHECK_CORE_READY ( ) ;
uint64_t last_block_height ;
crypto : : hash last_block_hash ;
m_core . get_blockchain_top ( last_block_height , last_block_hash ) ;
block last_block ;
bool have_last_block = m_core . get_block_by_hash ( last_block_hash , last_block ) ;
if ( ! have_last_block )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: can't get last block. " ;
return false ;
}
bool response_filled = fill_block_header_response ( last_block , false , last_block_height , last_block_hash , res . block_header , req . fill_pow_hash ) ;
if ( ! response_filled )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: can't produce valid response. " ;
return false ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
bool core_rpc_server : : on_get_block_header_by_hash ( const COMMAND_RPC_GET_BLOCK_HEADER_BY_HASH : : request & req , COMMAND_RPC_GET_BLOCK_HEADER_BY_HASH : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
{
PERF_TIMER ( on_get_block_header_by_hash ) ;
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_BLOCK_HEADER_BY_HASH > ( invoke_http_mode : : JON_RPC , " getblockheaderbyhash " , req , res , r ) )
return r ;
crypto : : hash block_hash ;
bool hash_parsed = parse_hash256 ( req . hash , block_hash ) ;
if ( ! hash_parsed )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_PARAM ;
error_resp . message = " Failed to parse hex representation of block hash. Hex = " + req . hash + ' . ' ;
return false ;
}
block blk ;
bool orphan = false ;
bool have_block = m_core . get_block_by_hash ( block_hash , blk , & orphan ) ;
if ( ! have_block )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: can't get block by hash. Hash = " + req . hash + ' . ' ;
return false ;
}
if ( blk . miner_tx . vin . size ( ) ! = 1 | | blk . miner_tx . vin . front ( ) . type ( ) ! = typeid ( txin_gen ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: coinbase transaction in the block has the wrong type " ;
return false ;
}
uint64_t block_height = boost : : get < txin_gen > ( blk . miner_tx . vin . front ( ) ) . height ;
bool response_filled = fill_block_header_response ( blk , orphan , block_height , block_hash , res . block_header , req . fill_pow_hash ) ;
if ( ! response_filled )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: can't produce valid response. " ;
return false ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
bool core_rpc_server : : on_get_block_headers_range ( const COMMAND_RPC_GET_BLOCK_HEADERS_RANGE : : request & req , COMMAND_RPC_GET_BLOCK_HEADERS_RANGE : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
{
PERF_TIMER ( on_get_block_headers_range ) ;
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_BLOCK_HEADERS_RANGE > ( invoke_http_mode : : JON_RPC , " getblockheadersrange " , req , res , r ) )
return r ;
const uint64_t bc_height = m_core . get_current_blockchain_height ( ) ;
if ( req . start_height > = bc_height | | req . end_height > = bc_height | | req . start_height > req . end_height )
{
error_resp . code = CORE_RPC_ERROR_CODE_TOO_BIG_HEIGHT ;
error_resp . message = " Invalid start/end heights. " ;
return false ;
}
for ( uint64_t h = req . start_height ; h < = req . end_height ; + + h )
{
crypto : : hash block_hash = m_core . get_block_id_by_height ( h ) ;
block blk ;
bool have_block = m_core . get_block_by_hash ( block_hash , blk ) ;
if ( ! have_block )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: can't get block by height. Height = " + boost : : lexical_cast < std : : string > ( h ) + " . Hash = " + epee : : string_tools : : pod_to_hex ( block_hash ) + ' . ' ;
return false ;
}
if ( blk . miner_tx . vin . size ( ) ! = 1 | | blk . miner_tx . vin . front ( ) . type ( ) ! = typeid ( txin_gen ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: coinbase transaction in the block has the wrong type " ;
return false ;
}
uint64_t block_height = boost : : get < txin_gen > ( blk . miner_tx . vin . front ( ) ) . height ;
if ( block_height ! = h )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: coinbase transaction in the block has the wrong height " ;
return false ;
}
res . headers . push_back ( block_header_response ( ) ) ;
bool response_filled = fill_block_header_response ( blk , false , block_height , block_hash , res . headers . back ( ) , req . fill_pow_hash ) ;
if ( ! response_filled )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: can't produce valid response. " ;
return false ;
}
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
bool core_rpc_server : : on_get_block_header_by_height ( const COMMAND_RPC_GET_BLOCK_HEADER_BY_HEIGHT : : request & req , COMMAND_RPC_GET_BLOCK_HEADER_BY_HEIGHT : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
{
PERF_TIMER ( on_get_block_header_by_height ) ;
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_BLOCK_HEADER_BY_HEIGHT > ( invoke_http_mode : : JON_RPC , " getblockheaderbyheight " , req , res , r ) )
return r ;
if ( m_core . get_current_blockchain_height ( ) < = req . height )
{
error_resp . code = CORE_RPC_ERROR_CODE_TOO_BIG_HEIGHT ;
error_resp . message = std : : string ( " Requested block height: " ) + std : : to_string ( req . height ) + " greater than current top block height: " + std : : to_string ( m_core . get_current_blockchain_height ( ) - 1 ) ;
return false ;
}
crypto : : hash block_hash = m_core . get_block_id_by_height ( req . height ) ;
block blk ;
bool have_block = m_core . get_block_by_hash ( block_hash , blk ) ;
if ( ! have_block )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: can't get block by height. Height = " + std : : to_string ( req . height ) + ' . ' ;
return false ;
}
bool response_filled = fill_block_header_response ( blk , false , req . height , block_hash , res . block_header , req . fill_pow_hash ) ;
if ( ! response_filled )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: can't produce valid response. " ;
return false ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
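// Returns a full block (header, miner tx hash, tx hashes, hex blob and JSON form),
// looked up by hash or, if no hash is given, by height.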
bool core_rpc_server : : on_get_block ( const COMMAND_RPC_GET_BLOCK : : request & req , COMMAND_RPC_GET_BLOCK : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
{
PERF_TIMER ( on_get_block ) ;
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_BLOCK > ( invoke_http_mode : : JON_RPC , " getblock " , req , res , r ) )
return r ;
crypto : : hash block_hash ;
if ( ! req . hash . empty ( ) )
{
bool hash_parsed = parse_hash256 ( req . hash , block_hash ) ;
if ( ! hash_parsed )
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_PARAM ;
error_resp . message = " Failed to parse hex representation of block hash. Hex = " + req . hash + ' . ' ;
return false ;
}
}
else
{
if ( m_core . get_current_blockchain_height ( ) < = req . height )
{
error_resp . code = CORE_RPC_ERROR_CODE_TOO_BIG_HEIGHT ;
error_resp . message = std : : string ( " Requested block height: " ) + std : : to_string ( req . height ) + " greater than current top block height: " + std : : to_string ( m_core . get_current_blockchain_height ( ) - 1 ) ;
return false ;
}
block_hash = m_core . get_block_id_by_height ( req . height ) ;
}
block blk ;
bool orphan = false ;
bool have_block = m_core . get_block_by_hash ( block_hash , blk , & orphan ) ;
if ( ! have_block )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: can't get block by hash. Hash = " + req . hash + ' . ' ;
return false ;
}
if ( blk . miner_tx . vin . size ( ) ! = 1 | | blk . miner_tx . vin . front ( ) . type ( ) ! = typeid ( txin_gen ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: coinbase transaction in the block has the wrong type " ;
return false ;
}
uint64_t block_height = boost : : get < txin_gen > ( blk . miner_tx . vin . front ( ) ) . height ;
2018-04-05 16:15:15 +02:00
bool response_filled = fill_block_header_response ( blk , orphan , block_height , block_hash , res . block_header , req . fill_pow_hash ) ;
2016-09-29 15:38:12 +02:00
if ( ! response_filled )
2015-10-13 22:37:35 +02:00
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Internal error: can't produce valid response. " ;
return false ;
}
2017-10-28 16:25:47 +02:00
res . miner_tx_hash = epee : : string_tools : : pod_to_hex ( cryptonote : : get_transaction_hash ( blk . miner_tx ) ) ;
2015-10-13 22:37:35 +02:00
for ( size_t n = 0 ; n < blk . tx_hashes . size ( ) ; + + n )
{
res . tx_hashes . push_back ( epee : : string_tools : : pod_to_hex ( blk . tx_hashes [ n ] ) ) ;
}
2016-06-09 22:48:29 +02:00
res . blob = string_tools : : buff_to_hex_nodelimer ( t_serializable_object_to_blob ( blk ) ) ;
2015-10-13 22:37:35 +02:00
res . json = obj_to_json_str ( blk ) ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_get_connections ( const COMMAND_RPC_GET_CONNECTIONS : : request & req , COMMAND_RPC_GET_CONNECTIONS : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2014-07-19 01:33:03 +02:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_get_connections ) ;
2014-07-19 01:33:03 +02:00
res . connections = m_p2p . get_payload_object ( ) . get_connections ( ) ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
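  // Returns general daemon state: chain height and top hash, difficulty, tx counts, connection
  // and peer list counts, network type, block weight limits, etc. In restricted mode the
  // privacy-sensitive fields (connection counts, peer list sizes, start time, free space,
  // database size, bootstrap daemon address, version) are zeroed or blanked below.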
bool core_rpc_server : : on_get_info_json ( const COMMAND_RPC_GET_INFO : : request & req , COMMAND_RPC_GET_INFO : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2014-07-22 20:00:10 +02:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_get_info_json ) ;
2018-01-20 11:38:14 +01:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_INFO > ( invoke_http_mode : : JON_RPC , " get_info " , req , res , r ) )
{
res . bootstrap_daemon_address = m_bootstrap_daemon_address ;
crypto : : hash top_hash ;
m_core . get_blockchain_top ( res . height_without_bootstrap , top_hash ) ;
+ + res . height_without_bootstrap ; // turn top block height into blockchain height
res . was_bootstrap_ever_used = true ;
return r ;
}
2014-07-22 20:00:10 +02:00
2019-01-11 20:09:39 +01:00
const bool restricted = m_restricted & & ctx ;
2016-10-15 15:35:29 +02:00
crypto : : hash top_hash ;
2017-09-09 13:06:24 +02:00
m_core . get_blockchain_top ( res . height , top_hash ) ;
2016-10-15 15:35:29 +02:00
+ + res . height ; // turn top block height into blockchain height
res . top_block_hash = string_tools : : pod_to_hex ( top_hash ) ;
2014-07-22 20:00:10 +02:00
res . target_height = m_core . get_target_blockchain_height ( ) ;
res . difficulty = m_core . get_blockchain_storage ( ) . get_difficulty_for_next_block ( ) ;
2016-01-29 16:09:17 +01:00
res . target = m_core . get_blockchain_storage ( ) . get_current_hard_fork_version ( ) < 2 ? DIFFICULTY_TARGET_V1 : DIFFICULTY_TARGET_V2 ;
2014-07-22 20:00:10 +02:00
res . tx_count = m_core . get_blockchain_storage ( ) . get_total_transactions ( ) - res . height ; //without coinbase
res . tx_pool_size = m_core . get_pool_transactions_count ( ) ;
2019-01-11 20:09:39 +01:00
res . alt_blocks_count = restricted ? 0 : m_core . get_blockchain_storage ( ) . get_alternative_blocks_count ( ) ;
2018-12-16 18:57:44 +01:00
uint64_t total_conn = restricted ? 0 : m_p2p . get_public_connections_count ( ) ;
res . outgoing_connections_count = restricted ? 0 : m_p2p . get_public_outgoing_connections_count ( ) ;
2019-01-11 20:09:39 +01:00
res . incoming_connections_count = restricted ? 0 : ( total_conn - res . outgoing_connections_count ) ;
res . rpc_connections_count = restricted ? 0 : get_connections_count ( ) ;
2018-12-16 18:57:44 +01:00
res . white_peerlist_size = restricted ? 0 : m_p2p . get_public_white_peers_count ( ) ;
res . grey_peerlist_size = restricted ? 0 : m_p2p . get_public_gray_peers_count ( ) ;
2018-11-16 05:32:05 +01:00
cryptonote : : network_type net_type = nettype ( ) ;
res . mainnet = net_type = = MAINNET ;
res . testnet = net_type = = TESTNET ;
res . stagenet = net_type = = STAGENET ;
res . nettype = net_type = = MAINNET ? " mainnet " : net_type = = TESTNET ? " testnet " : net_type = = STAGENET ? " stagenet " : " fakechain " ;
2016-10-15 16:00:21 +02:00
res . cumulative_difficulty = m_core . get_blockchain_storage ( ) . get_db ( ) . get_block_cumulative_difficulty ( res . height - 1 ) ;
2018-07-18 23:24:53 +02:00
res . block_size_limit = res . block_weight_limit = m_core . get_blockchain_storage ( ) . get_current_cumulative_block_weight_limit ( ) ;
res . block_size_median = res . block_weight_median = m_core . get_blockchain_storage ( ) . get_current_cumulative_block_weight_median ( ) ;
2014-07-22 20:00:10 +02:00
res . status = CORE_RPC_STATUS_OK ;
2019-01-11 20:09:39 +01:00
res . start_time = restricted ? 0 : ( uint64_t ) m_core . get_start_time ( ) ;
res . free_space = restricted ? std : : numeric_limits < uint64_t > : : max ( ) : m_core . get_free_space ( ) ;
2017-11-30 16:44:01 +01:00
res . offline = m_core . offline ( ) ;
2019-01-11 20:09:39 +01:00
res . bootstrap_daemon_address = restricted ? " " : m_bootstrap_daemon_address ;
res . height_without_bootstrap = restricted ? 0 : res . height ;
if ( restricted )
2018-11-20 22:41:03 +01:00
res . was_bootstrap_ever_used = false ;
else
2018-01-20 11:38:14 +01:00
{
boost : : shared_lock < boost : : shared_mutex > lock ( m_bootstrap_daemon_mutex ) ;
res . was_bootstrap_ever_used = m_was_bootstrap_ever_used ;
}
2019-01-11 20:09:39 +01:00
res . database_size = restricted ? 0 : m_core . get_blockchain_storage ( ) . get_db ( ) . get_database_size ( ) ;
res . update_available = restricted ? false : m_core . is_update_available ( ) ;
res . version = restricted ? " " : MONERO_VERSION ;
2014-07-22 20:00:10 +02:00
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
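  // Reports hard fork voting information; a requested version of 0 is interpreted as "the next
  // scheduled hard fork version", as the fallback below shows.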
bool core_rpc_server : : on_hard_fork_info ( const COMMAND_RPC_HARD_FORK_INFO : : request & req , COMMAND_RPC_HARD_FORK_INFO : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2015-09-19 17:34:29 +02:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_hard_fork_info ) ;
2018-01-20 11:38:14 +01:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_HARD_FORK_INFO > ( invoke_http_mode : : JON_RPC , " hard_fork_info " , req , res , r ) )
return r ;
2015-09-19 17:34:29 +02:00
const Blockchain & blockchain = m_core . get_blockchain_storage ( ) ;
2016-08-12 20:19:25 +02:00
uint8_t version = req . version > 0 ? req . version : blockchain . get_next_hard_fork_version ( ) ;
2015-09-19 17:34:29 +02:00
res . version = blockchain . get_current_hard_fork_version ( ) ;
2015-12-19 15:52:30 +01:00
res . enabled = blockchain . get_hard_fork_voting_info ( version , res . window , res . votes , res . threshold , res . earliest_height , res . voting ) ;
2015-09-19 17:34:29 +02:00
res . state = blockchain . get_hard_fork_state ( ) ;
2015-10-26 11:17:48 +01:00
res . status = CORE_RPC_STATUS_OK ;
2015-09-19 17:34:29 +02:00
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
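  // Lists currently active bans only; "seconds" is the remaining ban time, and "ip" is filled
  // in addition to "host" when the banned host parses as an IPv4 address.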
bool core_rpc_server : : on_get_bans ( const COMMAND_RPC_GETBANS : : request & req , COMMAND_RPC_GETBANS : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2015-11-26 01:04:22 +01:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_get_bans ) ;
2015-11-26 01:04:22 +01:00
2016-03-12 14:44:55 +01:00
auto now = time ( nullptr ) ;
2017-05-27 12:35:54 +02:00
std : : map < std : : string , time_t > blocked_hosts = m_p2p . get_blocked_hosts ( ) ;
for ( std : : map < std : : string , time_t > : : const_iterator i = blocked_hosts . begin ( ) ; i ! = blocked_hosts . end ( ) ; + + i )
2015-11-26 01:04:22 +01:00
{
2016-03-12 14:44:55 +01:00
if ( i - > second > now ) {
COMMAND_RPC_GETBANS : : ban b ;
2017-05-27 12:35:54 +02:00
b . host = i - > first ;
b . ip = 0 ;
uint32_t ip ;
if ( epee : : string_tools : : get_ip_int32_from_string ( ip , i - > first ) )
b . ip = ip ;
2016-03-12 14:44:55 +01:00
b . seconds = i - > second - now ;
res . bans . push_back ( b ) ;
}
2015-11-26 01:04:22 +01:00
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
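  // Adds or removes bans. Each entry may give either a "host" string (parsed into a network
  // address) or a raw IPv4 "ip"; "ban" selects blocking vs unblocking, and "seconds" the duration.
  // Example (illustrative only, assuming this handler is exposed as the "set_bans" JSON-RPC method):
  //   {"jsonrpc":"2.0","id":"0","method":"set_bans","params":{"bans":[{"host":"192.168.1.51","ban":true,"seconds":3600}]}}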
bool core_rpc_server : : on_set_bans ( const COMMAND_RPC_SETBANS : : request & req , COMMAND_RPC_SETBANS : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2015-11-26 01:04:22 +01:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_set_bans ) ;
2015-11-26 01:04:22 +01:00
for ( auto i = req . bans . begin ( ) ; i ! = req . bans . end ( ) ; + + i )
{
2017-05-27 12:35:54 +02:00
epee : : net_utils : : network_address na ;
if ( ! i - > host . empty ( ) )
{
2018-12-16 18:57:44 +01:00
auto na_parsed = net : : get_network_address ( i - > host , 0 ) ;
if ( ! na_parsed )
2017-05-27 12:35:54 +02:00
{
error_resp . code = CORE_RPC_ERROR_CODE_WRONG_PARAM ;
error_resp . message = " Unsupported host type " ;
return false ;
}
2018-12-16 18:57:44 +01:00
na = std : : move ( * na_parsed ) ;
2017-05-27 12:35:54 +02:00
}
else
{
2017-08-25 17:14:46 +02:00
na = epee : : net_utils : : ipv4_network_address { i - > ip , 0 } ;
2017-05-27 12:35:54 +02:00
}
2015-11-26 01:04:22 +01:00
if ( i - > ban )
2017-05-27 12:35:54 +02:00
m_p2p . block_host ( na , i - > seconds ) ;
2015-11-26 01:04:22 +01:00
else
2017-05-27 12:35:54 +02:00
m_p2p . unblock_host ( na ) ;
2015-11-26 01:04:22 +01:00
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
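  // Removes transactions from the pool: an empty "txids" list flushes the entire pool,
  // otherwise each entry is parsed as a hex transaction id.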
bool core_rpc_server : : on_flush_txpool ( const COMMAND_RPC_FLUSH_TRANSACTION_POOL : : request & req , COMMAND_RPC_FLUSH_TRANSACTION_POOL : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2016-01-30 14:28:26 +01:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_flush_txpool ) ;
2016-01-30 14:28:26 +01:00
bool failed = false ;
2018-04-16 01:16:02 +02:00
std : : vector < crypto : : hash > txids ;
2016-01-30 14:28:26 +01:00
if ( req . txids . empty ( ) )
{
2018-04-16 01:16:02 +02:00
std : : vector < transaction > pool_txs ;
2016-01-30 14:28:26 +01:00
bool r = m_core . get_pool_transactions ( pool_txs ) ;
if ( ! r )
{
res . status = " Failed to get txpool contents " ;
return true ;
}
for ( const auto & tx : pool_txs )
{
txids . push_back ( cryptonote : : get_transaction_hash ( tx ) ) ;
}
}
else
{
for ( const auto & str : req . txids )
{
cryptonote : : blobdata txid_data ;
if ( ! epee : : string_tools : : parse_hexstr_to_binbuff ( str , txid_data ) | | txid_data . size ( ) ! = sizeof ( crypto : : hash ) )
{
failed = true ;
}
2017-12-07 22:33:20 +01:00
else
{
crypto : : hash txid = * reinterpret_cast < const crypto : : hash * > ( txid_data . data ( ) ) ;
txids . push_back ( txid ) ;
}
2016-01-30 14:28:26 +01:00
}
}
if ( ! m_core . get_blockchain_storage ( ) . flush_txes_from_pool ( txids ) )
{
2017-12-07 22:33:20 +01:00
res . status = " Failed to remove one or more tx(es) " ;
2016-01-30 14:28:26 +01:00
return false ;
}
if ( failed )
{
2017-12-07 22:33:20 +01:00
if ( txids . empty ( ) )
res . status = " Failed to parse txid " ;
else
res . status = " Failed to parse some of the txids " ;
2016-01-30 14:28:26 +01:00
return false ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
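  // Builds a per-amount histogram of output counts (total, unlocked, recent); entries whose
  // total count falls outside [min_count, max_count] are dropped, with a max_count of 0
  // meaning no upper bound.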
bool core_rpc_server : : on_get_output_histogram ( const COMMAND_RPC_GET_OUTPUT_HISTOGRAM : : request & req , COMMAND_RPC_GET_OUTPUT_HISTOGRAM : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2016-03-26 15:30:23 +01:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_get_output_histogram ) ;
2018-01-20 11:38:14 +01:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_OUTPUT_HISTOGRAM > ( invoke_http_mode : : JON_RPC , " get_output_histogram " , req , res , r ) )
return r ;
2016-03-26 15:30:23 +01:00
2016-09-17 16:45:51 +02:00
std : : map < uint64_t , std : : tuple < uint64_t , uint64_t , uint64_t > > histogram ;
2016-03-26 15:30:23 +01:00
try
{
2018-03-22 18:51:58 +01:00
histogram = m_core . get_blockchain_storage ( ) . get_output_histogram ( req . amounts , req . unlocked , req . recent_cutoff , req . min_count ) ;
2016-03-26 15:30:23 +01:00
}
catch ( const std : : exception & e )
{
res . status = " Failed to get output histogram " ;
return true ;
}
res . histogram . clear ( ) ;
res . histogram . reserve ( histogram . size ( ) ) ;
for ( const auto & i : histogram )
{
2016-09-17 16:45:51 +02:00
if ( std : : get < 0 > ( i . second ) > = req . min_count & & ( std : : get < 0 > ( i . second ) < = req . max_count | | req . max_count = = 0 ) )
res . histogram . push_back ( COMMAND_RPC_GET_OUTPUT_HISTOGRAM : : entry ( i . first , std : : get < 0 > ( i . second ) , std : : get < 1 > ( i . second ) , std : : get < 2 > ( i . second ) ) ) ;
2016-03-26 15:30:23 +01:00
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_get_version ( const COMMAND_RPC_GET_VERSION : : request & req , COMMAND_RPC_GET_VERSION : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2016-07-10 17:49:40 +02:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_get_version ) ;
2018-01-20 11:38:14 +01:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_VERSION > ( invoke_http_mode : : JON_RPC , " get_version " , req , res , r ) )
return r ;
2016-07-10 17:49:40 +02:00
res . version = CORE_RPC_VERSION ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_get_coinbase_tx_sum ( const COMMAND_RPC_GET_COINBASE_TX_SUM : : request & req , COMMAND_RPC_GET_COINBASE_TX_SUM : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2016-10-10 21:45:51 +02:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_get_coinbase_tx_sum ) ;
2016-10-11 01:55:18 +02:00
std : : pair < uint64_t , uint64_t > amounts = m_core . get_coinbase_tx_sum ( req . height , req . count ) ;
res . emission_amount = amounts . first ;
res . fee_amount = amounts . second ;
2017-02-14 00:05:33 +01:00
res . status = CORE_RPC_STATUS_OK ;
2016-10-10 21:45:51 +02:00
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_get_base_fee_estimate ( const COMMAND_RPC_GET_BASE_FEE_ESTIMATE : : request & req , COMMAND_RPC_GET_BASE_FEE_ESTIMATE : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2016-10-28 22:19:40 +02:00
{
2018-07-18 23:24:53 +02:00
PERF_TIMER ( on_get_base_fee_estimate ) ;
2018-01-20 11:38:14 +01:00
bool r ;
2018-07-18 23:24:53 +02:00
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_BASE_FEE_ESTIMATE > ( invoke_http_mode : : JON_RPC , " get_fee_estimate " , req , res , r ) )
2018-01-20 11:38:14 +01:00
return r ;
2018-07-18 23:24:53 +02:00
res . fee = m_core . get_blockchain_storage ( ) . get_dynamic_base_fee_estimate ( req . grace_blocks ) ;
res . quantization_mask = Blockchain : : get_fee_quantization_mask ( ) ;
2016-10-28 22:19:40 +02:00
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_get_alternate_chains ( const COMMAND_RPC_GET_ALTERNATE_CHAINS : : request & req , COMMAND_RPC_GET_ALTERNATE_CHAINS : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2016-12-17 12:25:15 +01:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_get_alternate_chains ) ;
2016-12-17 12:25:15 +01:00
try
{
2018-05-20 00:53:05 +02:00
std : : list < std : : pair < Blockchain : : block_extended_info , std : : vector < crypto : : hash > > > chains = m_core . get_blockchain_storage ( ) . get_alternative_chains ( ) ;
2016-12-17 12:25:15 +01:00
for ( const auto & i : chains )
{
2018-05-20 00:53:05 +02:00
res . chains . push_back ( COMMAND_RPC_GET_ALTERNATE_CHAINS : : chain_info { epee : : string_tools : : pod_to_hex ( get_block_hash ( i . first . bl ) ) , i . first . height , i . second . size ( ) , i . first . cumulative_difficulty , { } , std : : string ( ) } ) ;
res . chains . back ( ) . block_hashes . reserve ( i . second . size ( ) ) ;
for ( const crypto : : hash & block_id : i . second )
res . chains . back ( ) . block_hashes . push_back ( epee : : string_tools : : pod_to_hex ( block_id ) ) ;
if ( i . first . height < i . second . size ( ) )
{
res . status = " Error finding alternate chain attachment point " ;
return true ;
}
cryptonote : : block main_chain_parent_block ;
try { main_chain_parent_block = m_core . get_blockchain_storage ( ) . get_db ( ) . get_block_from_height ( i . first . height - i . second . size ( ) ) ; }
catch ( const std : : exception & e ) { res . status = " Error finding alternate chain attachment point " ; return true ; }
res . chains . back ( ) . main_chain_parent_block = epee : : string_tools : : pod_to_hex ( get_block_hash ( main_chain_parent_block ) ) ;
2016-12-17 12:25:15 +01:00
}
res . status = CORE_RPC_STATUS_OK ;
}
catch ( . . . )
{
res . status = " Error retrieving alternate chains " ;
}
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_get_limit ( const COMMAND_RPC_GET_LIMIT : : request & req , COMMAND_RPC_GET_LIMIT : : response & res , const connection_context * ctx )
2017-09-17 21:19:53 +02:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_get_limit ) ;
2018-01-20 11:38:14 +01:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_LIMIT > ( invoke_http_mode : : JON , " /get_limit " , req , res , r ) )
return r ;
2017-09-17 21:19:53 +02:00
res . limit_down = epee : : net_utils : : connection_basic : : get_rate_down_limit ( ) ;
res . limit_up = epee : : net_utils : : connection_basic : : get_rate_up_limit ( ) ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_set_limit ( const COMMAND_RPC_SET_LIMIT : : request & req , COMMAND_RPC_SET_LIMIT : : response & res , const connection_context * ctx )
2017-09-17 21:19:53 +02:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_set_limit ) ;
2017-09-17 21:19:53 +02:00
// -1 = reset to default
// 0 = do not modify
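    // Example (illustrative only) request body, assuming this handler is exposed next to
    // /get_limit above as /set_limit: {"limit_down":8192,"limit_up":-1} caps the download
    // rate at 8192 (in the daemon's rate units, typically kB/s) and resets the upload limit
    // to its default.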
if ( req . limit_down > 0 )
{
epee : : net_utils : : connection_basic : : set_rate_down_limit ( req . limit_down ) ;
}
else if ( req . limit_down < 0 )
{
if ( req . limit_down ! = - 1 )
{
res . status = " Invalid parameter: limit_down must be -1, 0, or a positive value " ;
return false ;
}
2017-11-26 15:26:17 +01:00
epee : : net_utils : : connection_basic : : set_rate_down_limit ( nodetool : : default_limit_down ) ;
2017-09-17 21:19:53 +02:00
}
if ( req . limit_up > 0 )
{
epee : : net_utils : : connection_basic : : set_rate_up_limit ( req . limit_up ) ;
}
else if ( req . limit_up < 0 )
{
if ( req . limit_up ! = - 1 )
{
res . status = " Invalid parameter: limit_up must be -1, 0, or a positive value " ;
return false ;
}
2017-11-26 15:26:17 +01:00
epee : : net_utils : : connection_basic : : set_rate_up_limit ( nodetool : : default_limit_up ) ;
2017-09-17 21:19:53 +02:00
}
res . limit_down = epee : : net_utils : : connection_basic : : get_rate_down_limit ( ) ;
res . limit_up = epee : : net_utils : : connection_basic : : get_rate_up_limit ( ) ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_out_peers ( const COMMAND_RPC_OUT_PEERS : : request & req , COMMAND_RPC_OUT_PEERS : : response & res , const connection_context * ctx )
2015-04-01 19:00:45 +02:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_out_peers ) ;
2018-12-16 18:57:44 +01:00
m_p2p . change_max_out_public_peers ( req . out_peers ) ;
2017-10-06 09:40:14 +02:00
res . status = CORE_RPC_STATUS_OK ;
return true ;
2015-04-01 19:00:45 +02:00
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_in_peers ( const COMMAND_RPC_IN_PEERS : : request & req , COMMAND_RPC_IN_PEERS : : response & res , const connection_context * ctx )
2018-01-20 22:44:23 +01:00
{
PERF_TIMER ( on_in_peers ) ;
2018-12-16 18:57:44 +01:00
m_p2p . change_max_in_public_peers ( req . in_peers ) ;
2018-01-20 22:44:23 +01:00
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_start_save_graph ( const COMMAND_RPC_START_SAVE_GRAPH : : request & req , COMMAND_RPC_START_SAVE_GRAPH : : response & res , const connection_context * ctx )
2015-04-01 19:00:45 +02:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_start_save_graph ) ;
2015-04-01 19:00:45 +02:00
m_p2p . set_save_graph ( true ) ;
2017-02-14 00:05:33 +01:00
res . status = CORE_RPC_STATUS_OK ;
2015-04-01 19:00:45 +02:00
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_stop_save_graph ( const COMMAND_RPC_STOP_SAVE_GRAPH : : request & req , COMMAND_RPC_STOP_SAVE_GRAPH : : response & res , const connection_context * ctx )
2015-04-01 19:00:45 +02:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_stop_save_graph ) ;
2015-04-01 19:00:45 +02:00
m_p2p . set_save_graph ( false ) ;
2017-02-14 00:05:33 +01:00
res . status = CORE_RPC_STATUS_OK ;
2015-04-01 19:00:45 +02:00
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_update ( const COMMAND_RPC_UPDATE : : request & req , COMMAND_RPC_UPDATE : : response & res , const connection_context * ctx )
2017-02-25 00:16:13 +01:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_update ) ;
2017-02-25 00:16:13 +01:00
static const char software [ ] = " monero " ;
2017-03-04 19:45:33 +01:00
# ifdef BUILD_TAG
static const char buildtag [ ] = BOOST_PP_STRINGIZE ( BUILD_TAG ) ;
2017-09-22 22:48:19 +02:00
static const char subdir [ ] = " cli " ;
2017-02-25 00:16:13 +01:00
# else
static const char buildtag [ ] = " source " ;
2017-09-22 22:48:19 +02:00
static const char subdir [ ] = " source " ;
2017-02-25 00:16:13 +01:00
# endif
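    // "check" only reports whether a newer version exists, "download" additionally fetches the
    // file and verifies its SHA-256 hash, and "update" (applying it) is not implemented yet,
    // as the code below shows. Example (illustrative only) request body: {"command":"check"}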
if ( req . command ! = " check " & & req . command ! = " download " & & req . command ! = " update " )
{
res . status = std : : string ( " unknown command: ' " ) + req . command + " ' " ;
return true ;
}
std : : string version , hash ;
if ( ! tools : : check_updates ( software , buildtag , version , hash ) )
{
res . status = " Error checking for updates " ;
return true ;
}
if ( tools : : vercmp ( version . c_str ( ) , MONERO_VERSION ) < = 0 )
{
res . update = false ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
res . update = true ;
res . version = version ;
2017-09-22 22:48:19 +02:00
res . user_uri = tools : : get_update_url ( software , subdir , buildtag , version , true ) ;
res . auto_uri = tools : : get_update_url ( software , subdir , buildtag , version , false ) ;
2017-02-25 00:16:13 +01:00
res . hash = hash ;
if ( req . command = = " check " )
{
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
boost : : filesystem : : path path ;
if ( req . path . empty ( ) )
{
std : : string filename ;
const char * slash = strrchr ( res . auto_uri . c_str ( ) , ' / ' ) ;
if ( slash )
filename = slash + 1 ;
else
filename = std : : string ( software ) + " -update- " + version ;
path = epee : : string_tools : : get_current_module_folder ( ) ;
path / = filename ;
}
else
{
path = req . path ;
}
crypto : : hash file_hash ;
if ( ! tools : : sha256sum ( path . string ( ) , file_hash ) | | ( hash ! = epee : : string_tools : : pod_to_hex ( file_hash ) ) )
{
MDEBUG ( " We don't have that file already, downloading " ) ;
if ( ! tools : : download ( path . string ( ) , res . auto_uri ) )
{
MERROR ( " Failed to download " < < res . auto_uri ) ;
return false ;
}
if ( ! tools : : sha256sum ( path . string ( ) , file_hash ) )
{
MERROR ( " Failed to hash " < < path ) ;
return false ;
}
if ( hash ! = epee : : string_tools : : pod_to_hex ( file_hash ) )
{
MERROR ( " Download from " < < res . auto_uri < < " does not match the expected hash " ) ;
return false ;
}
MINFO ( " New version downloaded to " < < path ) ;
}
else
{
MDEBUG ( " We already have " < < path < < " with expected hash " ) ;
}
res . path = path . string ( ) ;
if ( req . command = = " download " )
{
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
res . status = " 'update' not implemented yet " ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_pop_blocks ( const COMMAND_RPC_POP_BLOCKS : : request & req , COMMAND_RPC_POP_BLOCKS : : response & res , const connection_context * ctx )
2018-11-25 22:08:07 +01:00
{
PERF_TIMER ( on_pop_blocks ) ;
m_core . get_blockchain_storage ( ) . pop_blocks ( req . nblocks ) ;
res . height = m_core . get_current_blockchain_height ( ) ;
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
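  // Re-broadcasts transactions that are already in the local pool; txids that fail to parse or
  // are not found in the pool are collected into the status string and make the call fail.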
bool core_rpc_server : : on_relay_tx ( const COMMAND_RPC_RELAY_TX : : request & req , COMMAND_RPC_RELAY_TX : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2017-04-02 13:17:35 +02:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_relay_tx ) ;
2017-04-02 13:17:35 +02:00
bool failed = false ;
2017-12-07 22:33:20 +01:00
res . status = " " ;
2017-04-02 13:17:35 +02:00
for ( const auto & str : req . txids )
{
cryptonote : : blobdata txid_data ;
if ( ! epee : : string_tools : : parse_hexstr_to_binbuff ( str , txid_data ) | | txid_data . size ( ) ! = sizeof ( crypto : : hash ) )
{
2017-12-07 22:33:20 +01:00
if ( ! res . status . empty ( ) ) res . status + = " , " ;
res . status + = std : : string ( " invalid transaction id: " ) + str ;
2017-04-02 13:17:35 +02:00
failed = true ;
2017-12-07 22:33:20 +01:00
continue ;
2017-04-02 13:17:35 +02:00
}
crypto : : hash txid = * reinterpret_cast < const crypto : : hash * > ( txid_data . data ( ) ) ;
2017-05-14 15:06:55 +02:00
cryptonote : : blobdata txblob ;
bool r = m_core . get_pool_transaction ( txid , txblob ) ;
2017-04-02 13:17:35 +02:00
if ( r )
{
cryptonote_connection_context fake_context = AUTO_VAL_INIT ( fake_context ) ;
NOTIFY_NEW_TRANSACTIONS : : request nnt_req ;
2017-05-14 15:06:55 +02:00
nnt_req . txs . push_back ( txblob ) ;
2017-04-02 13:17:35 +02:00
m_core . get_protocol ( ) - > relay_transactions ( nnt_req , fake_context ) ;
//TODO: make sure that tx has reached other nodes here, probably wait to receive reflections from other nodes
}
else
{
2017-12-07 22:33:20 +01:00
if ( ! res . status . empty ( ) ) res . status + = " , " ;
res . status + = std : : string ( " transaction not found in pool: " ) + str ;
2017-04-02 13:17:35 +02:00
failed = true ;
2017-12-07 22:33:20 +01:00
continue ;
2017-04-02 13:17:35 +02:00
}
}
if ( failed )
{
return false ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
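  // Reports synchronization state: current and target heights, the next needed pruning stripe,
  // per-peer connection info, the queued block spans with their download rates, and a textual
  // overview of the block queue.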
bool core_rpc_server : : on_sync_info ( const COMMAND_RPC_SYNC_INFO : : request & req , COMMAND_RPC_SYNC_INFO : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2017-07-02 23:41:15 +02:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_sync_info ) ;
2017-07-02 23:41:15 +02:00
crypto : : hash top_hash ;
2017-09-09 13:06:24 +02:00
m_core . get_blockchain_top ( res . height , top_hash ) ;
2017-07-02 23:41:15 +02:00
+ + res . height ; // turn top block height into blockchain height
res . target_height = m_core . get_target_blockchain_height ( ) ;
2018-04-30 00:30:51 +02:00
res . next_needed_pruning_seed = m_p2p . get_payload_object ( ) . get_next_needed_pruning_stripe ( ) . second ;
2017-07-02 23:41:15 +02:00
for ( const auto & c : m_p2p . get_payload_object ( ) . get_connections ( ) )
res . peers . push_back ( { c } ) ;
const cryptonote : : block_queue & block_queue = m_p2p . get_payload_object ( ) . get_block_queue ( ) ;
block_queue . foreach ( [ & ] ( const cryptonote : : block_queue : : span & span ) {
2017-11-18 00:52:50 +01:00
const std : : string span_connection_id = epee : : string_tools : : pod_to_hex ( span . connection_id ) ;
2017-07-02 23:41:15 +02:00
uint32_t speed = ( uint32_t ) ( 100.0f * block_queue . get_speed ( span . connection_id ) + 0.5f ) ;
std : : string address = " " ;
for ( const auto & c : m_p2p . get_payload_object ( ) . get_connections ( ) )
2017-11-18 00:52:50 +01:00
if ( c . connection_id = = span_connection_id )
2017-07-02 23:41:15 +02:00
address = c . address ;
2017-11-18 00:52:50 +01:00
res . spans . push_back ( { span . start_block_height , span . nblocks , span_connection_id , ( uint32_t ) ( span . rate + 0.5f ) , speed , span . size , address } ) ;
2017-07-02 23:41:15 +02:00
return true ;
} ) ;
2018-04-30 00:30:51 +02:00
res . overview = block_queue . get_overview ( res . height ) ;
2017-07-02 23:41:15 +02:00
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_get_txpool_backlog ( const COMMAND_RPC_GET_TRANSACTION_POOL_BACKLOG : : request & req , COMMAND_RPC_GET_TRANSACTION_POOL_BACKLOG : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2017-08-26 17:23:31 +02:00
{
2017-10-29 22:10:46 +01:00
PERF_TIMER ( on_get_txpool_backlog ) ;
2018-01-20 11:38:14 +01:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_TRANSACTION_POOL_BACKLOG > ( invoke_http_mode : : JON_RPC , " get_txpool_backlog " , req , res , r ) )
return r ;
2017-08-26 17:23:31 +02:00
if ( ! m_core . get_txpool_backlog ( res . backlog ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Failed to get txpool backlog " ;
return false ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
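  // Computes, for each requested amount, how many outputs of that amount exist at each height
  // in [from_height, to_height] (cumulative or per-block, per req.cumulative); a to_height of 0
  // means "up to the current top block". Example (illustrative only) JSON-RPC request, using
  // amount 0 for RingCT outputs:
  //   {"jsonrpc":"2.0","id":"0","method":"get_output_distribution","params":{"amounts":[0],"from_height":1500000,"cumulative":true}}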
bool core_rpc_server : : on_get_output_distribution ( const COMMAND_RPC_GET_OUTPUT_DISTRIBUTION : : request & req , COMMAND_RPC_GET_OUTPUT_DISTRIBUTION : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
2018-02-19 12:15:15 +01:00
{
2018-04-08 14:23:49 +02:00
PERF_TIMER ( on_get_output_distribution ) ;
2018-05-22 15:46:30 +02:00
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_OUTPUT_DISTRIBUTION > ( invoke_http_mode : : JON_RPC , " get_output_distribution " , req , res , r ) )
return r ;
2018-02-19 12:15:15 +01:00
try
{
2018-10-19 11:20:03 +02:00
// 0 is placeholder for the whole chain
const uint64_t req_to_height = req . to_height ? req . to_height : ( m_core . get_current_blockchain_height ( ) - 1 ) ;
2018-02-19 12:15:15 +01:00
for ( uint64_t amount : req . amounts )
{
2018-11-06 15:23:26 +01:00
auto data = rpc : : RpcHandler : : get_output_distribution ( [ this ] ( uint64_t amount , uint64_t from , uint64_t to , uint64_t & start_height , std : : vector < uint64_t > & distribution , uint64_t & base ) { return m_core . get_output_distribution ( amount , from , to , start_height , distribution , base ) ; } , amount , req . from_height , req_to_height , req . cumulative ) ;
2018-10-20 04:06:03 +02:00
if ( ! data )
2018-02-19 12:15:15 +01:00
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
2018-10-20 04:06:03 +02:00
error_resp . message = " Failed to get output distribution " ;
2018-02-19 12:15:15 +01:00
return false ;
}
2018-05-31 17:53:56 +02:00
2018-11-08 19:26:59 +01:00
res . distributions . push_back ( { std : : move ( * data ) , amount , " " , req . binary , req . compress } ) ;
2018-02-19 12:15:15 +01:00
}
}
catch ( const std : : exception & e )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Failed to get output distribution " ;
return false ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_get_output_distribution_bin ( const COMMAND_RPC_GET_OUTPUT_DISTRIBUTION : : request & req , COMMAND_RPC_GET_OUTPUT_DISTRIBUTION : : response & res , const connection_context * ctx )
2018-11-08 19:26:59 +01:00
{
PERF_TIMER ( on_get_output_distribution_bin ) ;
bool r ;
if ( use_bootstrap_daemon_if_necessary < COMMAND_RPC_GET_OUTPUT_DISTRIBUTION > ( invoke_http_mode : : BIN , " /get_output_distribution.bin " , req , res , r ) )
return r ;
res . status = " Failed " ;
if ( ! req . binary )
{
res . status = " Binary only call " ;
return false ;
}
try
{
// 0 is placeholder for the whole chain
const uint64_t req_to_height = req . to_height ? req . to_height : ( m_core . get_current_blockchain_height ( ) - 1 ) ;
for ( uint64_t amount : req . amounts )
{
auto data = rpc : : RpcHandler : : get_output_distribution ( [ this ] ( uint64_t amount , uint64_t from , uint64_t to , uint64_t & start_height , std : : vector < uint64_t > & distribution , uint64_t & base ) { return m_core . get_output_distribution ( amount , from , to , start_height , distribution , base ) ; } , amount , req . from_height , req_to_height , req . cumulative ) ;
if ( ! data )
{
res . status = " Failed to get output distribution " ;
return false ;
}
res . distributions . push_back ( { std : : move ( * data ) , amount , " " , req . binary , req . compress } ) ;
}
}
catch ( const std : : exception & e )
{
res . status = " Failed to get output distribution " ;
return false ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2019-01-11 20:09:39 +01:00
bool core_rpc_server : : on_prune_blockchain ( const COMMAND_RPC_PRUNE_BLOCKCHAIN : : request & req , COMMAND_RPC_PRUNE_BLOCKCHAIN : : response & res , epee : : json_rpc : : error & error_resp , const connection_context * ctx )
  // Pruning discards seven eighths of prunable transaction data, saving roughly two thirds of
  // the blockchain size while keeping the node useful as a sync source for one eighth of the
  // chain; no other data is currently pruned. A chain can be pruned by running monerod with
  // --prune-blockchain, by the "prune_blockchain" console command, or by the
  // monero-blockchain-prune utility. The first two prune in place: due to how LMDB works this
  // does not shrink the file on disk, but marks space as free for reuse, so the file stops
  // growing until free space runs short. The utility instead writes a pruned copy to a new,
  // smaller database file. Once pruned, the database stays pruned as it syncs; there is no
  // need to pass --prune-blockchain again.
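  // Example (illustrative only) JSON-RPC request, assuming this handler is exposed as the
  // "prune_blockchain" method; with "check":true the call only verifies the current pruning
  // state instead of pruning:
  //   {"jsonrpc":"2.0","id":"0","method":"prune_blockchain","params":{"check":true}}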
2018-04-30 00:30:51 +02:00
{
try
{
if ( ! ( req . check ? m_core . check_blockchain_pruning ( ) : m_core . prune_blockchain ( ) ) )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = req . check ? " Failed to check blockchain pruning " : " Failed to prune blockchain " ;
return false ;
}
res . pruning_seed = m_core . get_blockchain_pruning_seed ( ) ;
}
catch ( const std : : exception & e )
{
error_resp . code = CORE_RPC_ERROR_CODE_INTERNAL_ERROR ;
error_resp . message = " Failed to prune blockchain " ;
return false ;
}
res . status = CORE_RPC_STATUS_OK ;
return true ;
}
//------------------------------------------------------------------------------------------------------------------------------
2018-02-19 12:15:15 +01:00
2015-01-29 23:10:53 +01:00
2018-02-16 12:04:04 +01:00
const command_line : : arg_descriptor < std : : string , false , true , 2 > core_rpc_server : : arg_rpc_bind_port = {
2015-01-29 23:10:53 +01:00
" rpc-bind-port "
, " Port for RPC server "
, std : : to_string ( config : : RPC_DEFAULT_PORT )
2018-02-16 12:04:04 +01:00
, { { & cryptonote : : arg_testnet_on , & cryptonote : : arg_stagenet_on } }
2018-03-27 15:47:57 +02:00
, [ ] ( std : : array < bool , 2 > testnet_stagenet , bool defaulted , std : : string val ) - > std : : string {
2018-02-16 12:04:04 +01:00
if ( testnet_stagenet [ 0 ] & & defaulted )
2018-01-22 02:49:51 +01:00
return std : : to_string ( config : : testnet : : RPC_DEFAULT_PORT ) ;
2018-02-16 12:04:04 +01:00
else if ( testnet_stagenet [ 1 ] & & defaulted )
return std : : to_string ( config : : stagenet : : RPC_DEFAULT_PORT ) ;
2018-01-22 02:49:51 +01:00
return val ;
}
2015-01-29 23:10:53 +01:00
} ;
2017-11-16 04:58:11 +01:00
const command_line : : arg_descriptor < std : : string > core_rpc_server : : arg_rpc_restricted_bind_port = {
" rpc-restricted-bind-port "
, " Port for restricted RPC server "
, " "
} ;
2015-11-27 19:24:29 +01:00
const command_line : : arg_descriptor < bool > core_rpc_server : : arg_restricted_rpc = {
" restricted-rpc "
2017-11-08 13:06:41 +01:00
, " Restrict RPC to view-only commands and do not return privacy-sensitive data in RPC calls "
2015-11-27 19:24:29 +01:00
, false
} ;
2018-01-20 11:38:14 +01:00
  // SSL support (from the "epee: add SSL support" change): RPC connections have optional
  // transparent SSL. An optional private key and certificate file can be passed via the
  // --{rpc,daemon}-ssl-private-key and --{rpc,daemon}-ssl-certificate options, each taking a
  // path to a PEM format private key or certificate respectively. If not given, a temporary
  // self signed certificate is used. SSL can be enabled or disabled with --rpc-ssl, which
  // accepts autodetect (default), disabled or enabled. Access can be restricted to particular
  // certificates with --rpc-ssl-allowed-certificates, which takes a list of paths to PEM
  // encoded certificates; this lets a wallet connect only to the daemon it thinks it is
  // connected to, by forcing SSL and listing the known good certificates.
  // To generate long term certificates:
  //   openssl genrsa -out /tmp/KEY 4096
  //   openssl req -new -key /tmp/KEY -out /tmp/REQ
  //   openssl x509 -req -days 999999 -sha256 -in /tmp/REQ -signkey /tmp/KEY -out /tmp/CERT
  // /tmp/KEY is the private key and /tmp/CERT is the certificate, both in PEM format; /tmp/REQ
  // can be removed, and the last command can be adjusted to set the expiration date, etc.
  // Long term certificates matter little here, since most servers will run with one time
  // temporary self signed certificates anyway. SSL support is transparent, so all communication
  // stays on the existing ports with SSL autodetection: an SSL daemon can be used now, but SSL
  // should not yet be enforced or nothing will talk to it.
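  // Example (illustrative only) of starting the daemon with a long term certificate, using the
  // options declared below:
  //   monerod --rpc-ssl enabled --rpc-ssl-private-key /path/to/key.pem --rpc-ssl-certificate /path/to/cert.pem
  // With no key/certificate given and --rpc-ssl left at its "autodetect" default, a temporary
  // self signed certificate is generated and non-SSL connections are still accepted.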
2018-06-15 00:44:48 +02:00
const command_line : : arg_descriptor < std : : string > core_rpc_server : : arg_rpc_ssl = {
" rpc-ssl "
, " Enable SSL on RPC connections: enabled|disabled|autodetect "
, " autodetect "
} ;
const command_line : : arg_descriptor < std : : string > core_rpc_server : : arg_rpc_ssl_private_key = {
" rpc-ssl-private-key "
, " Path to a PEM format private key "
, " "
} ;
const command_line : : arg_descriptor < std : : string > core_rpc_server : : arg_rpc_ssl_certificate = {
" rpc-ssl-certificate "
, " Path to a PEM format certificate "
, " "
} ;
const command_line : : arg_descriptor < std : : vector < std : : string > > core_rpc_server : : arg_rpc_ssl_allowed_certificates = {
" rpc-ssl-allowed-certificates "
, " List of paths to PEM format certificates of allowed peers (all allowed if empty) "
} ;
const command_line : : arg_descriptor < bool > core_rpc_server : : arg_rpc_ssl_allow_any_cert = {
" rpc-ssl-allow-any-cert "
, " Allow any peer certificate, rather than just those on the allowed list "
, false
} ;
2018-01-20 11:38:14 +01:00
const command_line : : arg_descriptor < std : : string > core_rpc_server : : arg_bootstrap_daemon_address = {
" bootstrap-daemon-address "
, " URL of a 'bootstrap' remote daemon that the connected wallets can use while this daemon is still not fully synced "
, " "
} ;
const command_line : : arg_descriptor < std : : string > core_rpc_server : : arg_bootstrap_daemon_login = {
" bootstrap-daemon-login "
, " Specify username:password for the bootstrap daemon login "
, " "
} ;
2015-01-29 23:10:53 +01:00
} // namespace cryptonote