oxen-core/src/cryptonote_core/blockchain.h


// Copyright (c) 2014-2019, The Monero Project
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers
#pragma once
#include <boost/asio/io_service.hpp>
#include <boost/serialization/serialization.hpp>
#include <boost/serialization/version.hpp>
// Workaround for boost::serialization issue #219
#include <boost/version.hpp>
#if BOOST_VERSION == 107400
#include <boost/serialization/library_version_type.hpp>
#endif
#include <boost/serialization/list.hpp>
#include <boost/multi_index_container.hpp>
#include <boost/multi_index/global_fun.hpp>
#include <boost/multi_index/hashed_index.hpp>
#include <boost/multi_index/member.hpp>
#include <atomic>
#include <functional>
#include <thread>
#include <unordered_map>
#include <unordered_set>
#include "epee/span.h"
#include "epee/string_tools.h"
#include "epee/rolling_median.h"
#include "cryptonote_basic/cryptonote_basic.h"
#include "common/util.h"
#include "cryptonote_protocol/cryptonote_protocol_defs.h"
#include "rpc/core_rpc_server_commands_defs.h"
#include "cryptonote_basic/difficulty.h"
#include "cryptonote_tx_utils.h"
#include "cryptonote_basic/verification_context.h"
#include "crypto/hash.h"
#include "checkpoints/checkpoints.h"
#include "blockchain_db/blockchain_db.h"
#include "blockchain_db/sqlite/db_sqlite.h"
#include "cryptonote_core/oxen_name_system.h"
#include "pulse.h"
struct sqlite3;
namespace service_nodes { class service_node_list; }
namespace tools { class Notify; }
namespace cryptonote
{
struct block_and_checkpoint
{
cryptonote::block block;
checkpoint_t checkpoint;
bool checkpointed;
};
class tx_memory_pool;
struct tx_pool_options;
struct test_options;
/** Declares ways in which the BlockchainDB backend should be told to sync
*
*/
enum blockchain_db_sync_mode
{
db_defaultsync, //!< user didn't specify, use db_async
db_sync, //!< handle syncing calls instead of the backing db, synchronously
db_async, //!< handle syncing calls instead of the backing db, asynchronously
db_nosync //!< Leave syncing up to the backing db (safest, but slowest because of disk I/O)
};
/**
* @brief Callback routine that returns checkpoints data for a specific network type
*
* @param network network type
*
* @return checkpoints data, or an empty view if there are no checkpoints for the given network type
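*
* A minimal sketch of a conforming callback (`embedded_mainnet_checkpoints()`
* is a hypothetical helper returning a statically embedded blob):
* @code
* GetCheckpointsCallback cb = [](cryptonote::network_type net) -> std::string_view {
*   if (net == cryptonote::MAINNET)
*     return embedded_mainnet_checkpoints();
*   return {}; // no checkpoints for other networks
* };
* @endcode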
*/
using GetCheckpointsCallback = std::function<std::string_view(cryptonote::network_type network)>;
/************************************************************************/
/* */
/************************************************************************/
class Blockchain
{
public:
/**
* @brief container for passing a block and metadata about it on the blockchain
*/
struct block_extended_info
{
block_extended_info() = default;
block_extended_info(const alt_block_data_t &src, block const &blk, checkpoint_t const *checkpoint);
block bl; //!< the block
bool checkpointed;
checkpoint_t checkpoint;
uint64_t height; //!< the height of the block in the blockchain
uint64_t block_cumulative_weight; //!< the weight of the block
difficulty_type cumulative_difficulty; //!< the accumulated difficulty after that block
uint64_t already_generated_coins; //!< the total coins minted after that block
};
/**
* @brief Blockchain constructor
*
* @param tx_pool a reference to the transaction pool to be kept by the Blockchain
* @param service_node_list a reference to the service node list
*/
Blockchain(tx_memory_pool& tx_pool, service_nodes::service_node_list& service_node_list);
/**
* @brief Blockchain destructor
*/
~Blockchain();
/**
* @brief Initialize the Blockchain state
*
* @param db a pointer to the backing store to use for the blockchain
* @param ons_db SQLite database handle for the Oxen Name System (ONS)
* @param sqlite_db shared pointer to the blockchain's auxiliary SQLite database
* @param nettype network type
* @param offline true if running offline, else false
* @param test_options test parameters
* @param fixed_difficulty fixed difficulty for testing purposes; 0 means disabled
* @param get_checkpoints if set, will be called to get checkpoints data
*
* @return true on success, false if any initialization steps fail
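*
* A minimal usage sketch (assumes `db`, `ons_db` and `sqlite_db` were opened by
* the caller, and `pool`/`sn_list` exist; error handling elided):
* @code
* Blockchain bc{pool, sn_list};
* if (!bc.init(db, ons_db, sqlite_db, cryptonote::MAINNET))
*   throw std::runtime_error("blockchain init failed");
* @endcode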
*/
bool init(BlockchainDB* db, sqlite3 *ons_db, std::shared_ptr<cryptonote::BlockchainSQLite> sqlite_db, const network_type nettype = MAINNET, bool offline = false, const cryptonote::test_options *test_options = nullptr, difficulty_type fixed_difficulty = 0, const GetCheckpointsCallback& get_checkpoints = nullptr);
/**
* @brief Uninitializes the blockchain state
*
* Saves to disk any state that needs to be maintained
*
* @return true on success, false if any uninitialization steps fail
*/
bool deinit();
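/**
* @brief get blocks from the main chain, optionally with their transaction blobs
*
* @param start_offset the height on the blockchain to start at
* @param count the number of blocks to get, if there are as many after start_offset
* @param blocks return-by-reference container to put result blocks in
* @param txs if non-null, return-by-reference container for the blocks' transaction blobs
*/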
bool get_blocks_only(uint64_t start_offset, size_t count, std::vector<block>& blocks, std::vector<cryptonote::blobdata> *txs = nullptr) const;
/**
* @brief get blocks and transactions from blocks based on start height and count
*
* @param start_offset the height on the blockchain to start at
* @param count the number of blocks to get, if there are as many after start_offset
* @param blocks return-by-reference container to put result blocks in
* @param txs return-by-reference container to put result transactions in
*
* @return false if start_offset > blockchain height, else true
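*
* A small usage sketch (illustrative; `bc` is a Blockchain instance):
* @code
* std::vector<std::pair<cryptonote::blobdata, block>> blocks;
* std::vector<cryptonote::blobdata> txs;
* bool ok = bc.get_blocks(1000, 100, blocks, txs); // blocks 1000..1099 and their txs
* @endcode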
*/
bool get_blocks(uint64_t start_offset, size_t count, std::vector<std::pair<cryptonote::blobdata,block>>& blocks, std::vector<cryptonote::blobdata>& txs) const;
/**
* @brief get blocks from blocks based on start height and count
*
* @param start_offset the height on the blockchain to start at
* @param count the number of blocks to get, if there are as many after start_offset
* @param blocks return-by-reference container to put result blocks in
*
* @return false if start_offset > blockchain height, else true
*/
bool get_blocks(uint64_t start_offset, size_t count, std::vector<std::pair<cryptonote::blobdata,block>>& blocks) const;
/**
* @brief compiles a list of all blocks stored as alternative chains
*
* @param blocks return-by-reference container to put result blocks in
*
* @return true
*/
bool get_alternative_blocks(std::vector<block>& blocks) const;
/**
* @brief returns the number of alternative blocks stored
*
* @return the number of alternative blocks stored
*/
size_t get_alternative_blocks_count() const;
/**
* @brief gets a block's hash given a height
*
* @param height the height of the block
*
* @return the hash of the block at the requested height, or a zeroed hash if there is no such block
*/
crypto::hash get_block_id_by_height(uint64_t height) const;
/**
* @brief gets a block's hash given a height
*
* Used only by prepare_handle_incoming_blocks. Will look in the list of incoming blocks
* if the height is contained there.
*
* @param height the height of the block
*
* @return the hash of the block at the requested height, or a zeroed hash if there is no such block
*/
crypto::hash get_pending_block_id_by_height(uint64_t height) const;
/**
* @brief gets the block with a given hash
*
* @param h the hash to look for
* @param blk return-by-reference variable to put result block in
* @param orphan if non-NULL, will be set to true if not in the main chain, false otherwise
*
* @return true if the block was found, else false
*/
bool get_block_by_hash(const crypto::hash &h, block &blk, bool *orphan = NULL) const;
/**
* @brief gets the block at the given height
*
* @param height the height to look for
* @param blk return-by-reference variable to put result block in
*
* @return true if the block was found, else false
*/
bool get_block_by_height(uint64_t height, block &blk) const;
/**
* @brief performs some preprocessing on a group of incoming blocks to speed up verification
*
* @param blocks_entry a list of incoming blocks
* @param blocks the parsed blocks
*
* @return false on erroneous blocks, else true
*/
bool prepare_handle_incoming_blocks(const std::vector<block_complete_entry> &blocks_entry, std::vector<block> &blocks);
/**
* @brief incoming blocks post-processing, cleanup, and disk sync
*
* @param force_sync if true, and Blockchain is handling syncing to disk, always sync
*
* @return true
*/
bool cleanup_handle_incoming_blocks(bool force_sync = false);
/**
* @brief search the blockchain for a transaction by hash
*
* @param id the hash to search for
*
* @return true if the tx exists, else false
*/
bool have_tx(const crypto::hash &id) const;
/**
* @brief check if any key image in a transaction has already been spent
*
* @param tx the transaction to check
*
* @return true if any key image is already spent in the blockchain, else false
*/
bool have_tx_keyimges_as_spent(const transaction &tx) const;
/**
* @brief check if a key image is already spent on the blockchain
*
* Whenever a transaction output is used as an input for another transaction
* (a true input, not just one of a mixing set), a key image is generated
* and stored in the transaction in order to prevent double spending. If
* this key image is seen again, the transaction using it is rejected.
*
* @param key_im the key image to search for
*
* @return true if the key image is already spent in the blockchain, else false
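*
* Conceptually, the transaction-level check above reduces to a per-input loop
* like this sketch (assumes the std::variant-based txin_v; the real
* implementation may batch its DB lookups):
* @code
* for (const auto& in : tx.vin)
*   if (auto* k = std::get_if<txin_to_key>(&in); k && have_tx_keyimg_as_spent(k->k_image))
*     return true; // double-spend attempt
* return false;
* @endcode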
*/
bool have_tx_keyimg_as_spent(const crypto::key_image &key_im) const;
/**
* @brief get the current height of the blockchain
*
* @param lock if true, lock the blockchain while querying the height
*
* @return the height
*/
uint64_t get_current_blockchain_height(bool lock = false) const;
/**
* @brief get the hash of the most recent block on the blockchain
*
* @return the hash
*/
crypto::hash get_tail_id() const;
/**
* @brief get the height and hash of the most recent block on the blockchain
*
* @param height return-by-reference variable to store the height in
*
* @return the hash
*/
crypto::hash get_tail_id(uint64_t& height) const;
/**
* @brief returns the difficulty target the next block to be added must meet
*
* @param pulse if true, return the difficulty for a Pulse block
*
* @return the target
*/
difficulty_type get_difficulty_for_next_block(bool pulse);
/**
* @brief adds a block to the blockchain
*
* Adds a new block to the blockchain. If the block's parent is not the
* current top of the blockchain, the block may be added to an alternate
* chain. If the block does not belong, is already in the blockchain
* or an alternate chain, or is invalid, return false.
*
* @param bl the block to be added
* @param bvc metadata about the block addition's success/failure
* @param checkpoint optional checkpoint if there is one associated with the block
*
* @return true on successful addition to the blockchain, else false
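*
* A minimal usage sketch (callers normally reach this via the core/protocol
* layers; `handle_rejected_block` is a hypothetical error path):
* @code
* block_verification_context bvc{};
* if (!bc.add_new_block(bl, bvc, nullptr /*no checkpoint*/))
*   handle_rejected_block(bl, bvc); // bvc records why the block was rejected
* @endcode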
*/
bool add_new_block(const block& bl, block_verification_context& bvc, checkpoint_t const *checkpoint);
/**
* @brief clears the blockchain and starts a new one
*
* @param b the first block in the new chain (the genesis block)
*
* @return true on success, else false
*/
bool reset_and_set_genesis_block(const block& b);
/**
* @brief creates a new block to mine against
*
* @param b return-by-reference block to be filled in
* @param from_block optional block hash to start mining from (main chain tip if NULL)
* @param miner_address address new coins for the block will go to
* @param diffic return-by-reference tells the miner what the difficulty target is
* @param height return-by-reference tells the miner what height it's mining against
* @param expected_reward return-by-reference the total reward awarded to the miner finding this block, including transaction fees
* @param ex_nonce extra data to be added to the miner transaction's extra
*
* @return true if block template filled in successfully, else false
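*
* A sketch of building a template for the current chain tip (`miner_address`
* is assumed; the nonce search and block submission are elided):
* @code
* block b;
* difficulty_type diff;
* uint64_t height, expected_reward;
* if (bc.create_next_miner_block_template(b, miner_address, diff, height, expected_reward, {}))
*   mine_and_submit(b, diff); // hypothetical: iterate b.nonce until the PoW hash meets diff
* @endcode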
*/
bool create_miner_block_template(block& b, const crypto::hash *from_block, const account_public_address& miner_address, difficulty_type& diffic, uint64_t& height, uint64_t& expected_reward, const blobdata& ex_nonce);
bool create_next_miner_block_template(block& b, const account_public_address& miner_address, difficulty_type& diffic, uint64_t& height, uint64_t& expected_reward, const blobdata& ex_nonce);
/**
* @brief creates the next block suitable for using in a Pulse enabled network
*
* @param b return-by-reference block to be filled in
* @param block_producer the service node that will receive the block reward
* @param round the current pulse round the block is being generated for
* @param validator_bitset the bitset indicating which validators in the quorum are participating in constructing the block.
* @param height return-by-reference the height the block template is created for
*
* @return true if block template filled in successfully, else false
*/
bool create_next_pulse_block_template(block& b, const service_nodes::payout& block_producer, uint8_t round, uint16_t validator_bitset, uint64_t& height);
/**
* @brief checks if a block is known about with a given hash
*
* This function checks the main chain, alternate chains, and invalid blocks
* for a block with the given hash
*
* @param id the hash to search for
*
* @return true if the block is known, else false
*/
bool have_block(const crypto::hash& id) const;
/**
* @brief gets the total number of transactions on the main chain
*
* @return the number of transactions on the main chain
*/
size_t get_total_transactions() const;
/**
* @brief gets the hashes for a sample of the blockchain
*
* Builds a list of hashes representing certain blocks from the blockchain in reverse
* chronological order for comparison of chains when synchronizing. The sampled blocks include
* the most recent blocks, with spacing that drops off exponentially back to the genesis block.
*
* Specifically, the blocks chosen for height H are:
* - the most recent 11 (H-1, H-2, ..., H-10, H-11)
* - base-2 exponential drop off from there, so: H-13, H-17, H-25, etc... (going down to, at smallest, height 1)
* - the genesis block (height 0)
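*
* A sketch of the height-selection logic (illustrative; the real implementation
* maps each chosen height to its block hash via the DB):
* @code
* std::vector<uint64_t> heights;
* uint64_t h = top_height; // assumed to be H - 1, the current top block
* uint64_t step = 1;
* while (h > 0)
* {
*   heights.push_back(h);
*   if (heights.size() >= 11)
*     step *= 2; // exponential drop-off after the most recent 11
*   h = h > step ? h - step : 0;
* }
* heights.push_back(0); // always include the genesis block
* @endcode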
*/
void get_short_chain_history(std::list<crypto::hash>& ids) const;
/**
* @brief get recent block hashes for a foreign chain
*
* Find the split point between us and a foreign blockchain and return
* (by reference) the most recent common block hash along with up to
* BLOCKS_IDS_SYNCHRONIZING_DEFAULT_COUNT additional (more recent) hashes.
*
* @param qblock_ids the foreign chain's "short history" (see get_short_chain_history)
* @param hashes the hashes to be returned, return-by-reference
* @param start_height the start height, return-by-reference
* @param current_height the current blockchain height, return-by-reference
* @param clip_pruned whether to constrain results to unpruned data
*
* @return true if a block found in common, else false
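*
* Conceptually, the split-point search walks the peer's hashes, newest first,
* until one is found locally (a sketch; the real code also verifies that the
* genesis hashes match):
* @code
* for (const crypto::hash& h : qblock_ids) // newest first
*   if (m_db->block_exists(h))
*   {
*     start_height = m_db->get_block_height(h); // most recent common block
*     break;
*   }
* @endcode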
*/
bool find_blockchain_supplement(const std::list<crypto::hash>& qblock_ids, std::vector<crypto::hash>& hashes, uint64_t& start_height, uint64_t& current_height, bool clip_pruned) const;
/**
* @brief get recent block hashes for a foreign chain
*
* Find the split point between us and a foreign blockchain and return
* (by reference) the most recent common block hash along with up to
* BLOCKS_IDS_SYNCHRONIZING_DEFAULT_COUNT additional (more recent) hashes.
*
* @param qblock_ids the foreign chain's "short history" (see get_short_chain_history)
* @param resp return-by-reference the split height and subsequent blocks' hashes
*
* @return true if a block found in common, else false
*/
bool find_blockchain_supplement(const std::list<crypto::hash>& qblock_ids, NOTIFY_RESPONSE_CHAIN_ENTRY::request& resp) const;
/**
* @brief find the most recent common point between ours and a foreign chain
*
* This function takes a list of block hashes from another node
* on the network to find where the split point is between us and them.
* This is used to see what to send another node that needs to sync.
*
* @param qblock_ids the foreign chain's "short history" (see get_short_chain_history)
* @param starter_offset return-by-reference the most recent common block's height
*
* @return true if a block found in common, else false
*/
bool find_blockchain_supplement(const std::list<crypto::hash>& qblock_ids, uint64_t& starter_offset) const;
/**
* @brief get recent blocks for a foreign chain
*
* This function gets recent blocks relative to a foreign chain, starting either at
* a requested height or at the most recent height that our chain and the
* foreign chain have in common.
*
* @param req_start_block if non-zero, specifies a start point (otherwise find most recent commonality)
* @param qblock_ids the foreign chain's "short history" (see get_short_chain_history)
* @param blocks return-by-reference the blocks and their transactions
* @param total_height return-by-reference our current blockchain height
* @param start_height return-by-reference the height of the first block returned
* @param pruned whether to return full or pruned tx blobs
* @param get_miner_tx_hash whether to also return each block's miner tx hash
* @param max_count the max number of blocks to get
*
* @return true if a block found in common or req_start_block specified, else false
*/
bool find_blockchain_supplement(const uint64_t req_start_block, const std::list<crypto::hash>& qblock_ids, std::vector<std::pair<std::pair<cryptonote::blobdata, crypto::hash>, std::vector<std::pair<crypto::hash, cryptonote::blobdata> > > >& blocks, uint64_t& total_height, uint64_t& start_height, bool pruned, bool get_miner_tx_hash, size_t max_count) const;
/**
* @brief retrieves a set of blocks and their transactions
*
* The request object encapsulates a list of block hashes. For each block hash, the block is
* fetched along with all of that block's transactions.
*
* @param arg the request
* @param rsp return-by-reference the response to fill in
*
* @return true unless any blocks or transactions are missing
*/
bool handle_get_blocks(NOTIFY_REQUEST_GET_BLOCKS::request& arg, NOTIFY_RESPONSE_GET_BLOCKS::request& rsp);
/**
* @brief retrieves a set of transactions
*
* The request object encapsulates a list of tx hashes which are fetched and stored in the
* response object.
*
* @param arg the request
* @param rsp return-by-reference the response to fill in
*
* @return true unless any transactions are missing
*/
bool handle_get_txs(NOTIFY_REQUEST_GET_TXS::request& arg, NOTIFY_NEW_TRANSACTIONS::request& rsp);
/**
* @brief get number of outputs of an amount past the minimum spendable age
*
* @param amount the output amount
*
* @return the number of mature outputs
*/
uint64_t get_num_mature_outputs(uint64_t amount) const;
/**
* @brief get the public key for an output
*
* @param amount the output amount
* @param global_index the output amount-global index
*
* @return the public key
*/
crypto::public_key get_output_key(uint64_t amount, uint64_t global_index) const;
/**
* @brief gets specific outputs to mix with
*
* This function takes an RPC request for outputs to mix with
* and creates an RPC response with the resultant output indices.
*
* Outputs to mix with are specified in the request.
*
* @param req the outputs to return
* @param res return-by-reference the resultant output indices and keys
*
* @return true
*/
bool get_outs(const rpc::GET_OUTPUTS_BIN::request& req, rpc::GET_OUTPUTS_BIN::response& res) const;
/**
* @brief gets an output's key and unlocked state
*
* @param amount in - the output amount
* @param index in - the output global amount index
* @param mask out - the output's RingCT mask
* @param key out - the output's key
* @param unlocked out - the output's unlocked state
*/
void get_output_key_mask_unlocked(const uint64_t& amount, const uint64_t& index, crypto::public_key& key, rct::key& mask, bool& unlocked) const;
/**
* @brief gets per block distribution of outputs of a given amount
*
* @param amount the amount to get a distribution for
* @param from_height the height before which we do not care about the data
* @param to_height the height after which we do not care about the data
* @param return-by-reference start_height the height of the first rct output
* @param return-by-reference distribution the start offset of the first rct output in this block (same as previous if none)
* @param return-by-reference base how many outputs of that amount are before the stated distribution
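*
* Since the returned vector holds cumulative offsets, per-block output counts
* are adjacent differences (sketch, assuming the call returned true):
* @code
* for (size_t i = 1; i < distribution.size(); ++i)
* {
*   uint64_t height = start_height + i;
*   uint64_t outputs_created = distribution[i] - distribution[i - 1];
*   // ... use (height, outputs_created)
* }
* @endcode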
*/
bool get_output_distribution(uint64_t amount, uint64_t from_height, uint64_t to_height, uint64_t &start_height, std::vector<uint64_t> &distribution, uint64_t &base) const;
/**
* @brief gets global output indexes that should not be used, i.e. registration tx outputs
*
* @param return-by-reference blacklist global indexes of rct outputs to ignore
*/
void get_output_blacklist(std::vector<uint64_t> &blacklist) const;
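  // Usage sketch (illustrative only; `bc` and `candidate_gindex` are
  // assumptions): a caller such as decoy output selection can fetch the
  // blacklist once, then skip any global index found in it:
  //
  //     std::vector<uint64_t> blacklist;
  //     bc.get_output_blacklist(blacklist);
  //     std::sort(blacklist.begin(), blacklist.end());  // needs <algorithm>
  //     bool banned = std::binary_search(blacklist.begin(), blacklist.end(), candidate_gindex);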
/**
* @brief gets the global indices for outputs from a given transaction
*
* This function gets the global indices for all outputs belonging
* to a specific transaction.
*
* @param tx_id the hash of the transaction to fetch indices for
* @param indexs return-by-reference the global indices for the transaction's outputs
   * @param n_txes how many consecutive transactions, starting at tx_id, to get results for (second overload only)
*
* @return false if the transaction does not exist, or if no indices are found, otherwise true
*/
bool get_tx_outputs_gindexs(const crypto::hash& tx_id, std::vector<uint64_t>& indexs) const;
bool get_tx_outputs_gindexs(const crypto::hash& tx_id, size_t n_txes, std::vector<std::vector<uint64_t>>& indexs) const;
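  // Usage sketch (illustrative only; `bc` and `tx_hash` are assumptions):
  //
  //     std::vector<uint64_t> out_indices;
  //     if (!bc.get_tx_outputs_gindexs(tx_hash, out_indices))
  //       return;  // tx not found or no indices recorded
  //     // out_indices[i] is the global output index of the tx's i-th output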
/**
* @brief stores the blockchain
*
* If Blockchain is handling storing of the blockchain (rather than BlockchainDB),
* this initiates a blockchain save.
*
* @return true unless saving the blockchain fails
*/
bool store_blockchain();
/**
* @brief validates a transaction's inputs
*
* validates a transaction's inputs as correctly used and not previously
   * spent. Also returns the hash and height of the most recent block
* which contains an output that was used as an input to the transaction.
* The transaction's rct signatures, if any, are expanded.
*
* @param tx the transaction to validate
* @param pmax_used_block_height return-by-reference block height of most recent input
* @param max_used_block_id return-by-reference block hash of most recent input
* @param tvc returned information about tx verification
* @param kept_by_block whether or not the transaction is from a previously-verified block
* @param key_image_conflicts if specified then don't fail on duplicate key images but instead add them here for the caller to decide on
*
* @return false if any input is invalid, otherwise true
*/
bool check_tx_inputs(transaction& tx, uint64_t& pmax_used_block_height, crypto::hash& max_used_block_id, tx_verification_context &tvc, bool kept_by_block = false, std::unordered_set<crypto::key_image>* key_image_conflicts = nullptr);
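  // Usage sketch (illustrative only; `bc` and `tx` are assumptions). Note the
  // non-const tx: its rct signatures, if any, may be expanded in place:
  //
  //     uint64_t max_used_height = 0;
  //     crypto::hash max_used_id{};
  //     tx_verification_context tvc{};
  //     if (!bc.check_tx_inputs(tx, max_used_height, max_used_id, tvc))
  //       handle_invalid(tvc);  // handle_invalid() is hypothetical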
/**
* @brief get fee quantization mask
*
* The dynamic fee may be quantized, to mask out the last decimal places
*
   * @return the fee quantization mask
*/
static uint64_t get_fee_quantization_mask();
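  // Worked example (the mask value and round-up direction are illustrative
  // assumptions, not taken from this header): with a mask of 10000, a raw fee
  // of 123456 would quantize up to 130000:
  //
  //     uint64_t mask = Blockchain::get_fee_quantization_mask();
  //     uint64_t quantized_fee = (raw_fee + mask - 1) / mask * mask;  // raw_fee assumed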
/**
* @brief get dynamic per-kB or -byte fee and the per-output fee for a given
* block weight and hard fork version.
*
* The dynamic per-byte fee is based on the block weight in a past window,
* and the current block reward. It is expressed as a per-kiB value before
* v8, and per byte from v8.
*
* The per-output fee is a fixed amount per output created in the
* transaction beginning in Loki hard fork 13 and will be 0 before v13.
*
* @param block_reward the current block reward
* @param median_block_weight the median block weight in the past window
* @param version hard fork version for rules and constants to use
*
* @return pair of {per-size, per-output} fees
*/
static byte_and_output_fees get_dynamic_base_fee(uint64_t block_reward, size_t median_block_weight, uint8_t version);
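  // Worked example (sizes and counts are illustrative; per the @return above,
  // the result is a {per-size, per-output} pair). Combining the two components
  // for a 2500-byte transaction creating 2 outputs:
  //
  //     auto fees = Blockchain::get_dynamic_base_fee(block_reward, median_weight, hf_version);
  //     uint64_t base_fee = 2500 * fees.first + 2 * fees.second;
  //     // block_reward, median_weight and hf_version are assumed inputs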
/**
* @brief get dynamic per kB or byte fee estimate for the next few blocks
*
* Returns an estimate of the get_dynamic_base_fee() value that will be
* valid for the next `grace_blocks` blocks.
*
* @param grace_blocks number of blocks we want the fee to be valid for
*
* @return the {per-size, per-output} fee estimate
*/
byte_and_output_fees get_dynamic_base_fee_estimate(uint64_t grace_blocks) const;
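  // Usage sketch (illustrative only; `bc`, `tx_weight` and `n_outputs` are
  // assumptions): estimate a fee that should remain valid for roughly the
  // next 10 blocks:
  //
  //     auto est = bc.get_dynamic_base_fee_estimate(10);
  //     uint64_t fee = tx_weight * est.first + n_outputs * est.second;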
/**
* @brief validate a transaction's fee
*
   * This function validates that the fee is sufficient for the transaction.
   * This is based on the weight of the transaction and, after a
   * height threshold, on the average weight of transactions in a past window.
* From Loki v13 the amount must also include a per-output-created fee.
*
* @param tx_weight the transaction weight
* @param tx_outs the number of outputs created in the transaction
* @param fee the miner tx fee (any burned component must already be removed, such as is done by
* get_tx_miner_fee)
* @param burned the amount of coin burned in this tx
* @param opts transaction pool options used to pass through required fee/burn amounts
*
* @return true if the fee is enough, false otherwise
*/
bool check_fee(size_t tx_weight, size_t tx_outs, uint64_t fee, uint64_t burned, const tx_pool_options &opts) const;
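  // Usage sketch (illustrative only; every input here is an assumption, and
  // `opts` stands for a tx_pool_options carrying the required fee/burn
  // parameters):
  //
  //     if (!bc.check_fee(tx_weight, n_outs, miner_fee, burned, opts))
  //       reject_tx();  // hypothetical: fee (after removing any burned component) is too low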
/**
* @brief check that a transaction's outputs conform to current standards
*
* @param tx the transaction to check the outputs of
* @param tvc returned info about tx verification
*
* @return false if any outputs do not conform, otherwise true
*/
bool check_tx_outputs(const transaction& tx, tx_verification_context &tvc) const;
/**
* @brief gets the block weight limit based on recent blocks
*
* @return the limit
*/
uint64_t get_current_cumulative_block_weight_limit() const;
/**
* @brief gets the long term block weight for a new block
   *
   * @param block_weight the weight of the new block
   *
* @return the long term block weight
*/
uint64_t get_next_long_term_block_weight(uint64_t block_weight) const;
/**
* @brief gets the block weight median based on recent blocks (same window as for the limit)
*
* @return the median
*/
uint64_t get_current_cumulative_block_weight_median() const;
/**
* @brief gets the difficulty of the block with a given height
*
* @param i the height
*
* @return the difficulty
*/
uint64_t block_difficulty(uint64_t i) const;
/**
* @brief gets blocks based on a list of block hashes
*
* @param block_ids a vector of block hashes for which to get the corresponding blocks
* @param blocks return-by-reference a vector to store result blocks in
* @param missed_bs return-by-reference a vector to store missed blocks in
*
* @return false if an unexpected exception occurs, else true
*/
bool get_blocks(const std::vector<crypto::hash>& block_ids, std::vector<std::pair<cryptonote::blobdata,block>>& blocks, std::vector<crypto::hash>& missed_bs) const;
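  // Usage sketch (illustrative only; `bc` and `wanted` are assumptions):
  //
  //     std::vector<std::pair<cryptonote::blobdata, block>> blocks;
  //     std::vector<crypto::hash> missed;
  //     if (bc.get_blocks(wanted, blocks, missed) && missed.empty())
  //       ;  // every requested hash was found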
/**
* @brief gets transactions based on a list of transaction hashes
*
* @param txs_ids a vector of hashes for which to get the corresponding transactions
* @param txs return-by-reference a vector to store result transactions in
* @param missed_txs return-by-reference a vector to store missed transactions in
* @param pruned whether to return full or pruned blobs
*
* @return false if an unexpected exception occurs, else true
*/
bool get_transactions_blobs(const std::vector<crypto::hash>& txs_ids, std::vector<blobdata>& txs, std::vector<crypto::hash>& missed_txs, bool pruned = false) const;
bool get_split_transactions_blobs(const std::vector<crypto::hash>& txs_ids, std::vector<std::tuple<crypto::hash, cryptonote::blobdata, crypto::hash, cryptonote::blobdata>>& txs, std::vector<crypto::hash>& missed_txs) const;
bool get_transactions(const std::vector<crypto::hash>& txs_ids, std::vector<transaction>& txs, std::vector<crypto::hash>& missed_txs) const;
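  // Usage sketch (illustrative only; `bc` and `tx_hashes` are assumptions):
  // fetch parsed transactions and report any that were not found:
  //
  //     std::vector<transaction> txs;
  //     std::vector<crypto::hash> missed;
  //     if (bc.get_transactions(tx_hashes, txs, missed))
  //       report_missing(missed);  // report_missing() is hypothetical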
/**
* @brief looks up transactions based on a list of transaction hashes and returns the block
* height in which they were mined, or 0 if not found on the blockchain.
*
* @param txs_ids vector of hashes to look up
*
* @return vector of the same length as txs_ids containing the heights corresponding to the
* given hashes, or 0 if not found.
*/
std::vector<uint64_t> get_transactions_heights(const std::vector<crypto::hash>& txs_ids) const;
  // debug functions
/**
* @brief loads new checkpoints from a file
*
* @param file_path the path of the file to look for and load checkpoints from
*
* @return false if any enforced checkpoint type fails to load, otherwise true
*/
bool update_checkpoints_from_json_file(const fs::path& file_path);
bool update_checkpoint(checkpoint_t const &checkpoint);
bool get_checkpoint(uint64_t height, checkpoint_t &checkpoint) const;
// user options, must be called before calling init()
/**
* @brief sets various performance options
*
* @param maxthreads max number of threads when preparing blocks for addition
* @param sync_on_blocks whether to sync based on blocks or bytes
* @param sync_threshold number of blocks/bytes to cache before syncing to database
* @param sync_mode the ::blockchain_db_sync_mode to use
* @param fast_sync sync using built-in block hashes as trusted
*/
void set_user_options(uint64_t maxthreads, bool sync_on_blocks, uint64_t sync_threshold,
blockchain_db_sync_mode sync_mode, bool fast_sync);
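  // Usage sketch (illustrative only; `bc` is an assumed Blockchain&, and
  // `db_async` is the assumed name of the asynchronous member of
  // ::blockchain_db_sync_mode). Must be called before init():
  //
  //     bc.set_user_options(4 /*threads*/, true /*sync on block count*/,
  //                         1000 /*blocks per sync*/, db_async, false /*fast_sync*/);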
/**
* @brief sets a block notify object to call for every new block
*
* @param notify the notify object to call at every new block
*/
void set_block_notify(const std::shared_ptr<tools::Notify> &notify) { m_block_notify = notify; }
/**
* @brief sets a reorg notify object to call for every reorg
*
* @param notify the notify object to call at every reorg
*/
void set_reorg_notify(const std::shared_ptr<tools::Notify> &notify) { m_reorg_notify = notify; }
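  // Usage sketch (illustrative only; the "%s" placeholder spec handled by
  // tools::Notify is an assumption here, as is the script path):
  //
  //     auto notify = std::make_shared<tools::Notify>("/usr/local/bin/on-block %s");
  //     bc.set_block_notify(notify);  // `bc` is an assumed Blockchain&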
/**
* @brief Put DB in safe sync mode
*/
void safesyncmode(const bool onoff);
/**
* @brief Get the nettype
* @return the nettype
*/
network_type nettype() const { return m_nettype; }
/**
* @brief set whether or not to show/print time statistics
*
* @param stats the new time stats setting
*/
void set_show_time_stats(bool stats) { m_show_time_stats = stats; }
/**
* @brief gets the network hard fork version of the blockchain at the given height.
* If height is omitted, uses the current blockchain height.
*
* @return the version
*/
uint8_t get_network_version(std::optional<uint64_t> height = std::nullopt) const;
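// Illustrative usage (a sketch; `chain` names a Blockchain reference and the
// version number and height are arbitrary): gate consensus logic on the fork
// version in effect at a given height.
//
//   uint8_t hf_now = chain.get_network_version();          // at the current height
//   uint8_t hf_at  = chain.get_network_version(250000);    // at height 250000
//   if (hf_now >= 16) { /* apply post-fork rules */ }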
/**
* @brief remove transactions from the transaction pool (if present)
*
* @param txids a list of hashes of transactions to be removed
*
* @return false if any removals fail, otherwise true
*/
bool flush_txes_from_pool(const std::vector<crypto::hash> &txids);
/**
* @brief return a histogram of outputs on the blockchain
*
* @param amounts optional set of amounts to lookup
* @param unlocked whether to restrict instances to unlocked ones
* @param recent_cutoff timestamp to consider outputs as recent
* @param min_count return only amounts with at least that many instances
*
* @return a map from amount to its instance counts, assumed here to be ordered {total, unlocked, recent}
*/
std::map<uint64_t, std::tuple<uint64_t, uint64_t, uint64_t>> get_output_histogram(const std::vector<uint64_t> &amounts, bool unlocked, uint64_t recent_cutoff, uint64_t min_count = 0) const;
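// Illustrative usage (a sketch; `chain` names a Blockchain reference, the
// amounts are arbitrary, and the {total, unlocked, recent} tuple order is an
// assumption): histogram two denominations, unlocked outputs only, skipping
// amounts with fewer than 10 instances.
//
//   auto hist = chain.get_output_histogram({20000000000, 30000000000},
//                                          /*unlocked=*/true,
//                                          /*recent_cutoff=*/0,
//                                          /*min_count=*/10);
//   uint64_t total_unlocked = 0;
//   for (const auto& [amount, counts] : hist)
//     total_unlocked += std::get<1>(counts);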
/**
* @brief perform a check on all key images in the blockchain
*
* @param std::function the check to perform, pass/fail
*
* @return false if any key image fails the check, otherwise true
*/
bool for_all_key_images(std::function<bool(const crypto::key_image&)>) const;
/**
* @brief perform a check on all blocks in the blockchain in the given range
*
* @param h1 the start height
* @param h2 the end height
* @param std::function the check to perform, pass/fail
*
* @return false if any block fails the check, otherwise true
*/
bool for_blocks_range(const uint64_t& h1, const uint64_t& h2, std::function<bool(uint64_t, const crypto::hash&, const block&)>) const;
/**
* @brief perform a check on all transactions in the blockchain
*
* @param std::function the check to perform, pass/fail
* @param bool pruned whether to return pruned txes only
*
* @return false if any transaction fails the check, otherwise true
*/
bool for_all_transactions(std::function<bool(const crypto::hash&, const cryptonote::transaction&)>, bool pruned) const;
/**
* @brief perform a check on all outputs in the blockchain
*
* @param std::function the check to perform, pass/fail
*
* @return false if any output fails the check, otherwise true
*/
bool for_all_outputs(std::function<bool(uint64_t amount, const crypto::hash &tx_hash, uint64_t height, size_t tx_idx)>) const;
/**
* @brief perform a check on all outputs of a given amount in the blockchain
*
* @param amount the amount to iterate through
* @param std::function the check to perform, pass/fail
*
* @return false if any output fails the check, otherwise true
*/
bool for_all_outputs(uint64_t amount, std::function<bool(uint64_t height)>) const;
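// Illustrative usage of the pass/fail visitor convention shared by the for_*
// methods above (a sketch; `chain` names a Blockchain reference): the callback
// returns false to abort iteration, which makes the whole call return false.
//
//   uint64_t n = 0;
//   bool completed = chain.for_all_key_images(
//       [&n](const crypto::key_image&) {
//         ++n;
//         return true; // keep iterating
//       });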
/// Returns true if we have a BlockchainDB reference, i.e. if we have been initialized
bool has_db() const { return m_db; }
/**
* @brief get a reference to the BlockchainDB in use by Blockchain
*
* @return a reference to the BlockchainDB instance
*/
const BlockchainDB& get_db() const
{
return *m_db;
}
/**
* @brief get a reference to the BlockchainDB in use by Blockchain
*
* @return a reference to the BlockchainDB instance
*/
BlockchainDB& get_db()
{
return *m_db;
}
/// @brief return a reference to the service node list
const service_nodes::service_node_list &get_service_node_list() const { return m_service_node_list; }
/// @brief return a reference to the service node list
service_nodes::service_node_list &get_service_node_list() { return m_service_node_list; }
/**
* @brief get a number of outputs of a specific amount
*
* @param amount the amount
* @param offsets the indices (indexed to the amount) of the outputs
* @param outputs return-by-reference the outputs collected
*/
void output_scan_worker(const uint64_t amount,const std::vector<uint64_t> &offsets,
std::vector<output_data_t> &outputs) const;
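// Illustrative usage (a sketch; `chain` names a Blockchain reference and the
// amount/offsets are arbitrary): bulk-fetch the output data for one amount.
// Sorting the offsets first tends to help database locality for bulk queries.
//
//   std::vector<uint64_t> offsets{93, 7, 12};
//   std::sort(offsets.begin(), offsets.end());
//   std::vector<output_data_t> outs;
//   chain.output_scan_worker(/*amount=*/20000000000, offsets, outs);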
/**
* @brief computes the "short" and "long" hashes for a set of blocks
*
* @param height the height of the first block
* @param blocks the blocks to be hashed
* @param map return-by-reference the hashes for each block
*/
void block_longhash_worker(uint64_t height, const epee::span<const block> &blocks,
std::unordered_map<crypto::hash, crypto::hash> &map) const;
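// Illustrative usage (a sketch; `start_height` and `blocks` are assumed to
// describe a contiguous run of blocks): each worker thread can hash a
// disjoint span, and the per-thread maps can be merged afterwards.
//
//   std::unordered_map<crypto::hash, crypto::hash> hashes;
//   chain.block_longhash_worker(start_height,
//       epee::span<const cryptonote::block>(blocks.data(), blocks.size()),
//       hashes);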
/**
* @brief returns a set of known alternate chains
*
* @return a vector of chains
*/
std::vector<std::pair<block_extended_info,std::vector<crypto::hash>>> get_alternative_chains() const;
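// Illustrative usage (a sketch; `chain` names a Blockchain reference): each
// entry pairs the alternate tip's extended info with the hashes of the blocks
// forming that chain.
//
//   for (const auto& [tip, hashes] : chain.get_alternative_chains())
//     MINFO("alt chain of " << hashes.size() << " blocks, tip height " << tip.height);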
void add_txpool_tx(const crypto::hash &txid, const cryptonote::blobdata &blob, const txpool_tx_meta_t &meta);
void update_txpool_tx(const crypto::hash &txid, const txpool_tx_meta_t &meta);
void remove_txpool_tx(const crypto::hash &txid);
uint64_t get_txpool_tx_count(bool include_unrelayed_txes = true) const;
bool get_txpool_tx_meta(const crypto::hash& txid, txpool_tx_meta_t &meta) const;
bool get_txpool_tx_blob(const crypto::hash& txid, cryptonote::blobdata &bd) const;
cryptonote::blobdata get_txpool_tx_blob(const crypto::hash& txid) const;
bool for_all_txpool_txes(std::function<bool(const crypto::hash&, const txpool_tx_meta_t&, const cryptonote::blobdata*)>, bool include_blob = false, bool include_unrelayed_txes = true) const;
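// Illustrative usage (a sketch; `chain` names a Blockchain reference):
// enumerate relayed pool transactions with their blobs; returning false from
// the visitor aborts early and makes the call return false.
//
//   chain.for_all_txpool_txes(
//       [](const crypto::hash&, const txpool_tx_meta_t&,
//          const cryptonote::blobdata* blob) {
//         return blob != nullptr; // abort if a blob is unexpectedly missing
//       },
//       /*include_blob=*/true, /*include_unrelayed_txes=*/false);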
bool is_within_compiled_block_hash_area(uint64_t height) const;
bool is_within_compiled_block_hash_area() const { return is_within_compiled_block_hash_area(m_db->height()); }
uint64_t prevalidate_block_hashes(uint64_t height, const std::vector<crypto::hash> &hashes);
uint32_t get_blockchain_pruning_seed() const { return m_db->get_blockchain_pruning_seed(); }
bool prune_blockchain(uint32_t pruning_seed = 0);
bool update_blockchain_pruning();
bool check_blockchain_pruning();
uint64_t get_immutable_height() const;
bool calc_batched_governance_reward(uint64_t height, uint64_t &reward) const;
void lock() const { m_blockchain_lock.lock(); }
void unlock() const { m_blockchain_lock.unlock(); }
bool try_lock() const { return m_blockchain_lock.try_lock(); }
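// These three methods satisfy the standard Lockable requirements, so the
// object works directly with the standard RAII helpers (a sketch; `chain`
// names a Blockchain reference):
//
//   std::unique_lock lock{chain}; // exclusive access until end of scope
//
// and, assuming the tx pool exposes the same interface, both can be taken
// together without deadlock via std::scoped_lock{chain, pool}.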
void cancel();
/**
* @brief called when we see a tx originating from a block
*
* Used for handling txes from historical blocks in a fast way
*/
void on_new_tx_from_block(const cryptonote::transaction &tx);
/**
* @brief add a hook for processing new blocks and rollbacks for reorgs
*
* TODO: replace these with more versatile std::functions
*/
void hook_block_added (BlockAddedHook& hook) { m_block_added_hooks.push_back(&hook); }
void hook_blockchain_detached(BlockchainDetachedHook& hook) { m_blockchain_detached_hooks.push_back(&hook); }
void hook_init (InitHook& hook) { m_init_hooks.push_back(&hook); }
void hook_validate_miner_tx (ValidateMinerTxHook& hook) { m_validate_miner_tx_hooks.push_back(&hook); }
void hook_alt_block_added (AltBlockAddedHook& hook) { m_alt_block_added_hooks.push_back(&hook); }
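// Illustrative registration (a sketch; the hook interfaces are declared
// earlier in this header, and my_hook's virtual signature below is an
// assumption that may differ from the real BlockAddedHook):
//
//   struct my_hook final : BlockAddedHook {
//     bool block_added(const block& blk, const std::vector<transaction>& txs,
//                      const checkpoint_t* checkpoint) override { return true; }
//   };
//   static my_hook hook; // must outlive the Blockchain: only a pointer is stored
//   chain.hook_block_added(hook);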
/**
* @brief returns the timestamps of the last N blocks
*/
std::vector<time_t> get_last_block_timestamps(unsigned int blocks) const;
/**
* @brief removes blocks from the top of the blockchain
*
* @param nblocks number of blocks to be removed
*/
void pop_blocks(uint64_t nblocks);
/**
* Rolls back the blockchain to the given height when necessary for admitting blink
* transactions.
*/
bool blink_rollback(uint64_t rollback_height);
ons::name_system_db &name_system_db() { return m_ons_db; }
const ons::name_system_db &name_system_db() const { return m_ons_db; }
std::shared_ptr<cryptonote::BlockchainSQLite> sqlite_db() { return m_sqlite_db; }
/**
* @brief flush the invalid blocks set
*/
void flush_invalid_blocks();
#ifndef IN_UNIT_TESTS
private:
#endif
struct block_pow_verified
{
  bool valid;                  // true if the block passed its PoW (or checkpoint) check
  bool precomputed;            // long hash came from the precomputed table rather than being computed here
  bool per_block_checkpointed; // accepted via an embedded per-block checkpoint hash instead of full PoW
  crypto::hash proof_of_work;  // the proof-of-work hash that was checked
};
/**
* @brief Verify block proof of work against the expected difficulty
*/
block_pow_verified verify_block_pow (cryptonote::block const &blk, difficulty_type difficulty, uint64_t chain_height, bool alt_block);
bool basic_block_checks(cryptonote::block const &blk, bool alt_block);
struct block_template_info
{
bool is_miner;
account_public_address miner_address;
service_nodes::payout service_node_payout;
};
bool create_block_template_internal(block& b, const crypto::hash *from_block, block_template_info const &info, difficulty_type& di, uint64_t& height, uint64_t& expected_reward, const blobdata& ex_nonce);
bool load_missing_blocks_into_oxen_subsystems();
// TODO: evaluate whether or not each of these typedefs are left over from blockchain_storage
typedef std::unordered_set<crypto::key_image> key_images_container;
typedef std::vector<block_extended_info> blocks_container;
typedef std::unordered_map<crypto::hash, block_extended_info> blocks_ext_by_hash;
BlockchainDB* m_db;
tx_memory_pool& m_tx_pool;
service_nodes::service_node_list& m_service_node_list;
ons::name_system_db m_ons_db;
std::shared_ptr<cryptonote::BlockchainSQLite> m_sqlite_db;
mutable std::recursive_mutex m_blockchain_lock; // TODO: add here reader/writer lock
// main chain
size_t m_current_block_cumul_weight_limit;
size_t m_current_block_cumul_weight_median;
// metadata containers
std::unordered_map<crypto::hash, std::unordered_map<crypto::key_image, std::vector<output_data_t>>> m_scan_table;
std::unordered_map<crypto::hash, crypto::hash> m_blocks_longhash_table;
// Keccak hashes for each block and for fast pow checking
std::vector<crypto::hash> m_blocks_hash_of_hashes;
std::vector<crypto::hash> m_blocks_hash_check;
std::vector<crypto::hash> m_blocks_txs_check;
blockchain_db_sync_mode m_db_sync_mode;
bool m_fast_sync;
bool m_show_time_stats;
bool m_db_default_sync;
bool m_db_sync_on_blocks;
uint64_t m_db_sync_threshold;
uint64_t m_max_prepare_blocks_threads;
uint64_t m_fake_pow_calc_time;
uint64_t m_fake_scan_time;
uint64_t m_sync_counter;
uint64_t m_bytes_to_sync;
uint64_t m_long_term_block_weights_window;
uint64_t m_long_term_effective_median_block_weight;
mutable crypto::hash m_long_term_block_weights_cache_tip_hash;
mutable epee::misc_utils::rolling_median_t<uint64_t> m_long_term_block_weights_cache_rolling_median;
// NOTE: PoW/Difficulty Cache
// Before HF16, we use timestamps and difficulties only.
// From HF16 onward, we check the state of block production under Pulse and
// return either the PoW difficulty or the fixed Pulse difficulty, depending
// on whether we're still eligible for Pulse blocks.
struct
{
std::mutex m_difficulty_lock;
// NOTE: PoW Difficulty Calculation Metadata
std::vector<uint64_t> m_timestamps;
std::vector<difficulty_type> m_difficulties;
// NOTE: Cache Invalidation Checks
uint64_t m_timestamps_and_difficulties_height{0};
crypto::hash m_difficulty_for_next_block_top_hash{crypto::null_hash};
difficulty_type m_difficulty_for_next_miner_block{1};
} m_cache;
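// Hedged sketch (illustration only, not part of this interface): how the
// cache above can avoid recomputing difficulty while the chain tip is
// unchanged. get_tail_id() is a real accessor; the surrounding logic is an
// assumption for documentation purposes.
//
//   difficulty_type difficulty_for_next_block_cached()
//   {
//     std::lock_guard lock{m_cache.m_difficulty_lock};
//     crypto::hash top = get_tail_id();
//     if (top == m_cache.m_difficulty_for_next_block_top_hash)
//       return m_cache.m_difficulty_for_next_miner_block; // cache hit
//     // otherwise recompute from m_timestamps/m_difficulties, then store
//     // the new top hash and difficulty before returning.
//   }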
boost::asio::io_service m_async_service;
std::thread m_async_thread;
std::unique_ptr<boost::asio::io_service::work> m_async_work_idle;
// some invalid blocks
std::set<crypto::hash> m_invalid_blocks;
std::vector<BlockAddedHook*> m_block_added_hooks;
std::vector<BlockchainDetachedHook*> m_blockchain_detached_hooks;
std::vector<InitHook*> m_init_hooks;
std::vector<ValidateMinerTxHook*> m_validate_miner_tx_hooks;
std::vector<AltBlockAddedHook*> m_alt_block_added_hooks;
checkpoints m_checkpoints;
network_type m_nettype;
bool m_offline;
difficulty_type m_fixed_difficulty;
std::atomic<bool> m_cancel;
// block template cache
block m_btc;
account_public_address m_btc_address;
blobdata m_btc_nonce;
uint64_t m_btc_height;
uint64_t m_btc_pool_cookie;
uint64_t m_btc_expected_reward;
bool m_btc_valid;
bool m_batch_success;
std::shared_ptr<tools::Notify> m_block_notify;
std::shared_ptr<tools::Notify> m_reorg_notify;
// for prepare_handle_incoming_blocks
uint64_t m_prepare_height;
uint64_t m_prepare_nblocks;
std::vector<block> *m_prepare_blocks;
std::chrono::steady_clock::time_point last_outdated_warning = {};
std::mutex last_outdated_warning_mutex;
/**
* @brief collects the keys for all outputs being "spent" as an input
*
* This function makes sure that each ring member ("mixin") referenced by
* the input exists, and collects the public key for each one from the
* transaction it was included in, via the visitor passed to it.
*
* If pmax_related_block_height is not NULL, its value is set to the height
* of the most recent block which contains an output used in the input set
*
* @tparam visitor_t a visitor type whose callbacks check that an output is unlocked and collect its key
* @param tx_in_to_key a transaction input instance
* @param vis an instance of the visitor to use
* @param tx_prefix_hash the hash of the associated transaction_prefix
* @param pmax_related_block_height return-by-pointer the height of the most recent block in the input set
*
* @return false if any keys are not found or any inputs are not unlocked, otherwise true
*/
template<class visitor_t>
bool scan_outputkeys_for_indexes(const txin_to_key& tx_in_to_key, visitor_t &vis, const crypto::hash &tx_prefix_hash, uint64_t* pmax_related_block_height = NULL) const;
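// Hedged sketch of a minimal visitor for scan_outputkeys_for_indexes
// (hypothetical type, for documentation only; it mirrors the documented
// contract rather than any concrete type in this header):
//
//   struct outputs_visitor
//   {
//     std::vector<rct::ctkey> &m_output_keys;
//     // called once per ring member; return false to abort the scan
//     bool handle_output(uint64_t unlock_time, const crypto::public_key &pubkey, const rct::key &commitment)
//     {
//       // e.g. check is_output_spendtime_unlocked(unlock_time), then collect:
//       m_output_keys.push_back(rct::ctkey{rct::pk2rct(pubkey), commitment});
//       return true;
//     }
//   };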
/**
* @brief collect output public keys of a transaction input set
*
* This function locates all outputs associated with a given input set (mixins)
* and validates that they exist and are usable
* (unlocked, unspent is checked elsewhere).
*
* If pmax_related_block_height is not NULL, its value is set to the height
* of the most recent block which contains an output used in the input set
*
* @param txin the transaction input
* @param tx_prefix_hash the transaction prefix hash, for caching organization
* @param output_keys return-by-reference the public keys of the outputs in the input set
* @param pmax_related_block_height return-by-pointer the height of the most recent block in the input set
*
* @return false if any output is not yet unlocked, or is missing, otherwise true
*/
bool check_tx_input(const txin_to_key& txin, const crypto::hash& tx_prefix_hash, std::vector<rct::ctkey> &output_keys, uint64_t* pmax_related_block_height);
/**
* @brief validate a transaction's inputs and their keys
*
* This function validates transaction inputs and their keys. Previously
* it also performed double spend checking, but that has been moved to its
* own function.
* The transaction's rct signatures, if any, are expanded.
*
* If pmax_related_block_height is not NULL, its value is set to the height
* of the most recent block which contains an output used in any input set
*
* Currently this function calls ring signature validation for each
* transaction.
*
*
* This fails if called on a non-standard metadata transaction such as a deregister; you
* generally want to call check_tx() instead, which calls this if appropriate.
*
* @param tx the transaction to validate
* @param tvc returned information about tx verification
* @param pmax_used_block_height return-by-pointer the height of the most recent block in the input set
* @param key_image_conflicts if specified then don't fail on duplicate key images but instead add them here for the caller to decide on
*
* @return false if any validation step fails, otherwise true
*/
bool check_tx_inputs(transaction& tx, tx_verification_context &tvc, uint64_t* pmax_used_block_height = nullptr, std::unordered_set<crypto::key_image>* key_image_conflicts = nullptr);
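// Hedged usage sketch (`tx` and the local names are assumptions): callers
// inspect tvc on failure, and may pass a conflict set to defer key-image
// double-spend handling to themselves:
//
//   tx_verification_context tvc{};
//   std::unordered_set<crypto::key_image> conflicts;
//   if (!check_tx_inputs(tx, tvc, nullptr, &conflicts))
//     { /* reject: the reason is recorded in tvc */ }
//   else if (!conflicts.empty())
//     { /* inputs are valid, but these key images are already spent */ }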
/**
* @brief performs a blockchain reorganization according to the longest chain rule
*
* This function aggregates all the actions necessary to switch to a
* newly-longer chain. If any step in the reorganization process fails,
* the blockchain is reverted to its previous state.
*
* @param alt_chain the chain to switch to
* @param keep_disconnected_chain whether or not to keep the old chain as an alternate
*
* @return false if the reorganization fails, otherwise true
*/
bool switch_to_alternative_blockchain(const std::list<block_extended_info>& alt_chain, bool keep_disconnected_chain);
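// Hedged sketch of the steps this function aggregates (the exact ordering
// and error handling live in blockchain.cpp):
//
//   1. pop blocks off the main chain back to the fork point, remembering
//      them for a possible rollback;
//   2. append each block in alt_chain via handle_block_to_main_chain;
//   3. on any failure, restore the old chain with
//      rollback_blockchain_switching;
//   4. if keep_disconnected_chain, re-add the popped blocks as an
//      alternative chain.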
/**
* @brief removes the most recent block from the blockchain
*
* @return the block removed
*/
block pop_block_from_blockchain();
/**
* @brief validate and add a new block to the end of the blockchain
*
* When a block is given to Blockchain to be added to the blockchain, it
* is passed here if it is determined to belong at the end of the current
* chain.
*
* @param bl the block to be added
* @param id the hash of the block
* @param bvc metadata concerning the block's validity
* @param checkpoint optional checkpoint for this block, if it has one
* @param notify if set to true, sends new block notification on success
*
* @return true if the block was added successfully, otherwise false
*/
bool handle_block_to_main_chain(const block& bl, const crypto::hash& id, block_verification_context& bvc, checkpoint_t const *checkpoint, bool notify = true);
/**
* @brief validate and add a new block to an alternate blockchain
*
* If a block to be added does not belong to the main chain, but there
* is an alternate chain to which it should be added, that is handled
* here.
*
* @param b the block to be added
* @param id the hash of the block
* @param bvc metadata concerning the block's validity
* @param checkpoint optional checkpoint for this block, if it has one
*
* @return true if the block was added successfully, otherwise false
*/
bool handle_alternative_block(const block& b, const crypto::hash& id, block_verification_context& bvc, checkpoint_t const *checkpoint);
/**
* @brief builds a list of blocks connecting a block to the main chain
*
* @param prev_id the block hash of the tip of the alt chain
* @param alt_chain the chain to be added to
* @param timestamps returns the timestamps of previous blocks
* @param bvc the block verification context for error return
*
* @return true on success, false otherwise
*/
bool build_alt_chain(const crypto::hash &prev_id, std::list<block_extended_info>& alt_chain, std::vector<uint64_t> &timestamps, block_verification_context& bvc, int *num_alt_checkpoints, int *num_checkpoints);
/**
* @brief gets the difficulty requirement for a new block on an alternate chain
*
* @param alt_chain the chain to be added to
* @param height the height at which the new block would be added
* @param pulse whether the block to be added was generated by Pulse
*
* @return the difficulty requirement
*/
difficulty_type get_difficulty_for_alternative_chain(const std::list<block_extended_info>& alt_chain, uint64_t height, bool pulse) const;
/**
* @brief sanity checks a miner transaction before validating an entire block
*
* This function merely checks basic things like the structure of the miner
* transaction, the unlock time, and that the amount doesn't overflow.
*
* @param b the block containing the miner transaction
* @param height the height at which the block will be added
* @param hf_version the consensus rules to apply
*
* @return false if anything is found wrong with the miner transaction, otherwise true
*/
bool prevalidate_miner_transaction(const block& b, uint64_t height, uint8_t hf_version);
/**
* @brief validates a miner (coinbase) transaction
*
* This function makes sure that the miner calculated his reward correctly
* and that his miner transaction totals reward + fee.
*
* @param b the block containing the miner transaction to be validated
* @param cumulative_block_weight the block's weight
* @param fee the total fees collected in the block
* @param base_reward return-by-reference the new block's generated coins
* @param already_generated_coins the amount of currency generated prior to this block
* @param version hard fork version for that transaction
*
* @return false if anything is found wrong with the miner transaction, otherwise true
*/
bool validate_miner_transaction(const block& b, size_t cumulative_block_weight, uint64_t fee, uint64_t& base_reward, uint64_t already_generated_coins, uint8_t version);
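// The core invariant, as a hedged sketch (the real check also accounts for
// Oxen governance and service-node payouts in blockchain.cpp):
//
//   uint64_t money_in_use = 0;
//   for (const auto &o : b.miner_tx.vout)
//     money_in_use += o.amount;
//   if (money_in_use > base_reward + fee)
//     return false; // the miner claimed more than reward + fees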
/**
* @brief reverts the blockchain to its previous state following a failed switch
*
* If Blockchain fails to switch to an alternate chain when it means
* to do so, this function reverts the blockchain to how it was before
* the attempted switch.
*
* @param original_chain the chain to switch back to
* @param rollback_height the height to revert to before appending the original chain
*
* @return false if something goes wrong with reverting (very bad), otherwise true
*/
bool rollback_blockchain_switching(const std::list<block_and_checkpoint>& original_chain, uint64_t rollback_height);
/**
* @brief gets recent block weights for median calculation
*
* get the block weights of the last <count> blocks, and return by reference <weights>.
*
* @param weights return-by-reference the list of weights
* @param count the number of blocks to get weights for
*/
void get_last_n_blocks_weights(std::vector<uint64_t>& weights, size_t count) const;
/**
* @brief gets block long term weight median
*
* get the block long term weight median of <count> blocks starting at <start_height>
*
* @param start_height the block height of the first block to query
* @param count the number of blocks to get weights for
*
* @return the long term median block weight
*/
uint64_t get_long_term_block_weight_median(uint64_t start_height, size_t count) const;
/**
* @brief checks if a transaction is unlocked (its outputs spendable)
*
* This function checks to see if an output is unlocked.
* unlock_time is either a block index or a unix time.
*
* @param unlock_time the unlock parameter (height or time)
*
* @return true if spendable, otherwise false
*/
bool is_output_spendtime_unlocked(uint64_t unlock_time) const;
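// Hedged sketch of the height-vs-time discrimination described above
// (CRYPTONOTE_MAX_BLOCK_NUMBER and the two delta constants come from
// cryptonote_config.h; the grace windows shown are the classic CryptoNote
// rule):
//
//   if (unlock_time < CRYPTONOTE_MAX_BLOCK_NUMBER) // interpreted as a height
//     return get_current_blockchain_height() - 1 + CRYPTONOTE_LOCKED_TX_ALLOWED_DELTA_BLOCKS >= unlock_time;
//   else                                           // interpreted as a unix time
//     return get_adjusted_time() + CRYPTONOTE_LOCKED_TX_ALLOWED_DELTA_SECONDS >= unlock_time;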
/**
* @brief stores an invalid block in a separate container
*
* Storing invalid blocks allows quick dismissal of the same block
* if it is seen again.
*
* @param block the invalid block
*
* @return false if the block cannot be stored for some reason, otherwise true
*/
bool add_block_as_invalid(const cryptonote::block &block);
/**
* @brief checks a block's timestamp
*
* This function grabs the timestamps from the most recent <n> blocks,
* where n = BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW. If there are not that many
* blocks in the blockchain, the timestamp is assumed to be valid. If there
* are, this function returns:
* true if the block's timestamp is not less than the timestamp of the
* median of the selected blocks
* false otherwise
*
* @param b the block to be checked
* @param median_ts return-by-reference the median of timestamps
*
* @return true if the block's timestamp is valid, otherwise false
*/
bool check_block_timestamp(const block& b, uint64_t& median_ts) const;
bool check_block_timestamp(const block& b) const { uint64_t median_ts; return check_block_timestamp(b, median_ts); }
/**
* @brief checks a block's timestamp
*
* If the block is not more recent than the median of the recent
* timestamps passed here, it is considered invalid.
*
* @param timestamps a list of the most recent timestamps to check against
* @param b the block to be checked
*
* @return true if the block's timestamp is valid, otherwise false
*/
bool check_block_timestamp(std::vector<uint64_t> timestamps, const block& b, uint64_t& median_ts) const;
bool check_block_timestamp(std::vector<uint64_t> timestamps, const block& b) const { uint64_t median_ts; return check_block_timestamp(std::move(timestamps), b, median_ts); }
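// Hedged sketch of the median rule both overloads implement (the window
// size is BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW; the exact bounds are in
// blockchain.cpp):
//
//   std::sort(timestamps.begin(), timestamps.end());
//   median_ts = epee::misc_utils::median(timestamps);
//   if (b.timestamp < median_ts)
//     return false; // older than the median of recent blocks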
/**
* @brief get the "adjusted time"
*
* Currently this simply returns the current time according to the
* user's machine.
*
* @return the current time
*/
uint64_t get_adjusted_time() const;
/**
* @brief finish an alternate chain's timestamp window from the main chain
*
* for an alternate chain, get the timestamps from the main chain to complete
* the needed number of timestamps for the BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW.
*
* @param start_height the alternate chain's attachment height to the main chain
* @param timestamps return-by-reference the timestamps vector to be populated
*
* @return true unless start_height is greater than the current blockchain height
*/
bool complete_timestamps_vector(uint64_t start_height, std::vector<uint64_t>& timestamps) const;
/**
* @brief calculate the block weight limit for the next block to be added
*
* @param long_term_effective_median_block_weight optionally return that value
*
* @return true
*/
bool update_next_cumulative_weight_limit(uint64_t *long_term_effective_median_block_weight = NULL);
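// Hedged sketch of the weight rule this implements, per the ArticMine
// long-term block weight algorithm (constants are illustrative of the
// published spec; the authoritative values live in the implementation):
//
//   long_term_median = max(300000, median_100000_blocks(long_term_block_weight));
//   effective_median = min(max(300000, median_100_blocks(block_weight)),
//                          50 * long_term_median);
//   next_weight_limit = 2 * effective_median; // larger blocks pay a penalty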
void return_tx_to_pool(std::vector<std::pair<transaction, blobdata>> &txs);
/**
* @brief make sure a transaction isn't attempting a double-spend
*
* @param tx the transaction to check
* @param keys_this_block a cumulative list of spent keys for the current block
*
* @return false if a double spend was detected, otherwise true
*/
bool check_for_double_spend(const transaction& tx, key_images_container& keys_this_block) const;
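// Hedged sketch of the containment check (keys_this_block accumulates key
// images across the whole block being verified):
//
//   for (/* each txin_to_key `ink` in tx.vin */)
//     if (!keys_this_block.insert(ink.k_image).second)
//       return false; // key image already seen in this block => double spend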
/**
* @brief validates a transaction input's ring signature
*
* @param tx_prefix_hash the transaction prefix' hash
* @param key_image the key image generated from the true input
* @param pubkeys the public keys for each input in the ring signature
* @param sig the signature generated for each input in the ring signature
* @param result return-by-reference set to zero if the ring signature is invalid, otherwise nonzero
*/
void check_ring_signature(const crypto::hash &tx_prefix_hash, const crypto::key_image &key_image,
const std::vector<rct::ctkey> &pubkeys, const std::vector<crypto::signature> &sig, uint64_t &result) const;
/**
* @brief loads block hashes from compiled-in data set
*
* A (possibly empty) set of block hashes can be compiled into the
* monero daemon binary. This function loads those hashes into
* a useful state.
*
* @param get_checkpoints if set, will be called to get checkpoints data
*/
void load_compiled_in_block_hashes(const GetCheckpointsCallback& get_checkpoints);
/**
* @brief expands v2 transaction data from blockchain
*
* RingCT transactions do not transmit some of their data if it
* can be reconstituted by the receiver. This function expands
* that implicit data.
*/
bool expand_transaction_2(transaction &tx, const crypto::hash &tx_prefix_hash, const std::vector<std::vector<rct::ctkey>> &pubkeys) const;
/**
* @brief invalidates any cached block template
*/
void invalidate_block_template_cache();
/**
* @brief stores a new cached block template
*
* At some point, may be used to push an update to miners
*/
void cache_block_template(const block &b, const cryptonote::account_public_address &address, const blobdata &nonce, const difficulty_type &diff, uint64_t height, uint64_t expected_reward, uint64_t pool_cookie);
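// Hedged sketch of how the m_btc_* cache above is consulted
// (create_block_template-style logic; the local names are assumptions). The
// pool cookie identifies the txpool state the template was built against,
// so any pool change invalidates the cache:
//
//   if (m_btc_valid && m_btc_address == miner_address &&
//       m_btc_nonce == extra_nonce && m_btc_pool_cookie == pool_cookie)
//     b = m_btc; // reuse the cached template
//   else
//     invalidate_block_template_cache(); // fall through and rebuild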
};
} // namespace cryptonote