2019-03-05 21:05:34 +00:00
|
|
|
// Copyright (c) 2014-2019, The Monero Project
|
2015-12-14 04:54:39 +00:00
|
|
|
//
|
2014-10-06 23:46:25 +00:00
|
|
|
// All rights reserved.
|
2015-12-14 04:54:39 +00:00
|
|
|
//
|
2014-10-06 23:46:25 +00:00
|
|
|
// Redistribution and use in source and binary forms, with or without modification, are
|
|
|
|
// permitted provided that the following conditions are met:
|
2015-12-14 04:54:39 +00:00
|
|
|
//
|
2014-10-06 23:46:25 +00:00
|
|
|
// 1. Redistributions of source code must retain the above copyright notice, this list of
|
|
|
|
// conditions and the following disclaimer.
|
2015-12-14 04:54:39 +00:00
|
|
|
//
|
2014-10-06 23:46:25 +00:00
|
|
|
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
|
|
|
|
// of conditions and the following disclaimer in the documentation and/or other
|
|
|
|
// materials provided with the distribution.
|
2015-12-14 04:54:39 +00:00
|
|
|
//
|
2014-10-06 23:46:25 +00:00
|
|
|
// 3. Neither the name of the copyright holder nor the names of its contributors may be
|
|
|
|
// used to endorse or promote products derived from this software without specific
|
|
|
|
// prior written permission.
|
2015-12-14 04:54:39 +00:00
|
|
|
//
|
2014-10-06 23:46:25 +00:00
|
|
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
|
|
|
|
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
|
|
|
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
|
|
|
|
// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
|
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
|
|
|
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
|
|
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
|
|
|
|
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
|
|
|
|
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
2014-10-15 22:33:53 +00:00
|
|
|
#ifndef BLOCKCHAIN_DB_H
|
|
|
|
#define BLOCKCHAIN_DB_H
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-02-08 15:51:57 +00:00
|
|
|
#pragma once
|
|
|
|
|
2014-10-06 23:46:25 +00:00
|
|
|
#include <string>
|
|
|
|
#include <exception>
|
2017-08-29 10:14:42 +00:00
|
|
|
#include <boost/program_options.hpp>
|
|
|
|
#include "common/command_line.h"
|
2014-10-06 23:46:25 +00:00
|
|
|
#include "crypto/hash.h"
|
2017-11-25 22:25:05 +00:00
|
|
|
#include "cryptonote_basic/blobdatatype.h"
|
2017-01-26 15:07:23 +00:00
|
|
|
#include "cryptonote_basic/cryptonote_basic.h"
|
|
|
|
#include "cryptonote_basic/difficulty.h"
|
2017-03-10 01:20:38 +00:00
|
|
|
#include "cryptonote_basic/hardfork.h"
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/** \file
|
|
|
|
* Cryptonote Blockchain Database Interface
|
2014-10-06 23:46:25 +00:00
|
|
|
*
|
|
|
|
* The DB interface is a store for the canonical block chain.
|
|
|
|
* It serves as a persistent storage for the blockchain.
|
|
|
|
*
|
2016-03-25 06:22:06 +00:00
|
|
|
* For the sake of efficiency, a concrete implementation may also
|
2014-10-06 23:46:25 +00:00
|
|
|
* store some blockchain data outside of the blocks, such as spent
|
|
|
|
* transfer key images, unspent transaction outputs, etc.
|
|
|
|
*
|
2016-03-25 06:22:06 +00:00
|
|
|
* Examples are as follows:
|
|
|
|
*
|
2014-10-06 23:46:25 +00:00
|
|
|
* Transactions are duplicated so that we don't have to fetch a whole block
|
2016-03-25 06:22:06 +00:00
|
|
|
* in order to fetch a transaction from that block.
|
2014-10-06 23:46:25 +00:00
|
|
|
*
|
|
|
|
* Spent key images are duplicated outside of the blocks so it is quick
|
|
|
|
* to verify an output hasn't already been spent
|
|
|
|
*
|
|
|
|
* Unspent transaction outputs are duplicated to quickly gather random
|
|
|
|
* outputs to use for mixins
|
|
|
|
*
|
2016-04-04 01:10:58 +00:00
|
|
|
* Indices and Identifiers:
|
|
|
|
* The word "index" is used ambiguously throughout this code. It is
|
|
|
|
* particularly confusing when talking about the output or transaction
|
|
|
|
* tables since their indexing can refer to themselves or each other.
|
|
|
|
* I have attempted to clarify these usages here:
|
|
|
|
*
|
|
|
|
* Blocks, transactions, and outputs are all identified by a hash.
|
|
|
|
* For storage efficiency, a 64-bit integer ID is used instead of the hash
|
|
|
|
* inside the DB. Tables exist to map between hash and ID. A block ID is
|
|
|
|
* also referred to as its "height". Transactions and outputs generally are
|
|
|
|
* not referred to by ID outside of this module, but the tx ID is returned
|
|
|
|
* by tx_exists() and used by get_tx_amount_output_indices(). Like their
|
|
|
|
* corresponding hashes, IDs are globally unique.
|
|
|
|
*
|
|
|
|
* The remaining uses of the word "index" refer to local offsets, and are
|
|
|
|
* not globally unique. An "amount output index" N refers to the Nth output
|
|
|
|
* of a specific amount. An "output local index" N refers to the Nth output
|
|
|
|
* of a specific tx.
|
|
|
|
*
|
2014-10-06 23:46:25 +00:00
|
|
|
* Exceptions:
|
|
|
|
* DB_ERROR -- generic
|
|
|
|
* DB_OPEN_FAILURE
|
|
|
|
* DB_CREATE_FAILURE
|
|
|
|
* DB_SYNC_FAILURE
|
|
|
|
* BLOCK_DNE
|
|
|
|
* BLOCK_PARENT_DNE
|
|
|
|
* BLOCK_EXISTS
|
|
|
|
* BLOCK_INVALID -- considering making this multiple errors
|
|
|
|
* TX_DNE
|
|
|
|
* TX_EXISTS
|
|
|
|
* OUTPUT_DNE
|
2014-10-23 23:47:36 +00:00
|
|
|
* OUTPUT_EXISTS
|
|
|
|
* KEY_IMAGE_EXISTS
|
2014-10-06 23:46:25 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
namespace cryptonote
|
|
|
|
{
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/** a pair of <transaction hash, output index>, typedef for convenience */
|
2014-10-06 23:54:46 +00:00
|
|
|
typedef std::pair<crypto::hash, uint64_t> tx_out_index;
|
|
|
|
|
2017-08-29 10:14:42 +00:00
|
|
|
//! command-line argument selecting the database engine
extern const command_line::arg_descriptor<std::string> arg_db_type;
//! command-line argument selecting the database sync mode
extern const command_line::arg_descriptor<std::string> arg_db_sync_mode;
//! command-line argument requesting salvage of the database on open
extern const command_line::arg_descriptor<bool, false> arg_db_salvage;
|
|
|
|
|
// NOTE: a historical commit-message changelog ("CHANGES ARE EXPERIMENTAL (FOR TESTING ONLY)",
// covering blockchain/LMDB/BerkeleyDB optimizations and benchmark figures) was accidentally
// spliced into this header by a bad merge; see the repository commit history for its content.
2015-07-10 20:09:32 +00:00
|
|
|
#pragma pack(push, 1)
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief a struct containing output metadata
|
|
|
|
*/
|
** CHANGES ARE EXPERIMENTAL (FOR TESTING ONLY)
Bockchain:
1. Optim: Multi-thread long-hash computation when encountering groups of blocks.
2. Optim: Cache verified txs and return result from cache instead of re-checking whenever possible.
3. Optim: Preload output-keys when encoutering groups of blocks. Sort by amount and global-index before bulk querying database and multi-thread when possible.
4. Optim: Disable double spend check on block verification, double spend is already detected when trying to add blocks.
5. Optim: Multi-thread signature computation whenever possible.
6. Patch: Disable locking (recursive mutex) on called functions from check_tx_inputs which causes slowdowns (only seems to happen on ubuntu/VMs??? Reason: TBD)
7. Optim: Removed looped full-tx hash computation when retrieving transactions from pool (???).
8. Optim: Cache difficulty/timestamps (735 blocks) for next-difficulty calculations so that only 2 db reads per new block is needed when a new block arrives (instead of 1470 reads).
Berkeley-DB:
1. Fix: 32-bit data errors causing wrong output global indices and failure to send blocks to peers (etc).
2. Fix: Unable to pop blocks on reorganize due to transaction errors.
3. Patch: Large number of transaction aborts when running multi-threaded bulk queries.
4. Patch: Insufficient locks error when running full sync.
5. Patch: Incorrect db stats when returning from an immediate exit from "pop block" operation.
6. Optim: Add bulk queries to get output global indices.
7. Optim: Modified output_keys table to store public_key+unlock_time+height for single transaction lookup (vs 3)
8. Optim: Used output_keys table retrieve public_keys instead of going through output_amounts->output_txs+output_indices->txs->output:public_key
9. Optim: Added thread-safe buffers used when multi-threading bulk queries.
10. Optim: Added support for nosync/write_nosync options for improved performance (*see --db-sync-mode option for details)
11. Mod: Added checkpoint thread and auto-remove-logs option.
12. *Now usable on 32-bit systems like RPI2.
LMDB:
1. Optim: Added custom comparison for 256-bit key tables (minor speed-up, TBD: get actual effect)
2. Optim: Modified output_keys table to store public_key+unlock_time+height for single transaction lookup (vs 3)
3. Optim: Used output_keys table retrieve public_keys instead of going through output_amounts->output_txs+output_indices->txs->output:public_key
4. Optim: Added support for sync/writemap options for improved performance (*see --db-sync-mode option for details)
5. Mod: Auto resize to +1GB instead of multiplier x1.5
ETC:
1. Minor optimizations for slow-hash for ARM (RPI2). Incomplete.
2. Fix: 32-bit saturation bug when computing next difficulty on large blocks.
[PENDING ISSUES]
1. Berkely db has a very slow "pop-block" operation. This is very noticeable on the RPI2 as it sometimes takes > 10 MINUTES to pop a block during reorganization.
This does not happen very often however, most reorgs seem to take a few seconds but it possibly depends on the number of outputs present. TBD.
2. Berkeley db, possible bug "unable to allocate memory". TBD.
[NEW OPTIONS] (*Currently all enabled for testing purposes)
1. --fast-block-sync arg=[0:1] (default: 1)
a. 0 = Compute long hash per block (may take a while depending on CPU)
b. 1 = Skip long-hash and verify blocks based on embedded known good block hashes (faster, minimal CPU dependence)
2. --db-sync-mode arg=[[safe|fast|fastest]:[sync|async]:[nblocks_per_sync]] (default: fastest:async:1000)
a. safe = fdatasync/fsync (or equivalent) per stored block. Very slow, but safest option to protect against power-out/crash conditions.
b. fast/fastest = Enables asynchronous fdatasync/fsync (or equivalent). Useful for battery operated devices or STABLE systems with UPS and/or systems with battery backed write cache/solid state cache.
Fast - Write meta-data but defer data flush.
Fastest - Defer meta-data and data flush.
Sync - Flush data after nblocks_per_sync and wait.
Async - Flush data after nblocks_per_sync but do not wait for the operation to finish.
3. --prep-blocks-threads arg=[n] (default: 4 or system max threads, whichever is lower)
Max number of threads to use when computing long-hash in groups.
4. --show-time-stats arg=[0:1] (default: 1)
Show benchmark related time stats.
5. --db-auto-remove-logs arg=[0:1] (default: 1)
For berkeley-db only. Auto remove logs if enabled.
**Note: lmdb and berkeley-db have changes to the tables and are not compatible with official git head version.
At the moment, you need a full resync to use this optimized version.
[PERFORMANCE COMPARISON]
**Some figures are approximations only.
Using a baseline machine of an i7-2600K+SSD+(with full pow computation):
1. The optimized lmdb/blockhain core can process blocks up to 585K for ~1.25 hours + download time, so it usually takes 2.5 hours to sync the full chain.
2. The current head with memory can process blocks up to 585K for ~4.2 hours + download time, so it usually takes 5.5 hours to sync the full chain.
3. The current head with lmdb can process blocks up to 585K for ~32 hours + download time and usually takes 36 hours to sync the full chain.
Averate procesing times (with full pow computation):
lmdb-optimized:
1. tx_ave = 2.5 ms / tx
2. block_ave = 5.87 ms / block
memory-official-repo:
1. tx_ave = 8.85 ms / tx
2. block_ave = 19.68 ms / block
lmdb-official-repo (0f4a036437fd41a5498ee5e74e2422ea6177aa3e)
1. tx_ave = 47.8 ms / tx
2. block_ave = 64.2 ms / block
**Note: The following data denotes processing times only (does not include p2p download time)
lmdb-optimized processing times (with full pow computation):
1. Desktop, Quad-core / 8-threads 2600k (8Mb) - 1.25 hours processing time (--db-sync-mode=fastest:async:1000).
2. Laptop, Dual-core / 4-threads U4200 (3Mb) - 4.90 hours processing time (--db-sync-mode=fastest:async:1000).
3. Embedded, Quad-core / 4-threads Z3735F (2x1Mb) - 12.0 hours processing time (--db-sync-mode=fastest:async:1000).
lmdb-optimized processing times (with per-block-checkpoint)
1. Desktop, Quad-core / 8-threads 2600k (8Mb) - 10 minutes processing time (--db-sync-mode=fastest:async:1000).
berkeley-db optimized processing times (with full pow computation)
1. Desktop, Quad-core / 8-threads 2600k (8Mb) - 1.8 hours processing time (--db-sync-mode=fastest:async:1000).
2. RPI2. Improved from estimated 3 months(???) into 2.5 days (*Need 2AMP supply + Clock:1Ghz + [usb+ssd] to achieve this speed) (--db-sync-mode=fastest:async:1000).
berkeley-db optimized processing times (with per-block-checkpoint)
1. RPI2. 12-15 hours (*Need 2AMP supply + Clock:1Ghz + [usb+ssd] to achieve this speed) (--db-sync-mode=fastest:async:1000).
2015-07-10 20:09:32 +00:00
|
|
|
struct output_data_t
|
|
|
|
{
|
2016-03-25 06:22:06 +00:00
|
|
|
crypto::public_key pubkey; //!< the output's public key (for spend verification)
|
|
|
|
uint64_t unlock_time; //!< the output's unlock time (or height)
|
|
|
|
uint64_t height; //!< the height of the block which created the output
|
2016-06-29 18:55:49 +00:00
|
|
|
rct::key commitment; //!< the output's amount commitment (for spend verification)
|
** CHANGES ARE EXPERIMENTAL (FOR TESTING ONLY)
Bockchain:
1. Optim: Multi-thread long-hash computation when encountering groups of blocks.
2. Optim: Cache verified txs and return result from cache instead of re-checking whenever possible.
3. Optim: Preload output-keys when encoutering groups of blocks. Sort by amount and global-index before bulk querying database and multi-thread when possible.
4. Optim: Disable double spend check on block verification, double spend is already detected when trying to add blocks.
5. Optim: Multi-thread signature computation whenever possible.
6. Patch: Disable locking (recursive mutex) on called functions from check_tx_inputs which causes slowdowns (only seems to happen on ubuntu/VMs??? Reason: TBD)
7. Optim: Removed looped full-tx hash computation when retrieving transactions from pool (???).
8. Optim: Cache difficulty/timestamps (735 blocks) for next-difficulty calculations so that only 2 db reads per new block is needed when a new block arrives (instead of 1470 reads).
Berkeley-DB:
1. Fix: 32-bit data errors causing wrong output global indices and failure to send blocks to peers (etc).
2. Fix: Unable to pop blocks on reorganize due to transaction errors.
3. Patch: Large number of transaction aborts when running multi-threaded bulk queries.
4. Patch: Insufficient locks error when running full sync.
5. Patch: Incorrect db stats when returning from an immediate exit from "pop block" operation.
6. Optim: Add bulk queries to get output global indices.
7. Optim: Modified output_keys table to store public_key+unlock_time+height for single transaction lookup (vs 3)
8. Optim: Used output_keys table retrieve public_keys instead of going through output_amounts->output_txs+output_indices->txs->output:public_key
9. Optim: Added thread-safe buffers used when multi-threading bulk queries.
10. Optim: Added support for nosync/write_nosync options for improved performance (*see --db-sync-mode option for details)
11. Mod: Added checkpoint thread and auto-remove-logs option.
12. *Now usable on 32-bit systems like RPI2.
LMDB:
1. Optim: Added custom comparison for 256-bit key tables (minor speed-up, TBD: get actual effect)
2. Optim: Modified output_keys table to store public_key+unlock_time+height for single transaction lookup (vs 3)
3. Optim: Used output_keys table retrieve public_keys instead of going through output_amounts->output_txs+output_indices->txs->output:public_key
4. Optim: Added support for sync/writemap options for improved performance (*see --db-sync-mode option for details)
5. Mod: Auto resize to +1GB instead of multiplier x1.5
ETC:
1. Minor optimizations for slow-hash for ARM (RPI2). Incomplete.
2. Fix: 32-bit saturation bug when computing next difficulty on large blocks.
[PENDING ISSUES]
1. Berkely db has a very slow "pop-block" operation. This is very noticeable on the RPI2 as it sometimes takes > 10 MINUTES to pop a block during reorganization.
This does not happen very often however, most reorgs seem to take a few seconds but it possibly depends on the number of outputs present. TBD.
2. Berkeley db, possible bug "unable to allocate memory". TBD.
[NEW OPTIONS] (*Currently all enabled for testing purposes)
1. --fast-block-sync arg=[0:1] (default: 1)
a. 0 = Compute long hash per block (may take a while depending on CPU)
b. 1 = Skip long-hash and verify blocks based on embedded known good block hashes (faster, minimal CPU dependence)
2. --db-sync-mode arg=[[safe|fast|fastest]:[sync|async]:[nblocks_per_sync]] (default: fastest:async:1000)
a. safe = fdatasync/fsync (or equivalent) per stored block. Very slow, but safest option to protect against power-out/crash conditions.
b. fast/fastest = Enables asynchronous fdatasync/fsync (or equivalent). Useful for battery operated devices or STABLE systems with UPS and/or systems with battery backed write cache/solid state cache.
Fast - Write meta-data but defer data flush.
Fastest - Defer meta-data and data flush.
Sync - Flush data after nblocks_per_sync and wait.
Async - Flush data after nblocks_per_sync but do not wait for the operation to finish.
3. --prep-blocks-threads arg=[n] (default: 4 or system max threads, whichever is lower)
Max number of threads to use when computing long-hash in groups.
4. --show-time-stats arg=[0:1] (default: 1)
Show benchmark related time stats.
5. --db-auto-remove-logs arg=[0:1] (default: 1)
For berkeley-db only. Auto remove logs if enabled.
**Note: lmdb and berkeley-db have changes to the tables and are not compatible with official git head version.
At the moment, you need a full resync to use this optimized version.
[PERFORMANCE COMPARISON]
**Some figures are approximations only.
Using a baseline machine of an i7-2600K+SSD+(with full pow computation):
1. The optimized lmdb/blockhain core can process blocks up to 585K for ~1.25 hours + download time, so it usually takes 2.5 hours to sync the full chain.
2. The current head with memory can process blocks up to 585K for ~4.2 hours + download time, so it usually takes 5.5 hours to sync the full chain.
3. The current head with lmdb can process blocks up to 585K for ~32 hours + download time and usually takes 36 hours to sync the full chain.
Averate procesing times (with full pow computation):
lmdb-optimized:
1. tx_ave = 2.5 ms / tx
2. block_ave = 5.87 ms / block
memory-official-repo:
1. tx_ave = 8.85 ms / tx
2. block_ave = 19.68 ms / block
lmdb-official-repo (0f4a036437fd41a5498ee5e74e2422ea6177aa3e)
1. tx_ave = 47.8 ms / tx
2. block_ave = 64.2 ms / block
**Note: The following data denotes processing times only (does not include p2p download time)
lmdb-optimized processing times (with full pow computation):
1. Desktop, Quad-core / 8-threads 2600k (8Mb) - 1.25 hours processing time (--db-sync-mode=fastest:async:1000).
2. Laptop, Dual-core / 4-threads U4200 (3Mb) - 4.90 hours processing time (--db-sync-mode=fastest:async:1000).
3. Embedded, Quad-core / 4-threads Z3735F (2x1Mb) - 12.0 hours processing time (--db-sync-mode=fastest:async:1000).
lmdb-optimized processing times (with per-block-checkpoint)
1. Desktop, Quad-core / 8-threads 2600k (8Mb) - 10 minutes processing time (--db-sync-mode=fastest:async:1000).
berkeley-db optimized processing times (with full pow computation)
1. Desktop, Quad-core / 8-threads 2600k (8Mb) - 1.8 hours processing time (--db-sync-mode=fastest:async:1000).
2. RPI2. Improved from estimated 3 months(???) into 2.5 days (*Need 2AMP supply + Clock:1Ghz + [usb+ssd] to achieve this speed) (--db-sync-mode=fastest:async:1000).
berkeley-db optimized processing times (with per-block-checkpoint)
1. RPI2. 12-15 hours (*Need 2AMP supply + Clock:1Ghz + [usb+ssd] to achieve this speed) (--db-sync-mode=fastest:async:1000).
2015-07-10 20:09:32 +00:00
|
|
|
};
|
|
|
|
#pragma pack(pop)
|
2015-12-14 04:54:39 +00:00
|
|
|
|
2016-03-04 19:56:36 +00:00
|
|
|
#pragma pack(push, 1)
/**
 * @brief per-transaction metadata as stored by the DB
 *
 * Packed to 1-byte alignment: instances are stored directly in the DB,
 * so the on-disk layout must not change.
 */
struct tx_data_t
{
  uint64_t tx_id;        //!< the transaction's DB-internal 64-bit ID
  uint64_t unlock_time;  //!< the transaction's unlock time (or height)
  uint64_t block_id;     //!< the ID (height) of the block containing the transaction
};
#pragma pack(pop)
|
|
|
|
|
2017-05-14 13:06:55 +00:00
|
|
|
/**
|
|
|
|
* @brief a struct containing txpool per transaction metadata
|
|
|
|
*/
|
|
|
|
struct txpool_tx_meta_t
|
|
|
|
{
|
|
|
|
crypto::hash max_used_block_id;
|
|
|
|
crypto::hash last_failed_id;
|
2018-07-18 21:24:53 +00:00
|
|
|
uint64_t weight;
|
2017-05-14 13:06:55 +00:00
|
|
|
uint64_t fee;
|
|
|
|
uint64_t max_used_block_height;
|
|
|
|
uint64_t last_failed_height;
|
|
|
|
uint64_t receive_time;
|
|
|
|
uint64_t last_relayed_time;
|
|
|
|
// 112 bytes
|
|
|
|
uint8_t kept_by_block;
|
|
|
|
uint8_t relayed;
|
|
|
|
uint8_t do_not_relay;
|
2017-09-22 12:57:20 +00:00
|
|
|
uint8_t double_spend_seen: 1;
|
2018-06-05 18:59:45 +00:00
|
|
|
uint8_t bf_padding: 7;
|
2017-05-14 13:06:55 +00:00
|
|
|
|
2017-09-22 12:57:20 +00:00
|
|
|
uint8_t padding[76]; // till 192 bytes
|
2017-05-14 13:06:55 +00:00
|
|
|
};
|
|
|
|
|
2017-08-19 14:27:13 +00:00
|
|
|
// Bitmask flags controlling how a BlockchainDB is opened / synced.
#define DBF_SAFE    1     // flush per block (safest)
#define DBF_FAST    2     // deferred data flush
#define DBF_FASTEST 4     // deferred metadata and data flush
#define DBF_RDONLY  8     // open read-only
#define DBF_SALVAGE 0x10  // attempt salvage of a corrupt database
|
|
|
|
|
2014-10-06 23:46:25 +00:00
|
|
|
/***********************************
|
|
|
|
* Exception Definitions
|
|
|
|
***********************************/
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
 * @brief A base class for BlockchainDB exceptions
 *
 * Carries a message string returned by what(). The constructor is
 * protected so only the concrete subclasses below can be thrown.
 */
class DB_EXCEPTION : public std::exception
{
  private:
    std::string m;  //!< the message returned by what()

  protected:
    //! construct with the message to report from what()
    DB_EXCEPTION(const char *s) : m(s) { }

  public:
    virtual ~DB_EXCEPTION() { }

    //! @return the message supplied at construction
    // noexcept override replaces the deprecated dynamic exception
    // specification throw(), which is removed in C++20; in C++17 the
    // two are equivalent, so this is interface-compatible.
    const char* what() const noexcept override
    {
      return m.c_str();
    }
};
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief A generic BlockchainDB exception
|
|
|
|
*/
|
2014-12-06 21:21:17 +00:00
|
|
|
class DB_ERROR : public DB_EXCEPTION
|
2014-10-06 23:46:25 +00:00
|
|
|
{
|
|
|
|
public:
|
2014-12-06 21:21:17 +00:00
|
|
|
DB_ERROR() : DB_EXCEPTION("Generic DB Error") { }
|
|
|
|
DB_ERROR(const char* s) : DB_EXCEPTION(s) { }
|
2014-10-06 23:46:25 +00:00
|
|
|
};
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief thrown when there is an error starting a DB transaction
|
|
|
|
*/
|
2016-02-13 12:10:27 +00:00
|
|
|
class DB_ERROR_TXN_START : public DB_EXCEPTION
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
DB_ERROR_TXN_START() : DB_EXCEPTION("DB Error in starting txn") { }
|
|
|
|
DB_ERROR_TXN_START(const char* s) : DB_EXCEPTION(s) { }
|
|
|
|
};
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief thrown when opening the BlockchainDB fails
|
|
|
|
*/
|
2014-12-06 21:21:17 +00:00
|
|
|
class DB_OPEN_FAILURE : public DB_EXCEPTION
|
2014-10-06 23:46:25 +00:00
|
|
|
{
|
|
|
|
public:
|
2014-12-06 21:21:17 +00:00
|
|
|
DB_OPEN_FAILURE() : DB_EXCEPTION("Failed to open the db") { }
|
|
|
|
DB_OPEN_FAILURE(const char* s) : DB_EXCEPTION(s) { }
|
2014-10-06 23:46:25 +00:00
|
|
|
};
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief thrown when creating the BlockchainDB fails
|
|
|
|
*/
|
2014-12-06 21:21:17 +00:00
|
|
|
class DB_CREATE_FAILURE : public DB_EXCEPTION
|
2014-10-06 23:46:25 +00:00
|
|
|
{
|
|
|
|
public:
|
2014-12-06 21:21:17 +00:00
|
|
|
DB_CREATE_FAILURE() : DB_EXCEPTION("Failed to create the db") { }
|
|
|
|
DB_CREATE_FAILURE(const char* s) : DB_EXCEPTION(s) { }
|
2014-10-06 23:46:25 +00:00
|
|
|
};
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief thrown when synchronizing the BlockchainDB to disk fails
|
|
|
|
*/
|
2014-12-06 21:21:17 +00:00
|
|
|
class DB_SYNC_FAILURE : public DB_EXCEPTION
|
2014-10-06 23:46:25 +00:00
|
|
|
{
|
|
|
|
public:
|
2014-12-06 21:21:17 +00:00
|
|
|
DB_SYNC_FAILURE() : DB_EXCEPTION("Failed to sync the db") { }
|
|
|
|
DB_SYNC_FAILURE(const char* s) : DB_EXCEPTION(s) { }
|
2014-10-06 23:46:25 +00:00
|
|
|
};
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief thrown when a requested block does not exist
|
|
|
|
*/
|
2014-12-06 21:21:17 +00:00
|
|
|
class BLOCK_DNE : public DB_EXCEPTION
|
2014-10-06 23:46:25 +00:00
|
|
|
{
|
|
|
|
public:
|
2014-12-06 21:21:17 +00:00
|
|
|
BLOCK_DNE() : DB_EXCEPTION("The block requested does not exist") { }
|
|
|
|
BLOCK_DNE(const char* s) : DB_EXCEPTION(s) { }
|
2014-10-06 23:46:25 +00:00
|
|
|
};
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief thrown when a block's parent does not exist (and it needed to)
|
|
|
|
*/
|
2014-12-06 21:21:17 +00:00
|
|
|
class BLOCK_PARENT_DNE : public DB_EXCEPTION
|
2014-10-06 23:46:25 +00:00
|
|
|
{
|
|
|
|
public:
|
2014-12-06 21:21:17 +00:00
|
|
|
BLOCK_PARENT_DNE() : DB_EXCEPTION("The parent of the block does not exist") { }
|
|
|
|
BLOCK_PARENT_DNE(const char* s) : DB_EXCEPTION(s) { }
|
2014-10-06 23:46:25 +00:00
|
|
|
};
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief thrown when a block exists, but shouldn't, namely when adding a block
|
|
|
|
*/
|
2014-12-06 21:21:17 +00:00
|
|
|
class BLOCK_EXISTS : public DB_EXCEPTION
|
2014-10-06 23:46:25 +00:00
|
|
|
{
|
|
|
|
public:
|
2014-12-06 21:21:17 +00:00
|
|
|
BLOCK_EXISTS() : DB_EXCEPTION("The block to be added already exists!") { }
|
|
|
|
BLOCK_EXISTS(const char* s) : DB_EXCEPTION(s) { }
|
2014-10-06 23:46:25 +00:00
|
|
|
};
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief thrown when something is wrong with the block to be added
|
|
|
|
*/
|
2014-12-06 21:21:17 +00:00
|
|
|
class BLOCK_INVALID : public DB_EXCEPTION
|
2014-10-06 23:46:25 +00:00
|
|
|
{
|
|
|
|
public:
|
2014-12-06 21:21:17 +00:00
|
|
|
BLOCK_INVALID() : DB_EXCEPTION("The block to be added did not pass validation!") { }
|
|
|
|
BLOCK_INVALID(const char* s) : DB_EXCEPTION(s) { }
|
2014-10-06 23:46:25 +00:00
|
|
|
};
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief thrown when a requested transaction does not exist
|
|
|
|
*/
|
2014-12-06 21:21:17 +00:00
|
|
|
class TX_DNE : public DB_EXCEPTION
|
2014-10-06 23:46:25 +00:00
|
|
|
{
|
|
|
|
public:
|
2014-12-06 21:21:17 +00:00
|
|
|
TX_DNE() : DB_EXCEPTION("The transaction requested does not exist") { }
|
|
|
|
TX_DNE(const char* s) : DB_EXCEPTION(s) { }
|
2014-10-06 23:46:25 +00:00
|
|
|
};
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief thrown when a transaction exists, but shouldn't, namely when adding a block
|
|
|
|
*/
|
2014-12-06 21:21:17 +00:00
|
|
|
class TX_EXISTS : public DB_EXCEPTION
|
2014-10-06 23:46:25 +00:00
|
|
|
{
|
|
|
|
public:
|
2014-12-06 21:21:17 +00:00
|
|
|
TX_EXISTS() : DB_EXCEPTION("The transaction to be added already exists!") { }
|
|
|
|
TX_EXISTS(const char* s) : DB_EXCEPTION(s) { }
|
2014-10-06 23:46:25 +00:00
|
|
|
};
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief thrown when a requested output does not exist
|
|
|
|
*/
|
2014-12-06 21:21:17 +00:00
|
|
|
class OUTPUT_DNE : public DB_EXCEPTION
|
2014-10-23 23:47:36 +00:00
|
|
|
{
|
|
|
|
public:
|
2014-12-06 21:21:17 +00:00
|
|
|
OUTPUT_DNE() : DB_EXCEPTION("The output requested does not exist!") { }
|
|
|
|
OUTPUT_DNE(const char* s) : DB_EXCEPTION(s) { }
|
2014-10-23 23:47:36 +00:00
|
|
|
};
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief thrown when an output exists, but shouldn't, namely when adding a block
|
|
|
|
*/
|
2014-12-06 21:21:17 +00:00
|
|
|
class OUTPUT_EXISTS : public DB_EXCEPTION
|
2014-10-23 23:47:36 +00:00
|
|
|
{
|
|
|
|
public:
|
2014-12-06 21:21:17 +00:00
|
|
|
OUTPUT_EXISTS() : DB_EXCEPTION("The output to be added already exists!") { }
|
|
|
|
OUTPUT_EXISTS(const char* s) : DB_EXCEPTION(s) { }
|
|
|
|
};
|
2014-10-23 23:47:36 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief thrown when a spent key image exists, but shouldn't, namely when adding a block
|
|
|
|
*/
|
2014-12-06 21:21:17 +00:00
|
|
|
class KEY_IMAGE_EXISTS : public DB_EXCEPTION
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
KEY_IMAGE_EXISTS() : DB_EXCEPTION("The spent key image to be added already exists!") { }
|
|
|
|
KEY_IMAGE_EXISTS(const char* s) : DB_EXCEPTION(s) { }
|
2014-10-23 23:47:36 +00:00
|
|
|
};
|
|
|
|
|
2014-10-06 23:46:25 +00:00
|
|
|
/***********************************
|
|
|
|
* End of Exception Definitions
|
|
|
|
***********************************/
|
|
|
|
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief The BlockchainDB backing store interface declaration/contract
|
|
|
|
*
|
|
|
|
* This class provides a uniform interface for using BlockchainDB to store
|
|
|
|
* a blockchain. Any implementation of this class will also implement all
|
|
|
|
* functions exposed here, so one can use this class without knowing what
|
|
|
|
* implementation is being used. Refer to each pure virtual function's
|
|
|
|
* documentation here when implementing a BlockchainDB subclass.
|
|
|
|
*
|
|
|
|
* A subclass which encounters an issue should report that issue by throwing
|
|
|
|
* a DB_EXCEPTION which adequately conveys the issue.
|
|
|
|
*/
|
2014-10-06 23:46:25 +00:00
|
|
|
class BlockchainDB
|
|
|
|
{
|
|
|
|
private:
|
|
|
|
/*********************************************************************
|
|
|
|
* private virtual members
|
|
|
|
*********************************************************************/
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief add the block and metadata to the db
|
|
|
|
*
|
|
|
|
* The subclass implementing this will add the specified block and
|
|
|
|
* block metadata to its backing store. This does not include its
|
|
|
|
* transactions, those are added in a separate step.
|
|
|
|
*
|
|
|
|
* If any of this cannot be done, the subclass should throw the corresponding
|
|
|
|
* subclass of DB_EXCEPTION
|
|
|
|
*
|
|
|
|
* @param blk the block to be added
|
2018-07-18 21:24:53 +00:00
|
|
|
* @param block_weight the weight of the block (transactions and all)
|
ArticMine's new block weight algorithm
This curbs runaway growth while still allowing substantial
spikes in block weight
Original specification from ArticMine:
here is the scaling proposal
Define: LongTermBlockWeight
Before fork:
LongTermBlockWeight = BlockWeight
At or after fork:
LongTermBlockWeight = min(BlockWeight, 1.4*LongTermEffectiveMedianBlockWeight)
Note: To avoid possible consensus issues over rounding the LongTermBlockWeight for a given block should be calculated to the nearest byte, and stored as a integer in the block itself. The stored LongTermBlockWeight is then used for future calculations of the LongTermEffectiveMedianBlockWeight and not recalculated each time.
Define: LongTermEffectiveMedianBlockWeight
LongTermEffectiveMedianBlockWeight = max(300000, MedianOverPrevious100000Blocks(LongTermBlockWeight))
Change Definition of EffectiveMedianBlockWeight
From (current definition)
EffectiveMedianBlockWeight = max(300000, MedianOverPrevious100Blocks(BlockWeight))
To (proposed definition)
EffectiveMedianBlockWeight = min(max(300000, MedianOverPrevious100Blocks(BlockWeight)), 50*LongTermEffectiveMedianBlockWeight)
Notes:
1) There are no other changes to the existing penalty formula, median calculation, fees etc.
2) There is the requirement to store the LongTermBlockWeight of a block unencrypted in the block itself. This is to avoid possible consensus issues over rounding and also to prevent the calculations from becoming unwieldy as we move away from the fork.
3) When the EffectiveMedianBlockWeight cap is reached it is still possible to mine blocks up to 2x the EffectiveMedianBlockWeight by paying the corresponding penalty.
Note: the long term block weight is stored in the database, but not in the actual block itself,
since it requires recalculating anyway for verification.
2019-01-21 17:18:50 +00:00
|
|
|
* @param long_term_block_weight the long term block weight of the block (transactions and all)
|
2016-03-25 06:22:06 +00:00
|
|
|
* @param cumulative_difficulty the accumulated difficulty after this block
|
|
|
|
* @param coins_generated the number of coins generated total after this block
|
|
|
|
* @param num_rct_outs the number of RingCT outputs in the block
* @param blk_hash the hash of the block
|
|
|
|
*/
|
2014-10-06 23:46:25 +00:00
|
|
|
virtual void add_block( const block& blk
|
2018-07-18 21:24:53 +00:00
|
|
|
, size_t block_weight
|
ArticMine's new block weight algorithm
This curbs runaway growth while still allowing substantial
spikes in block weight
Original specification from ArticMine:
here is the scaling proposal
Define: LongTermBlockWeight
Before fork:
LongTermBlockWeight = BlockWeight
At or after fork:
LongTermBlockWeight = min(BlockWeight, 1.4*LongTermEffectiveMedianBlockWeight)
Note: To avoid possible consensus issues over rounding the LongTermBlockWeight for a given block should be calculated to the nearest byte, and stored as a integer in the block itself. The stored LongTermBlockWeight is then used for future calculations of the LongTermEffectiveMedianBlockWeight and not recalculated each time.
Define: LongTermEffectiveMedianBlockWeight
LongTermEffectiveMedianBlockWeight = max(300000, MedianOverPrevious100000Blocks(LongTermBlockWeight))
Change Definition of EffectiveMedianBlockWeight
From (current definition)
EffectiveMedianBlockWeight = max(300000, MedianOverPrevious100Blocks(BlockWeight))
To (proposed definition)
EffectiveMedianBlockWeight = min(max(300000, MedianOverPrevious100Blocks(BlockWeight)), 50*LongTermEffectiveMedianBlockWeight)
Notes:
1) There are no other changes to the existing penalty formula, median calculation, fees etc.
2) There is the requirement to store the LongTermBlockWeight of a block unencrypted in the block itself. This is to avoid possible consensus issues over rounding and also to prevent the calculations from becoming unwieldy as we move away from the fork.
3) When the EffectiveMedianBlockWeight cap is reached it is still possible to mine blocks up to 2x the EffectiveMedianBlockWeight by paying the corresponding penalty.
Note: the long term block weight is stored in the database, but not in the actual block itself,
since it requires recalculating anyway for verification.
2019-01-21 17:18:50 +00:00
|
|
|
, uint64_t long_term_block_weight
|
2014-10-06 23:46:25 +00:00
|
|
|
, const difficulty_type& cumulative_difficulty
|
|
|
|
, const uint64_t& coins_generated
|
2018-05-22 13:46:30 +00:00
|
|
|
, uint64_t num_rct_outs
|
2015-02-11 23:55:53 +00:00
|
|
|
, const crypto::hash& blk_hash
|
2014-10-06 23:46:25 +00:00
|
|
|
) = 0;
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief remove data about the top block
|
|
|
|
*
|
|
|
|
* The subclass implementing this will remove the block data from the top
|
|
|
|
* block in the chain. The data to be removed is that which was added in
|
ArticMine's new block weight algorithm
This curbs runaway growth while still allowing substantial
spikes in block weight
Original specification from ArticMine:
here is the scaling proposal
Define: LongTermBlockWeight
Before fork:
LongTermBlockWeight = BlockWeight
At or after fork:
LongTermBlockWeight = min(BlockWeight, 1.4*LongTermEffectiveMedianBlockWeight)
Note: To avoid possible consensus issues over rounding the LongTermBlockWeight for a given block should be calculated to the nearest byte, and stored as a integer in the block itself. The stored LongTermBlockWeight is then used for future calculations of the LongTermEffectiveMedianBlockWeight and not recalculated each time.
Define: LongTermEffectiveMedianBlockWeight
LongTermEffectiveMedianBlockWeight = max(300000, MedianOverPrevious100000Blocks(LongTermBlockWeight))
Change Definition of EffectiveMedianBlockWeight
From (current definition)
EffectiveMedianBlockWeight = max(300000, MedianOverPrevious100Blocks(BlockWeight))
To (proposed definition)
EffectiveMedianBlockWeight = min(max(300000, MedianOverPrevious100Blocks(BlockWeight)), 50*LongTermEffectiveMedianBlockWeight)
Notes:
1) There are no other changes to the existing penalty formula, median calculation, fees etc.
2) There is the requirement to store the LongTermBlockWeight of a block unencrypted in the block itself. This is to avoid possible consensus issues over rounding and also to prevent the calculations from becoming unwieldy as we move away from the fork.
3) When the EffectiveMedianBlockWeight cap is reached it is still possible to mine blocks up to 2x the EffectiveMedianBlockWeight by paying the corresponding penalty.
Note: the long term block weight is stored in the database, but not in the actual block itself,
since it requires recalculating anyway for verification.
2019-01-21 17:18:50 +00:00
|
|
|
* BlockchainDB::add_block(const block& blk, size_t block_weight, uint64_t long_term_block_weight, const difficulty_type& cumulative_difficulty, const uint64_t& coins_generated, uint64_t num_rct_outs, const crypto::hash& blk_hash)
|
2016-03-25 06:22:06 +00:00
|
|
|
*
|
|
|
|
* If any of this cannot be done, the subclass should throw the corresponding
|
|
|
|
* subclass of DB_EXCEPTION
|
|
|
|
*/
|
2014-10-23 19:37:10 +00:00
|
|
|
virtual void remove_block() = 0;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief store the transaction and its metadata
|
|
|
|
*
|
|
|
|
* The subclass implementing this will add the specified transaction data
|
|
|
|
* to its backing store. This includes only the transaction blob itself
|
|
|
|
* and the other data passed here, not the separate outputs of the
|
|
|
|
* transaction.
|
|
|
|
*
|
2016-04-05 20:13:16 +00:00
|
|
|
* It returns a tx ID, which is a mapping from the tx_hash. The tx ID
|
|
|
|
* is used in #add_tx_amount_output_indices().
|
|
|
|
*
|
2016-03-25 06:22:06 +00:00
|
|
|
* If any of this cannot be done, the subclass should throw the corresponding
|
|
|
|
* subclass of DB_EXCEPTION
|
|
|
|
*
|
|
|
|
* @param blk_hash the hash of the block containing the transaction
|
|
|
|
* @param tx the transaction to be added
|
|
|
|
* @param tx_hash the hash of the transaction
|
2017-10-01 10:24:33 +00:00
|
|
|
* @param tx_prunable_hash the hash of the prunable part of the transaction
|
2016-04-05 20:13:16 +00:00
|
|
|
* @return the transaction ID
|
2016-03-25 06:22:06 +00:00
|
|
|
*/
|
2018-11-11 14:51:03 +00:00
|
|
|
virtual uint64_t add_transaction_data(const crypto::hash& blk_hash, const std::pair<transaction, blobdata>& tx, const crypto::hash& tx_hash, const crypto::hash& tx_prunable_hash) = 0;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief remove data about a transaction
|
|
|
|
*
|
|
|
|
* The subclass implementing this will remove the transaction data
|
|
|
|
* for the passed transaction. The data to be removed was added in
|
|
|
|
* add_transaction_data(). Additionally, current subclasses have behavior
|
|
|
|
* which requires the transaction itself as a parameter here. Future
|
|
|
|
* implementations should note that this parameter is subject to be removed
|
|
|
|
* at a later time.
|
|
|
|
*
|
|
|
|
* If any of this cannot be done, the subclass should throw the corresponding
|
|
|
|
* subclass of DB_EXCEPTION
|
|
|
|
*
|
|
|
|
* @param tx_hash the hash of the transaction to be removed
|
|
|
|
* @param tx the transaction
|
|
|
|
*/
|
2015-01-12 02:04:04 +00:00
|
|
|
virtual void remove_transaction_data(const crypto::hash& tx_hash, const transaction& tx) = 0;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief store an output
|
|
|
|
*
|
|
|
|
* The subclass implementing this will add the output data passed to its
|
|
|
|
* backing store in a suitable manner. In addition, the subclass is responsible
|
|
|
|
* for keeping track of the global output count in some manner, so that
|
|
|
|
* outputs may be indexed by the order in which they were created. In the
|
|
|
|
* future, this tracking (of the number, at least) should be moved to
|
|
|
|
* this class, as it is necessary and the same among all BlockchainDB.
|
|
|
|
*
|
2016-04-05 20:13:16 +00:00
|
|
|
* It returns an amount output index, which is the index of the output
|
|
|
|
* for its specified amount.
|
|
|
|
*
|
2016-03-25 06:22:06 +00:00
|
|
|
* This data should be stored in such a manner that the only thing needed to
|
|
|
|
* reverse the process is the tx_out.
|
|
|
|
*
|
|
|
|
* If any of this cannot be done, the subclass should throw the corresponding
|
|
|
|
* subclass of DB_EXCEPTION
|
|
|
|
*
|
|
|
|
* @param tx_hash hash of the transaction the output was created by
|
|
|
|
* @param tx_output the output
|
|
|
|
* @param local_index index of the output in its transaction
|
|
|
|
* @param unlock_time unlock time/height of the output
|
2016-06-29 18:55:49 +00:00
|
|
|
* @param commitment the rct commitment to the output amount
|
2016-04-05 20:13:16 +00:00
|
|
|
* @return amount output index
|
2016-03-25 06:22:06 +00:00
|
|
|
*/
|
2016-06-29 18:55:49 +00:00
|
|
|
virtual uint64_t add_output(const crypto::hash& tx_hash, const tx_out& tx_output, const uint64_t& local_index, const uint64_t unlock_time, const rct::key *commitment) = 0;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
2016-04-05 20:13:16 +00:00
|
|
|
* @brief store amount output indices for a tx's outputs
|
2016-03-25 06:22:06 +00:00
|
|
|
*
|
2016-04-05 20:13:16 +00:00
|
|
|
* The subclass implementing this will add the amount output indices to its
|
|
|
|
* backing store in a suitable manner. The tx_id will be the same one that
|
|
|
|
* was returned from #add_transaction_data().
|
2016-03-25 06:22:06 +00:00
|
|
|
*
|
|
|
|
* If any of this cannot be done, the subclass should throw the corresponding
|
|
|
|
* subclass of DB_EXCEPTION
|
|
|
|
*
|
2016-04-05 20:13:16 +00:00
|
|
|
* @param tx_id ID of the transaction containing these outputs
|
|
|
|
* @param amount_output_indices the amount output indices of the transaction
|
2016-03-25 06:22:06 +00:00
|
|
|
*/
|
2016-04-05 20:13:16 +00:00
|
|
|
virtual void add_tx_amount_output_indices(const uint64_t tx_id, const std::vector<uint64_t>& amount_output_indices) = 0;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief store a spent key
|
|
|
|
*
|
|
|
|
* The subclass implementing this will store the spent key image.
|
|
|
|
*
|
|
|
|
* If any of this cannot be done, the subclass should throw the corresponding
|
|
|
|
* subclass of DB_EXCEPTION
|
|
|
|
*
|
|
|
|
* @param k_image the spent key image to store
|
|
|
|
*/
|
2014-10-06 23:46:25 +00:00
|
|
|
virtual void add_spent_key(const crypto::key_image& k_image) = 0;
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief remove a spent key
|
|
|
|
*
|
|
|
|
* The subclass implementing this will remove the key image.
|
|
|
|
*
|
|
|
|
* If any of this cannot be done, the subclass should throw the corresponding
|
|
|
|
* subclass of DB_EXCEPTION
|
|
|
|
*
|
|
|
|
* @param k_image the spent key image to remove
|
|
|
|
*/
|
2014-10-06 23:46:25 +00:00
|
|
|
virtual void remove_spent_key(const crypto::key_image& k_image) = 0;
|
|
|
|
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
* private concrete members
|
2015-12-14 04:54:39 +00:00
|
|
|
*********************************************************************/
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief private version of pop_block, for undoing if an add_block fails
|
|
|
|
*
|
|
|
|
* This function simply calls pop_block(block& blk, std::vector<transaction>& txs)
|
|
|
|
* with dummy parameters, as the returns-by-reference can be discarded.
|
|
|
|
*/
|
2014-10-06 23:46:25 +00:00
|
|
|
void pop_block();
|
|
|
|
|
|
|
|
// helper function to remove transaction from blockchain
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief helper function to remove transaction from the blockchain
|
|
|
|
*
|
|
|
|
* This function encapsulates aspects of removing a transaction.
|
|
|
|
*
|
|
|
|
* @param tx_hash the hash of the transaction to be removed
|
|
|
|
*/
|
2014-10-06 23:46:25 +00:00
|
|
|
void remove_transaction(const crypto::hash& tx_hash);
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
uint64_t num_calls = 0; //!< a performance metric
|
|
|
|
uint64_t time_blk_hash = 0; //!< a performance metric
|
|
|
|
uint64_t time_add_block1 = 0; //!< a performance metric
|
|
|
|
uint64_t time_add_transaction = 0; //!< a performance metric
|
2015-02-11 23:55:53 +00:00
|
|
|
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2015-02-11 23:55:53 +00:00
|
|
|
protected:
|
|
|
|
|
2016-04-10 16:25:13 +00:00
|
|
|
/**
|
|
|
|
* @brief helper function for add_transactions, to add each individual transaction
|
|
|
|
*
|
|
|
|
* This function is called by add_transactions() for each transaction to be
|
|
|
|
* added.
|
|
|
|
*
|
|
|
|
* @param blk_hash hash of the block which has the transaction
|
|
|
|
* @param tx the transaction to add
|
|
|
|
* @param tx_hash_ptr the hash of the transaction, if already calculated
|
2017-10-01 10:24:33 +00:00
|
|
|
* @param tx_prunable_hash_ptr the hash of the prunable part of the transaction, if already calculated
|
2016-04-10 16:25:13 +00:00
|
|
|
*/
|
2018-11-11 14:51:03 +00:00
|
|
|
void add_transaction(const crypto::hash& blk_hash, const std::pair<transaction, blobdata>& tx, const crypto::hash* tx_hash_ptr = NULL, const crypto::hash* tx_prunable_hash_ptr = NULL);
|
2016-04-10 16:25:13 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
mutable uint64_t time_tx_exists = 0; //!< a performance metric
|
|
|
|
uint64_t time_commit1 = 0; //!< a performance metric
|
|
|
|
bool m_auto_remove_logs = true; //!< whether or not to automatically remove old logs
|
2015-02-11 23:55:53 +00:00
|
|
|
|
2016-02-08 15:51:57 +00:00
|
|
|
HardFork* m_hardfork;
|
|
|
|
|
2014-10-06 23:46:25 +00:00
|
|
|
public:
|
2014-10-15 22:33:53 +00:00
|
|
|
|
2018-02-02 15:21:39 +00:00
|
|
|
/**
|
|
|
|
* @brief An empty constructor.
|
|
|
|
*/
|
2018-10-16 10:09:57 +00:00
|
|
|
// Default-constructs with no hardfork handler and m_open=false; nullptr is
// preferred over NULL since the file already uses C++11 features.
BlockchainDB(): m_hardfork(nullptr), m_open(false) { }
|
2018-02-02 15:21:39 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief An empty destructor.
|
|
|
|
*/
|
2014-10-15 22:33:53 +00:00
|
|
|
// Defaulted virtual destructor (the original empty body had a stray trailing
// semicolon, i.e. an empty member declaration); virtual so deleting through
// a BlockchainDB* destroys the subclass correctly.
virtual ~BlockchainDB() = default;
|
|
|
|
|
2017-08-29 10:14:42 +00:00
|
|
|
/**
|
|
|
|
* @brief init command line options
|
|
|
|
*/
|
|
|
|
static void init_options(boost::program_options::options_description& desc);
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief reset profiling stats
|
|
|
|
*/
|
2015-02-11 23:55:53 +00:00
|
|
|
void reset_stats();
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief show profiling stats
|
|
|
|
*
|
|
|
|
* This function prints current performance/profiling data to whichever
|
|
|
|
* log file(s) are set up (possibly including stdout or stderr)
|
|
|
|
*/
|
2015-02-11 23:55:53 +00:00
|
|
|
void show_stats();
|
2014-10-15 22:33:53 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief open a db, or create it if necessary.
|
|
|
|
*
|
|
|
|
* This function opens an existing database or creates it if it
|
|
|
|
* does not exist.
|
|
|
|
*
|
|
|
|
* The subclass implementing this will handle all file opening/creation,
|
|
|
|
* and is responsible for maintaining its state.
|
|
|
|
*
|
|
|
|
* The parameter <filename> may not refer to a file name, necessarily, but
|
|
|
|
* could be an IP:PORT for a database which needs it, and so on. Calling it
|
|
|
|
* <filename> is convenient and should be descriptive enough, however.
|
|
|
|
*
|
|
|
|
* For now, db_flags are
|
|
|
|
* specific to the subclass being instantiated. This is subject to change,
|
|
|
|
* and the db_flags parameter may be deprecated.
|
|
|
|
*
|
|
|
|
* If any of this cannot be done, the subclass should throw the corresponding
|
|
|
|
* subclass of DB_EXCEPTION
|
|
|
|
*
|
|
|
|
* @param filename a string referring to the BlockchainDB to open
|
|
|
|
* @param db_flags flags relevant to how to open/use the BlockchainDB
|
|
|
|
*/
|
2015-02-12 00:02:20 +00:00
|
|
|
virtual void open(const std::string& filename, const int db_flags = 0) = 0;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief Gets the current open/ready state of the BlockchainDB
|
|
|
|
*
|
|
|
|
* @return true if open/ready, otherwise false
|
|
|
|
*/
|
2015-05-27 18:03:46 +00:00
|
|
|
bool is_open() const;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief close the BlockchainDB
|
|
|
|
*
|
|
|
|
* At minimum, this call ensures that further use of the BlockchainDB
|
|
|
|
* instance will not have effect. In any case where it is necessary
|
|
|
|
* to do so, a subclass implementing this will sync with disk.
|
|
|
|
*
|
|
|
|
* If any of this cannot be done, the subclass should throw the corresponding
|
|
|
|
* subclass of DB_EXCEPTION
|
|
|
|
*/
|
2014-10-06 23:46:25 +00:00
|
|
|
virtual void close() = 0;
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief sync the BlockchainDB with disk
|
|
|
|
*
|
|
|
|
* This function should write any changes to whatever permanent backing
|
|
|
|
* store the subclass uses. Example: a BlockchainDB instance which
|
|
|
|
* keeps the whole blockchain in RAM won't need to regularly access a
|
|
|
|
* disk, but should write out its state when this is called.
|
|
|
|
*
|
|
|
|
* If any of this cannot be done, the subclass should throw the corresponding
|
|
|
|
* subclass of DB_EXCEPTION
|
|
|
|
*/
|
2014-10-06 23:46:25 +00:00
|
|
|
virtual void sync() = 0;
|
|
|
|
|
2017-08-19 18:36:51 +00:00
|
|
|
/**
|
|
|
|
* @brief toggle safe syncs for the DB
|
|
|
|
*
|
|
|
|
* Used to switch DBF_SAFE on or off after starting up with DBF_FAST.
|
|
|
|
*/
|
|
|
|
virtual void safesyncmode(const bool onoff) = 0;
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief Remove everything from the BlockchainDB
|
|
|
|
*
|
|
|
|
* This function should completely remove all data from a BlockchainDB.
|
|
|
|
*
|
|
|
|
* Use with caution!
|
|
|
|
*
|
|
|
|
* If any of this cannot be done, the subclass should throw the corresponding
|
|
|
|
* subclass of DB_EXCEPTION
|
|
|
|
*/
|
2014-10-06 23:46:25 +00:00
|
|
|
virtual void reset() = 0;
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief get all files used by the BlockchainDB (if any)
|
|
|
|
*
|
|
|
|
* This function is largely for ease of automation, namely for unit tests.
|
|
|
|
*
|
|
|
|
* The subclass implementation should return all filenames it uses.
|
|
|
|
*
|
|
|
|
* @return a list of filenames
|
|
|
|
*/
|
2014-12-06 21:37:22 +00:00
|
|
|
virtual std::vector<std::string> get_filenames() const = 0;
|
2014-10-15 22:33:53 +00:00
|
|
|
|
2018-06-14 19:11:49 +00:00
|
|
|
/**
|
|
|
|
* @brief remove file(s) storing the database
|
|
|
|
*
|
|
|
|
* This function is for resetting the database (for core tests, functional tests, etc).
|
|
|
|
* The function reset() is not usable because it needs to open the database file first
|
|
|
|
* which can fail if the existing database file is in an incompatible format.
|
|
|
|
* As such, this function needs to be called before calling open().
|
|
|
|
*
|
|
|
|
* @param folder The path of the folder containing the database file(s) which must not end with slash '/'.
|
|
|
|
*
|
|
|
|
* @return true if the operation is successful
|
|
|
|
*/
|
|
|
|
virtual bool remove_data_file(const std::string& folder) const = 0;
|
|
|
|
|
2015-03-14 01:39:27 +00:00
|
|
|
// return the name of the folder the db's file(s) should reside in
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief gets the name of the folder the BlockchainDB's file(s) should be in
|
|
|
|
*
|
|
|
|
* The subclass implementation should return the name of the folder in which
|
|
|
|
* it stores files, or an empty string if there is none.
|
|
|
|
*
|
|
|
|
* @return the name of the folder with the BlockchainDB's files, if any.
|
|
|
|
*/
|
2015-03-14 01:39:27 +00:00
|
|
|
virtual std::string get_db_name() const = 0;
|
|
|
|
|
2014-10-06 23:46:25 +00:00
|
|
|
|
|
|
|
// FIXME: these are just for functionality mocking, need to implement
|
|
|
|
// RAII-friendly and multi-read one-write friendly locking mechanism
|
|
|
|
//
|
|
|
|
// acquire db lock
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief acquires the BlockchainDB lock
|
|
|
|
*
|
|
|
|
* This function is a stub until such a time as locking is implemented at
|
|
|
|
* this level.
|
|
|
|
*
|
|
|
|
* The subclass implementation should return true unless implementing a
|
|
|
|
* locking scheme of some sort, in which case it should return true upon
|
|
|
|
* acquisition of the lock and block until then.
|
|
|
|
*
|
|
|
|
* If any of this cannot be done, the subclass should throw the corresponding
|
|
|
|
* subclass of DB_EXCEPTION
|
|
|
|
*
|
|
|
|
* @return true, unless at a future time false makes sense (timeout, etc)
|
|
|
|
*/
|
2014-10-06 23:46:25 +00:00
|
|
|
virtual bool lock() = 0;
|
|
|
|
|
|
|
|
// release db lock
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief This function releases the BlockchainDB lock
|
|
|
|
*
|
|
|
|
* The subclass, should it have implemented lock(), will release any lock
|
|
|
|
* held by the calling thread. In the case of recursive locking, it should
|
|
|
|
* release one instance of a lock.
|
|
|
|
*
|
|
|
|
* If any of this cannot be done, the subclass should throw the corresponding
|
|
|
|
* subclass of DB_EXCEPTION
|
|
|
|
*/
|
2014-10-06 23:46:25 +00:00
|
|
|
virtual void unlock() = 0;
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief tells the BlockchainDB to start a new "batch" of blocks
|
|
|
|
*
|
|
|
|
* If the subclass implements a batching method of caching blocks in RAM to
|
|
|
|
* be added to a backing store in groups, it should start a batch which will
|
|
|
|
* end either when <batch_num_blocks> has been added or batch_stop() has
|
|
|
|
* been called. In either case, it should end the batch and write to its
|
|
|
|
* backing store.
|
|
|
|
*
|
2016-12-26 22:29:46 +00:00
|
|
|
* If a batch is already in-progress, this function must return false.
|
|
|
|
* If a batch was started by this call, it must return true.
|
2016-03-25 06:22:06 +00:00
|
|
|
*
|
|
|
|
* If any of this cannot be done, the subclass should throw the corresponding
|
|
|
|
* subclass of DB_EXCEPTION
|
|
|
|
*
|
|
|
|
* @param batch_num_blocks number of blocks to batch together
|
2016-12-26 22:29:46 +00:00
|
|
|
*
|
|
|
|
* @return true if we started the batch, false if already started
|
2016-03-25 06:22:06 +00:00
|
|
|
*/
|
2017-09-16 23:12:42 +00:00
|
|
|
virtual bool batch_start(uint64_t batch_num_blocks=0, uint64_t batch_bytes=0) = 0;
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief ends a batch transaction
|
|
|
|
*
|
|
|
|
* If the subclass implements batching, this function should store the
|
|
|
|
* batch it is currently on and mark it finished.
|
|
|
|
*
|
|
|
|
* If no batch is in-progress, this function should throw a DB_ERROR.
|
|
|
|
* This exception may change in the future if it is deemed necessary to
|
|
|
|
* have a more granular exception type for this scenario.
|
|
|
|
*
|
|
|
|
* If any of this cannot be done, the subclass should throw the corresponding
|
|
|
|
* subclass of DB_EXCEPTION
|
|
|
|
*/
|
2015-02-11 23:55:53 +00:00
|
|
|
virtual void batch_stop() = 0;
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief sets whether or not to batch transactions
|
|
|
|
*
|
|
|
|
* If the subclass implements batching, this function tells it to begin
|
|
|
|
* batching automatically.
|
|
|
|
*
|
|
|
|
* If the subclass implements batching and has a batch in-progress, a
|
|
|
|
* parameter of false should disable batching and call batch_stop() to
|
|
|
|
* store the current batch.
|
|
|
|
*
|
|
|
|
* If any of this cannot be done, the subclass should throw the corresponding
|
|
|
|
* subclass of DB_EXCEPTION
|
|
|
|
*
|
|
|
|
* @param bool batch whether or not to use batch transactions.
|
|
|
|
*/
|
2015-02-11 23:55:53 +00:00
|
|
|
virtual void set_batch_transactions(bool) = 0;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-02-18 12:09:57 +00:00
|
|
|
virtual void block_txn_start(bool readonly=false) = 0;
|
2016-02-08 16:32:19 +00:00
|
|
|
virtual void block_txn_stop() = 0;
|
|
|
|
virtual void block_txn_abort() = 0;
|
|
|
|
|
2016-02-08 20:56:31 +00:00
|
|
|
virtual void set_hard_fork(HardFork* hf);
|
2016-02-08 15:51:57 +00:00
|
|
|
|
2014-10-06 23:46:25 +00:00
|
|
|
// adds a block with the given metadata to the top of the blockchain, returns the new height
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief handles the addition of a new block to BlockchainDB
|
|
|
|
*
|
|
|
|
* This function organizes block addition and calls various functions as
|
|
|
|
* necessary.
|
|
|
|
*
|
|
|
|
* NOTE: subclass implementations of this (or the functions it calls) need
|
|
|
|
* to handle undoing any partially-added blocks in the event of a failure.
|
|
|
|
*
|
|
|
|
* If any of this cannot be done, the subclass should throw the corresponding
|
|
|
|
* subclass of DB_EXCEPTION
|
|
|
|
*
|
|
|
|
* @param blk the block to be added
|
2018-07-18 21:24:53 +00:00
|
|
|
* @param block_weight the size of the block (transactions and all)
|
ArticMine's new block weight algorithm
This curbs runaway growth while still allowing substantial
spikes in block weight
Original specification from ArticMine:
here is the scaling proposal
Define: LongTermBlockWeight
Before fork:
LongTermBlockWeight = BlockWeight
At or after fork:
LongTermBlockWeight = min(BlockWeight, 1.4*LongTermEffectiveMedianBlockWeight)
Note: To avoid possible consensus issues over rounding the LongTermBlockWeight for a given block should be calculated to the nearest byte, and stored as a integer in the block itself. The stored LongTermBlockWeight is then used for future calculations of the LongTermEffectiveMedianBlockWeight and not recalculated each time.
Define: LongTermEffectiveMedianBlockWeight
LongTermEffectiveMedianBlockWeight = max(300000, MedianOverPrevious100000Blocks(LongTermBlockWeight))
Change Definition of EffectiveMedianBlockWeight
From (current definition)
EffectiveMedianBlockWeight = max(300000, MedianOverPrevious100Blocks(BlockWeight))
To (proposed definition)
EffectiveMedianBlockWeight = min(max(300000, MedianOverPrevious100Blocks(BlockWeight)), 50*LongTermEffectiveMedianBlockWeight)
Notes:
1) There are no other changes to the existing penalty formula, median calculation, fees etc.
2) There is the requirement to store the LongTermBlockWeight of a block unencrypted in the block itself. This is to avoid possible consensus issues over rounding and also to prevent the calculations from becoming unwieldy as we move away from the fork.
3) When the EffectiveMedianBlockWeight cap is reached it is still possible to mine blocks up to 2x the EffectiveMedianBlockWeight by paying the corresponding penalty.
Note: the long term block weight is stored in the database, but not in the actual block itself,
since it requires recalculating anyway for verification.
2019-01-21 17:18:50 +00:00
|
|
|
* @param long_term_block_weight the long term weight of the block (transactions and all)
|
2016-03-25 06:22:06 +00:00
|
|
|
* @param cumulative_difficulty the accumulated difficulty after this block
|
|
|
|
* @param coins_generated the number of coins generated total after this block
|
|
|
|
* @param txs the transactions in the block
|
|
|
|
*
|
|
|
|
* @return the height of the chain post-addition
|
|
|
|
*/
|
2018-11-11 14:51:03 +00:00
|
|
|
virtual uint64_t add_block( const std::pair<block, blobdata>& blk
|
2018-07-18 21:24:53 +00:00
|
|
|
, size_t block_weight
|
ArticMine's new block weight algorithm
This curbs runaway growth while still allowing substantial
spikes in block weight
Original specification from ArticMine:
here is the scaling proposal
Define: LongTermBlockWeight
Before fork:
LongTermBlockWeight = BlockWeight
At or after fork:
LongTermBlockWeight = min(BlockWeight, 1.4*LongTermEffectiveMedianBlockWeight)
Note: To avoid possible consensus issues over rounding the LongTermBlockWeight for a given block should be calculated to the nearest byte, and stored as a integer in the block itself. The stored LongTermBlockWeight is then used for future calculations of the LongTermEffectiveMedianBlockWeight and not recalculated each time.
Define: LongTermEffectiveMedianBlockWeight
LongTermEffectiveMedianBlockWeight = max(300000, MedianOverPrevious100000Blocks(LongTermBlockWeight))
Change Definition of EffectiveMedianBlockWeight
From (current definition)
EffectiveMedianBlockWeight = max(300000, MedianOverPrevious100Blocks(BlockWeight))
To (proposed definition)
EffectiveMedianBlockWeight = min(max(300000, MedianOverPrevious100Blocks(BlockWeight)), 50*LongTermEffectiveMedianBlockWeight)
Notes:
1) There are no other changes to the existing penalty formula, median calculation, fees etc.
2) There is the requirement to store the LongTermBlockWeight of a block unencrypted in the block itself. This is to avoid possible consensus issues over rounding and also to prevent the calculations from becoming unwieldy as we move away from the fork.
3) When the EffectiveMedianBlockWeight cap is reached it is still possible to mine blocks up to 2x the EffectiveMedianBlockWeight by paying the corresponding penalty.
Note: the long term block weight is stored in the database, but not in the actual block itself,
since it requires recalculating anyway for verification.
2019-01-21 17:18:50 +00:00
|
|
|
, uint64_t long_term_block_weight
|
2014-10-23 19:37:10 +00:00
|
|
|
, const difficulty_type& cumulative_difficulty
|
|
|
|
, const uint64_t& coins_generated
|
2018-11-11 14:51:03 +00:00
|
|
|
, const std::vector<std::pair<transaction, blobdata>>& txs
|
2014-10-23 19:37:10 +00:00
|
|
|
);
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief checks if a block exists
|
|
|
|
*
|
|
|
|
* @param h the hash of the requested block
|
2016-08-30 15:39:33 +00:00
|
|
|
* @param height if non NULL, returns the block's height if found
|
2016-03-25 06:22:06 +00:00
|
|
|
*
|
|
|
|
* @return true if the block exists, otherwise false
|
|
|
|
*/
|
2016-08-30 15:39:33 +00:00
|
|
|
virtual bool block_exists(const crypto::hash& h, uint64_t *height = NULL) const = 0;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief fetches the block with the given hash
|
|
|
|
*
|
|
|
|
* The subclass should return the requested block.
|
|
|
|
*
|
|
|
|
* If the block does not exist, the subclass should throw BLOCK_DNE
|
|
|
|
*
|
|
|
|
* @param h the hash to look for
|
|
|
|
*
|
|
|
|
* @return the block requested
|
|
|
|
*/
|
2017-01-15 16:05:55 +00:00
|
|
|
virtual cryptonote::blobdata get_block_blob(const crypto::hash& h) const = 0;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief fetches the block with the given hash
|
|
|
|
*
|
|
|
|
* Returns the requested block.
|
|
|
|
*
|
|
|
|
* If the block does not exist, the subclass should throw BLOCK_DNE
|
|
|
|
*
|
|
|
|
* @param h the hash to look for
|
|
|
|
*
|
|
|
|
* @return the block requested
|
|
|
|
*/
|
2017-03-10 01:20:38 +00:00
|
|
|
virtual block get_block(const crypto::hash& h) const;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief gets the height of the block with a given hash
|
|
|
|
*
|
|
|
|
* The subclass should return the requested height.
|
|
|
|
*
|
|
|
|
* If the block does not exist, the subclass should throw BLOCK_DNE
|
|
|
|
*
|
|
|
|
* @param h the hash to look for
|
|
|
|
*
|
|
|
|
* @return the height
|
|
|
|
*/
|
2014-12-06 21:37:22 +00:00
|
|
|
virtual uint64_t get_block_height(const crypto::hash& h) const = 0;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief fetch a block header
|
|
|
|
*
|
|
|
|
* The subclass should return the block header from the block with
|
|
|
|
* the given hash.
|
|
|
|
*
|
|
|
|
* If the block does not exist, the subclass should throw BLOCK_DNE
|
|
|
|
*
|
|
|
|
* @param h the hash to look for
|
|
|
|
*
|
|
|
|
* @return the block header
|
|
|
|
*/
|
2014-12-06 21:37:22 +00:00
|
|
|
virtual block_header get_block_header(const crypto::hash& h) const = 0;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
2017-01-15 16:05:55 +00:00
|
|
|
* @brief fetch a block blob by height
|
2016-03-25 06:22:06 +00:00
|
|
|
*
|
|
|
|
* The subclass should return the block at the given height.
|
|
|
|
*
|
|
|
|
* If the block does not exist, that is to say if the blockchain is not
|
|
|
|
* that high, then the subclass should throw BLOCK_DNE
|
|
|
|
*
|
|
|
|
* @param height the height to look for
|
|
|
|
*
|
2017-01-15 16:05:55 +00:00
|
|
|
* @return the block blob
|
|
|
|
*/
|
|
|
|
virtual cryptonote::blobdata get_block_blob_from_height(const uint64_t& height) const = 0;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief fetch a block by height
|
|
|
|
*
|
|
|
|
* If the block does not exist, that is to say if the blockchain is not
|
|
|
|
* that high, then the subclass should throw BLOCK_DNE
|
|
|
|
*
|
|
|
|
* @param height the height to look for
|
|
|
|
*
|
2016-03-25 06:22:06 +00:00
|
|
|
* @return the block
|
|
|
|
*/
|
2017-03-10 01:20:38 +00:00
|
|
|
virtual block get_block_from_height(const uint64_t& height) const;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief fetch a block's timestamp
|
|
|
|
*
|
|
|
|
* The subclass should return the timestamp of the block with the
|
|
|
|
* given height.
|
|
|
|
*
|
|
|
|
* If the block does not exist, the subclass should throw BLOCK_DNE
|
|
|
|
*
|
|
|
|
* @param height the height requested
|
|
|
|
*
|
|
|
|
* @return the timestamp
|
|
|
|
*/
|
2014-12-06 21:37:22 +00:00
|
|
|
virtual uint64_t get_block_timestamp(const uint64_t& height) const = 0;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2018-05-22 13:46:30 +00:00
|
|
|
/**
|
|
|
|
* @brief fetch a block's cumulative number of rct outputs
|
|
|
|
*
|
|
|
|
* The subclass should return the number of rct outputs in the blockchain
|
|
|
|
* up to the block with the given height (inclusive).
|
|
|
|
*
|
|
|
|
* If the block does not exist, the subclass should throw BLOCK_DNE
|
|
|
|
*
|
|
|
|
* @param heights the heights of the blocks requested
|
|
|
|
*
|
|
|
|
* @return the cumulative number of rct outputs
|
|
|
|
*/
|
|
|
|
virtual std::vector<uint64_t> get_block_cumulative_rct_outputs(const std::vector<uint64_t> &heights) const = 0;
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief fetch the top block's timestamp
|
|
|
|
*
|
|
|
|
* The subclass should return the timestamp of the most recent block.
|
|
|
|
*
|
|
|
|
* @return the top block's timestamp
|
|
|
|
*/
|
2014-12-06 21:37:22 +00:00
|
|
|
virtual uint64_t get_top_block_timestamp() const = 0;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
2018-07-18 21:24:53 +00:00
|
|
|
* @brief fetch a block's weight
|
2016-03-25 06:22:06 +00:00
|
|
|
*
|
2018-07-18 21:24:53 +00:00
|
|
|
* The subclass should return the weight of the block with the
|
2016-03-25 06:22:06 +00:00
|
|
|
* given height.
|
|
|
|
*
|
|
|
|
* If the block does not exist, the subclass should throw BLOCK_DNE
|
|
|
|
*
|
|
|
|
* @param height the height requested
|
|
|
|
*
|
2018-07-18 21:24:53 +00:00
|
|
|
* @return the weight
|
2016-03-25 06:22:06 +00:00
|
|
|
*/
|
2018-07-18 21:24:53 +00:00
|
|
|
virtual size_t get_block_weight(const uint64_t& height) const = 0;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2019-02-21 00:13:21 +00:00
|
|
|
/**
|
|
|
|
* @brief fetch the last N blocks' weights
|
|
|
|
*
|
|
|
|
* If there are fewer than N blocks, the returned array will be smaller than N
|
|
|
|
*
|
|
|
|
* @param start_height the height of the first block
* @param count the number of blocks requested
|
|
|
|
*
|
|
|
|
* @return the weights
|
|
|
|
*/
|
|
|
|
virtual std::vector<uint64_t> get_block_weights(uint64_t start_height, size_t count) const = 0;
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief fetch a block's cumulative difficulty
|
|
|
|
*
|
|
|
|
* The subclass should return the cumulative difficulty of the block with the
|
|
|
|
* given height.
|
|
|
|
*
|
|
|
|
* If the block does not exist, the subclass should throw BLOCK_DNE
|
|
|
|
*
|
|
|
|
* @param height the height requested
|
|
|
|
*
|
|
|
|
* @return the cumulative difficulty
|
|
|
|
*/
|
2014-12-06 21:37:22 +00:00
|
|
|
virtual difficulty_type get_block_cumulative_difficulty(const uint64_t& height) const = 0;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief fetch a block's difficulty
|
|
|
|
*
|
|
|
|
* The subclass should return the difficulty of the block with the
|
|
|
|
* given height.
|
|
|
|
*
|
|
|
|
* If the block does not exist, the subclass should throw BLOCK_DNE
|
|
|
|
*
|
|
|
|
* @param height the height requested
|
|
|
|
*
|
|
|
|
* @return the difficulty
|
|
|
|
*/
|
2014-12-06 21:37:22 +00:00
|
|
|
virtual difficulty_type get_block_difficulty(const uint64_t& height) const = 0;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief fetch a block's already generated coins
|
|
|
|
*
|
|
|
|
* The subclass should return the total coins generated as of the block
|
|
|
|
* with the given height.
|
|
|
|
*
|
|
|
|
* If the block does not exist, the subclass should throw BLOCK_DNE
|
|
|
|
*
|
|
|
|
* @param height the height requested
|
|
|
|
*
|
|
|
|
* @return the already generated coins
|
|
|
|
*/
|
2014-12-06 21:37:22 +00:00
|
|
|
virtual uint64_t get_block_already_generated_coins(const uint64_t& height) const = 0;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
ArticMine's new block weight algorithm
This curbs runaway growth while still allowing substantial
spikes in block weight
Original specification from ArticMine:
here is the scaling proposal
Define: LongTermBlockWeight
Before fork:
LongTermBlockWeight = BlockWeight
At or after fork:
LongTermBlockWeight = min(BlockWeight, 1.4*LongTermEffectiveMedianBlockWeight)
Note: To avoid possible consensus issues over rounding the LongTermBlockWeight for a given block should be calculated to the nearest byte, and stored as a integer in the block itself. The stored LongTermBlockWeight is then used for future calculations of the LongTermEffectiveMedianBlockWeight and not recalculated each time.
Define: LongTermEffectiveMedianBlockWeight
LongTermEffectiveMedianBlockWeight = max(300000, MedianOverPrevious100000Blocks(LongTermBlockWeight))
Change Definition of EffectiveMedianBlockWeight
From (current definition)
EffectiveMedianBlockWeight = max(300000, MedianOverPrevious100Blocks(BlockWeight))
To (proposed definition)
EffectiveMedianBlockWeight = min(max(300000, MedianOverPrevious100Blocks(BlockWeight)), 50*LongTermEffectiveMedianBlockWeight)
Notes:
1) There are no other changes to the existing penalty formula, median calculation, fees etc.
2) There is the requirement to store the LongTermBlockWeight of a block unencrypted in the block itself. This is to avoid possible consensus issues over rounding and also to prevent the calculations from becoming unwieldy as we move away from the fork.
3) When the EffectiveMedianBlockWeight cap is reached it is still possible to mine blocks up to 2x the EffectiveMedianBlockWeight by paying the corresponding penalty.
Note: the long term block weight is stored in the database, but not in the actual block itself,
since it requires recalculating anyway for verification.
2019-01-21 17:18:50 +00:00
|
|
|
/**
|
|
|
|
* @brief fetch a block's long term weight
|
|
|
|
*
|
|
|
|
* If the block does not exist, the subclass should throw BLOCK_DNE
|
|
|
|
*
|
|
|
|
* @param height the height requested
|
|
|
|
*
|
|
|
|
* @return the long term weight
|
|
|
|
*/
|
|
|
|
virtual uint64_t get_block_long_term_weight(const uint64_t& height) const = 0;
|
|
|
|
|
2019-02-21 00:13:21 +00:00
|
|
|
/**
|
|
|
|
* @brief fetch the last N blocks' long term weights
|
|
|
|
*
|
|
|
|
* If there are fewer than N blocks, the returned array will be smaller than N
|
|
|
|
*
|
|
|
|
* @param start_height the height of the first block
* @param count the number of blocks requested
|
|
|
|
*
|
|
|
|
* @return the weights
|
|
|
|
*/
|
|
|
|
virtual std::vector<uint64_t> get_long_term_block_weights(uint64_t start_height, size_t count) const = 0;
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief fetch a block's hash
|
|
|
|
*
|
|
|
|
* The subclass should return hash of the block with the
|
|
|
|
* given height.
|
|
|
|
*
|
|
|
|
* If the block does not exist, the subclass should throw BLOCK_DNE
|
|
|
|
*
|
|
|
|
* @param height the height requested
|
|
|
|
*
|
|
|
|
* @return the hash
|
|
|
|
*/
|
2014-12-06 21:37:22 +00:00
|
|
|
virtual crypto::hash get_block_hash_from_height(const uint64_t& height) const = 0;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief fetch a list of blocks
|
|
|
|
*
|
|
|
|
* The subclass should return a vector of blocks with heights starting at
|
|
|
|
* h1 and ending at h2, inclusively.
|
|
|
|
*
|
|
|
|
* If the height range requested goes past the end of the blockchain,
|
|
|
|
* the subclass should throw BLOCK_DNE. (current implementations simply
|
|
|
|
* don't catch this exception as thrown by methods called within)
|
|
|
|
*
|
|
|
|
* @param h1 the start height
|
|
|
|
* @param h2 the end height
|
|
|
|
*
|
|
|
|
* @return a vector of blocks
|
|
|
|
*/
|
2014-12-06 21:37:22 +00:00
|
|
|
virtual std::vector<block> get_blocks_range(const uint64_t& h1, const uint64_t& h2) const = 0;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief fetch a list of block hashes
|
|
|
|
*
|
|
|
|
* The subclass should return a vector of block hashes from blocks with
|
|
|
|
* heights starting at h1 and ending at h2, inclusively.
|
|
|
|
*
|
|
|
|
* If the height range requested goes past the end of the blockchain,
|
|
|
|
* the subclass should throw BLOCK_DNE. (current implementations simply
|
|
|
|
* don't catch this exception as thrown by methods called within)
|
|
|
|
*
|
|
|
|
* @param h1 the start height
|
|
|
|
* @param h2 the end height
|
|
|
|
*
|
|
|
|
* @return a vector of block hashes
|
|
|
|
*/
|
2014-12-06 21:37:22 +00:00
|
|
|
virtual std::vector<crypto::hash> get_hashes_range(const uint64_t& h1, const uint64_t& h2) const = 0;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief fetch the top block's hash
|
|
|
|
*
|
|
|
|
* The subclass should return the hash of the most recent block
|
|
|
|
*
|
2018-11-20 20:19:39 +00:00
|
|
|
* @param block_height if non NULL, returns the height of that block (ie, the blockchain height minus 1)
|
|
|
|
*
|
2016-03-25 06:22:06 +00:00
|
|
|
* @return the top block's hash
|
|
|
|
*/
|
2018-11-20 20:19:39 +00:00
|
|
|
virtual crypto::hash top_block_hash(uint64_t *block_height = NULL) const = 0;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief fetch the top block
|
|
|
|
*
|
|
|
|
* The subclass should return most recent block
|
|
|
|
*
|
|
|
|
* @return the top block
|
|
|
|
*/
|
2014-12-06 21:37:22 +00:00
|
|
|
virtual block get_top_block() const = 0;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief fetch the current blockchain height
|
|
|
|
*
|
|
|
|
* The subclass should return the current blockchain height
|
|
|
|
*
|
|
|
|
* @return the current blockchain height
|
|
|
|
*/
|
2014-12-06 21:37:22 +00:00
|
|
|
virtual uint64_t height() const = 0;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* <!--
|
|
|
|
* TODO: Rewrite (if necessary) such that all calls to remove_* are
|
|
|
|
* done in concrete members of this base class.
|
|
|
|
* -->
|
|
|
|
*
|
|
|
|
* @brief pops the top block off the blockchain
|
|
|
|
*
|
|
|
|
* The subclass should remove the most recent block from the blockchain,
|
|
|
|
* along with all transactions, outputs, and other metadata created as
|
|
|
|
* a result of its addition to the blockchain. Most of this is handled
|
|
|
|
* by the concrete members of the base class provided the subclass correctly
|
|
|
|
* implements remove_* functions.
|
|
|
|
*
|
|
|
|
* The subclass should return by reference the popped block and
|
|
|
|
* its associated transactions
|
|
|
|
*
|
|
|
|
* @param blk return-by-reference the block which was popped
|
|
|
|
* @param txs return-by-reference the transactions from the popped block
|
|
|
|
*/
|
2014-10-30 22:33:35 +00:00
|
|
|
virtual void pop_block(block& blk, std::vector<transaction>& txs);
|
2014-10-06 23:46:25 +00:00
|
|
|
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief check if a transaction with a given hash exists
|
|
|
|
*
|
|
|
|
* The subclass should check if a transaction is stored which has the
|
|
|
|
* given hash and return true if so, false otherwise.
|
|
|
|
*
|
|
|
|
* @param h the hash to check against
|
2016-04-05 20:13:16 +00:00
|
|
|
* @param tx_id (optional) returns the tx_id for the tx hash
|
2016-03-25 06:22:06 +00:00
|
|
|
*
|
|
|
|
* @return true if the transaction exists, otherwise false
|
|
|
|
*/
|
2014-12-06 21:37:22 +00:00
|
|
|
virtual bool tx_exists(const crypto::hash& h) const = 0;
|
2016-04-04 01:10:58 +00:00
|
|
|
virtual bool tx_exists(const crypto::hash& h, uint64_t& tx_id) const = 0;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
|
|
|
// return unlock time of tx with hash <h>
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief fetch a transaction's unlock time/height
|
|
|
|
*
|
|
|
|
* The subclass should return the stored unlock time for the transaction
|
|
|
|
* with the given hash.
|
|
|
|
*
|
|
|
|
* If no such transaction exists, the subclass should throw TX_DNE.
|
|
|
|
*
|
|
|
|
* @param h the hash of the requested transaction
|
|
|
|
*
|
|
|
|
* @return the unlock time/height
|
|
|
|
*/
|
2014-12-06 21:37:22 +00:00
|
|
|
virtual uint64_t get_tx_unlock_time(const crypto::hash& h) const = 0;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
|
|
|
// return tx with hash <h>
|
|
|
|
// throw if no such tx exists
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief fetches the transaction with the given hash
|
|
|
|
*
|
|
|
|
* If the transaction does not exist, the subclass should throw TX_DNE.
|
|
|
|
*
|
|
|
|
* @param h the hash to look for
|
|
|
|
*
|
|
|
|
* @return the transaction with the given hash
|
|
|
|
*/
|
2017-03-10 01:20:38 +00:00
|
|
|
virtual transaction get_tx(const crypto::hash& h) const;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
Pruning
The blockchain prunes seven eighths of prunable tx data.
This saves about two thirds of the blockchain size, while
keeping the node useful as a sync source for an eighth
of the blockchain.
No other data is currently pruned.
There are three ways to prune a blockchain:
- run monerod with --prune-blockchain
- run "prune_blockchain" in the monerod console
- run the monero-blockchain-prune utility
The first two will prune in place. Due to how LMDB works, this
will not reduce the blockchain size on disk. Instead, it will
mark parts of the file as free, so that future data will use
that free space, causing the file to not grow until free space
grows scarce.
The third way will create a second database, a pruned copy of
the original one. Since this is a new file, this one will be
smaller than the original one.
Once the database is pruned, it will stay pruned as it syncs.
That is, there is no need to use --prune-blockchain again, etc.
2018-04-29 22:30:51 +00:00
|
|
|
/**
|
|
|
|
* @brief fetches the transaction base with the given hash
|
|
|
|
*
|
|
|
|
* If the transaction does not exist, the subclass should throw TX_DNE.
|
|
|
|
*
|
|
|
|
* @param h the hash to look for
|
|
|
|
*
|
|
|
|
* @return the transaction with the given hash
|
|
|
|
*/
|
|
|
|
virtual transaction get_pruned_tx(const crypto::hash& h) const;
|
|
|
|
|
2016-12-23 16:38:28 +00:00
|
|
|
/**
|
|
|
|
* @brief fetches the transaction with the given hash
|
|
|
|
*
|
2017-01-15 16:05:55 +00:00
|
|
|
* If the transaction does not exist, the subclass should return false.
|
|
|
|
*
|
|
|
|
* @param h the hash to look for
|
|
|
|
*
|
|
|
|
* @return true iff the transaction was found
|
|
|
|
*/
|
2017-03-10 01:20:38 +00:00
|
|
|
virtual bool get_tx(const crypto::hash& h, transaction &tx) const;
|
2017-01-15 16:05:55 +00:00
|
|
|
|
Pruning
The blockchain prunes seven eighths of prunable tx data.
This saves about two thirds of the blockchain size, while
keeping the node useful as a sync source for an eighth
of the blockchain.
No other data is currently pruned.
There are three ways to prune a blockchain:
- run monerod with --prune-blockchain
- run "prune_blockchain" in the monerod console
- run the monero-blockchain-prune utility
The first two will prune in place. Due to how LMDB works, this
will not reduce the blockchain size on disk. Instead, it will
mark parts of the file as free, so that future data will use
that free space, causing the file to not grow until free space
grows scarce.
The third way will create a second database, a pruned copy of
the original one. Since this is a new file, this one will be
smaller than the original one.
Once the database is pruned, it will stay pruned as it syncs.
That is, there is no need to use --prune-blockchain again, etc.
2018-04-29 22:30:51 +00:00
|
|
|
/**
|
|
|
|
* @brief fetches the transaction base with the given hash
|
|
|
|
*
|
|
|
|
* If the transaction does not exist, the subclass should return false.
|
|
|
|
*
|
|
|
|
* @param h the hash to look for
|
|
|
|
*
|
|
|
|
* @return true iff the transaction was found
|
|
|
|
*/
|
|
|
|
virtual bool get_pruned_tx(const crypto::hash& h, transaction &tx) const;
|
|
|
|
|
2017-01-15 16:05:55 +00:00
|
|
|
/**
|
|
|
|
* @brief fetches the transaction blob with the given hash
|
|
|
|
*
|
2016-12-23 16:38:28 +00:00
|
|
|
* The subclass should return the transaction stored which has the given
|
|
|
|
* hash.
|
|
|
|
*
|
|
|
|
* If the transaction does not exist, the subclass should return false.
|
|
|
|
*
|
|
|
|
* @param h the hash to look for
|
|
|
|
*
|
|
|
|
* @return true iff the transaction was found
|
|
|
|
*/
|
2017-01-15 16:05:55 +00:00
|
|
|
virtual bool get_tx_blob(const crypto::hash& h, cryptonote::blobdata &tx) const = 0;
|
2016-12-23 16:38:28 +00:00
|
|
|
|
2017-10-01 10:24:33 +00:00
|
|
|
/**
|
|
|
|
* @brief fetches the pruned transaction blob with the given hash
|
|
|
|
*
|
|
|
|
* The subclass should return the pruned transaction stored which has the given
|
|
|
|
* hash.
|
|
|
|
*
|
|
|
|
* If the transaction does not exist, the subclass should return false.
|
|
|
|
*
|
|
|
|
* @param h the hash to look for
|
|
|
|
*
|
|
|
|
* @return true iff the transaction was found
|
|
|
|
*/
|
|
|
|
virtual bool get_pruned_tx_blob(const crypto::hash& h, cryptonote::blobdata &tx) const = 0;
|
|
|
|
|
Pruning
The blockchain prunes seven eighths of prunable tx data.
This saves about two thirds of the blockchain size, while
keeping the node useful as a sync source for an eighth
of the blockchain.
No other data is currently pruned.
There are three ways to prune a blockchain:
- run monerod with --prune-blockchain
- run "prune_blockchain" in the monerod console
- run the monero-blockchain-prune utility
The first two will prune in place. Due to how LMDB works, this
will not reduce the blockchain size on disk. Instead, it will
mark parts of the file as free, so that future data will use
that free space, causing the file to not grow until free space
grows scarce.
The third way will create a second database, a pruned copy of
the original one. Since this is a new file, this one will be
smaller than the original one.
Once the database is pruned, it will stay pruned as it syncs.
That is, there is no need to use --prune-blockchain again, etc.
2018-04-29 22:30:51 +00:00
|
|
|
/**
|
|
|
|
* @brief fetches the prunable transaction blob with the given hash
|
|
|
|
*
|
|
|
|
* The subclass should return the prunable transaction stored which has the given
|
|
|
|
* hash.
|
|
|
|
*
|
|
|
|
* If the transaction does not exist, or if we do not have that prunable data,
|
|
|
|
* the subclass should return false.
|
|
|
|
*
|
|
|
|
* @param h the hash to look for
|
|
|
|
*
|
|
|
|
* @return true iff the transaction was found and we have its prunable data
|
|
|
|
*/
|
|
|
|
virtual bool get_prunable_tx_blob(const crypto::hash& h, cryptonote::blobdata &tx) const = 0;
|
|
|
|
|
2017-10-01 10:24:33 +00:00
|
|
|
/**
|
|
|
|
* @brief fetches the prunable transaction hash
|
|
|
|
*
|
|
|
|
* The subclass should return the hash of the prunable transaction data.
|
|
|
|
*
|
|
|
|
* If the transaction hash does not exist, the subclass should return false.
|
|
|
|
*
|
|
|
|
* @param h the tx hash to look for
|
|
|
|
*
|
|
|
|
* @return true iff the transaction was found
|
|
|
|
*/
|
|
|
|
virtual bool get_prunable_tx_hash(const crypto::hash& tx_hash, crypto::hash &prunable_hash) const = 0;
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief fetches the total number of transactions ever
|
|
|
|
*
|
|
|
|
* The subclass should return a count of all the transactions from
|
|
|
|
* all blocks.
|
|
|
|
*
|
|
|
|
* @return the number of transactions in the blockchain
|
|
|
|
*/
|
2014-12-06 21:37:22 +00:00
|
|
|
virtual uint64_t get_tx_count() const = 0;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief fetches a list of transactions based on their hashes
|
|
|
|
*
|
|
|
|
* The subclass should attempt to fetch each transaction referred to by
|
|
|
|
* the hashes passed.
|
|
|
|
*
|
|
|
|
* Currently, if any of the transactions is not in BlockchainDB, the call
|
|
|
|
* to get_tx in the implementation will throw TX_DNE.
|
|
|
|
*
|
|
|
|
* <!-- TODO: decide if this behavior is correct for missing transactions -->
|
|
|
|
*
|
|
|
|
* @param hlist a list of hashes
|
|
|
|
*
|
|
|
|
* @return the list of transactions
|
|
|
|
*/
|
2014-12-06 21:37:22 +00:00
|
|
|
virtual std::vector<transaction> get_tx_list(const std::vector<crypto::hash>& hlist) const = 0;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
|
|
|
// returns height of block that contains transaction with hash <h>
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief fetches the height of a transaction's block
|
|
|
|
*
|
|
|
|
* The subclass should attempt to return the height of the block containing
|
|
|
|
* the transaction with the given hash.
|
|
|
|
*
|
|
|
|
* If the transaction cannot be found, the subclass should throw TX_DNE.
|
|
|
|
*
|
|
|
|
* @param h the hash of the transaction
|
|
|
|
*
|
|
|
|
* @return the height of the transaction's block
|
|
|
|
*/
|
2014-12-06 21:37:22 +00:00
|
|
|
virtual uint64_t get_tx_block_height(const crypto::hash& h) const = 0;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
|
|
|
// returns the total number of outputs of amount <amount>
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief fetches the number of outputs of a given amount
|
|
|
|
*
|
|
|
|
* The subclass should return a count of outputs of the given amount,
|
|
|
|
* or zero if there are none.
|
|
|
|
*
|
|
|
|
* <!-- TODO: should outputs spent with a low mixin (especially 0) be
|
|
|
|
* excluded from the count? -->
|
|
|
|
*
|
|
|
|
* @param amount the output amount being looked up
|
|
|
|
*
|
|
|
|
* @return the number of outputs of the given amount
|
|
|
|
*/
|
2014-12-06 21:37:22 +00:00
|
|
|
virtual uint64_t get_num_outputs(const uint64_t& amount) const = 0;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief return index of the first element (should be hidden, but isn't)
|
|
|
|
*
|
|
|
|
* @return the index
|
|
|
|
*/
|
2015-12-05 18:41:29 +00:00
|
|
|
virtual uint64_t get_indexing_base() const { return 0; }
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief get some of an output's data
|
|
|
|
*
|
|
|
|
* The subclass should return the public key, unlock time, and block height
|
|
|
|
* for the output with the given amount and index, collected in a struct.
|
|
|
|
*
|
|
|
|
* If the output cannot be found, the subclass should throw OUTPUT_DNE.
|
|
|
|
*
|
|
|
|
* If any of these parts cannot be found, but some are, the subclass
|
|
|
|
* should throw DB_ERROR with a message stating as much.
|
|
|
|
*
|
|
|
|
* @param amount the output amount
|
|
|
|
* @param index the output's index (indexed by amount)
|
|
|
|
*
|
|
|
|
* @return the requested output data
|
|
|
|
*/
|
2018-12-30 12:12:11 +00:00
|
|
|
// include_commitment: when false, the implementation may skip fetching the
// output's commitment — NOTE(review): exact effect is implementation-defined,
// confirm against subclasses. (Fixes parameter-name typo "include_commitmemt";
// parameter names in a declaration are not part of the call interface.)
virtual output_data_t get_output_key(const uint64_t& amount, const uint64_t& index, bool include_commitment = true) const = 0;
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief gets an output's tx hash and index
|
|
|
|
*
|
|
|
|
* The subclass should return the hash of the transaction which created the
|
|
|
|
* output with the global index given, as well as its index in that transaction.
|
|
|
|
*
|
|
|
|
* @param index an output's global index
|
|
|
|
*
|
|
|
|
* @return the tx hash and output index
|
|
|
|
*/
|
2014-12-14 20:20:41 +00:00
|
|
|
virtual tx_out_index get_output_tx_and_index_from_global(const uint64_t& index) const = 0;
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief gets an output's tx hash and index
|
|
|
|
*
|
|
|
|
* The subclass should return the hash of the transaction which created the
|
|
|
|
* output with the amount and index given, as well as its index in that
|
|
|
|
* transaction.
|
|
|
|
*
|
|
|
|
* @param amount an output amount
|
|
|
|
* @param index an output's amount-specific index
|
|
|
|
*
|
|
|
|
* @return the tx hash and output index
|
|
|
|
*/
|
2016-08-01 21:16:00 +00:00
|
|
|
virtual tx_out_index get_output_tx_and_index(const uint64_t& amount, const uint64_t& index) const = 0;
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief gets some outputs' tx hashes and indices
|
|
|
|
*
|
|
|
|
* This function is a mirror of
|
|
|
|
* get_output_tx_and_index(const uint64_t& amount, const uint64_t& index),
|
|
|
|
* but for a list of outputs rather than just one.
|
|
|
|
*
|
|
|
|
* @param amount an output amount
|
|
|
|
* @param offsets a list of amount-specific output indices
|
|
|
|
* @param indices return-by-reference a list of tx hashes and output indices (as pairs)
|
|
|
|
*/
|
2016-08-01 21:16:00 +00:00
|
|
|
virtual void get_output_tx_and_index(const uint64_t& amount, const std::vector<uint64_t> &offsets, std::vector<tx_out_index> &indices) const = 0;
|
2016-03-25 06:42:42 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief gets outputs' data
|
|
|
|
*
|
|
|
|
* This function is a mirror of
|
|
|
|
* get_output_data(const uint64_t& amount, const uint64_t& index)
|
|
|
|
* but for a list of outputs rather than just one.
|
|
|
|
*
|
2018-11-07 21:13:00 +00:00
|
|
|
* @param amounts an output amount, or as many as offsets
|
2016-03-25 06:22:06 +00:00
|
|
|
* @param offsets a list of amount-specific output indices
|
|
|
|
* @param outputs return-by-reference a list of outputs' metadata
|
|
|
|
*/
|
2018-12-11 08:50:27 +00:00
|
|
|
virtual void get_output_key(const epee::span<const uint64_t> &amounts, const std::vector<uint64_t> &offsets, std::vector<output_data_t> &outputs, bool allow_partial = false) const = 0;
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* FIXME: Need to check with git blame and ask what this does to
|
|
|
|
* document it
|
|
|
|
*/
|
** CHANGES ARE EXPERIMENTAL (FOR TESTING ONLY)
Blockchain:
1. Optim: Multi-thread long-hash computation when encountering groups of blocks.
2. Optim: Cache verified txs and return result from cache instead of re-checking whenever possible.
3. Optim: Preload output-keys when encoutering groups of blocks. Sort by amount and global-index before bulk querying database and multi-thread when possible.
4. Optim: Disable double spend check on block verification, double spend is already detected when trying to add blocks.
5. Optim: Multi-thread signature computation whenever possible.
6. Patch: Disable locking (recursive mutex) on called functions from check_tx_inputs which causes slowdowns (only seems to happen on ubuntu/VMs??? Reason: TBD)
7. Optim: Removed looped full-tx hash computation when retrieving transactions from pool (???).
8. Optim: Cache difficulty/timestamps (735 blocks) for next-difficulty calculations so that only 2 db reads per new block is needed when a new block arrives (instead of 1470 reads).
Berkeley-DB:
1. Fix: 32-bit data errors causing wrong output global indices and failure to send blocks to peers (etc).
2. Fix: Unable to pop blocks on reorganize due to transaction errors.
3. Patch: Large number of transaction aborts when running multi-threaded bulk queries.
4. Patch: Insufficient locks error when running full sync.
5. Patch: Incorrect db stats when returning from an immediate exit from "pop block" operation.
6. Optim: Add bulk queries to get output global indices.
7. Optim: Modified output_keys table to store public_key+unlock_time+height for single transaction lookup (vs 3)
8. Optim: Used output_keys table retrieve public_keys instead of going through output_amounts->output_txs+output_indices->txs->output:public_key
9. Optim: Added thread-safe buffers used when multi-threading bulk queries.
10. Optim: Added support for nosync/write_nosync options for improved performance (*see --db-sync-mode option for details)
11. Mod: Added checkpoint thread and auto-remove-logs option.
12. *Now usable on 32-bit systems like RPI2.
LMDB:
1. Optim: Added custom comparison for 256-bit key tables (minor speed-up, TBD: get actual effect)
2. Optim: Modified output_keys table to store public_key+unlock_time+height for single transaction lookup (vs 3)
3. Optim: Used output_keys table retrieve public_keys instead of going through output_amounts->output_txs+output_indices->txs->output:public_key
4. Optim: Added support for sync/writemap options for improved performance (*see --db-sync-mode option for details)
5. Mod: Auto resize to +1GB instead of multiplier x1.5
ETC:
1. Minor optimizations for slow-hash for ARM (RPI2). Incomplete.
2. Fix: 32-bit saturation bug when computing next difficulty on large blocks.
[PENDING ISSUES]
1. Berkely db has a very slow "pop-block" operation. This is very noticeable on the RPI2 as it sometimes takes > 10 MINUTES to pop a block during reorganization.
This does not happen very often however, most reorgs seem to take a few seconds but it possibly depends on the number of outputs present. TBD.
2. Berkeley db, possible bug "unable to allocate memory". TBD.
[NEW OPTIONS] (*Currently all enabled for testing purposes)
1. --fast-block-sync arg=[0:1] (default: 1)
a. 0 = Compute long hash per block (may take a while depending on CPU)
b. 1 = Skip long-hash and verify blocks based on embedded known good block hashes (faster, minimal CPU dependence)
2. --db-sync-mode arg=[[safe|fast|fastest]:[sync|async]:[nblocks_per_sync]] (default: fastest:async:1000)
a. safe = fdatasync/fsync (or equivalent) per stored block. Very slow, but safest option to protect against power-out/crash conditions.
b. fast/fastest = Enables asynchronous fdatasync/fsync (or equivalent). Useful for battery operated devices or STABLE systems with UPS and/or systems with battery backed write cache/solid state cache.
Fast - Write meta-data but defer data flush.
Fastest - Defer meta-data and data flush.
Sync - Flush data after nblocks_per_sync and wait.
Async - Flush data after nblocks_per_sync but do not wait for the operation to finish.
3. --prep-blocks-threads arg=[n] (default: 4 or system max threads, whichever is lower)
Max number of threads to use when computing long-hash in groups.
4. --show-time-stats arg=[0:1] (default: 1)
Show benchmark related time stats.
5. --db-auto-remove-logs arg=[0:1] (default: 1)
For berkeley-db only. Auto remove logs if enabled.
**Note: lmdb and berkeley-db have changes to the tables and are not compatible with official git head version.
At the moment, you need a full resync to use this optimized version.
[PERFORMANCE COMPARISON]
**Some figures are approximations only.
Using a baseline machine of an i7-2600K+SSD+(with full pow computation):
1. The optimized lmdb/blockhain core can process blocks up to 585K for ~1.25 hours + download time, so it usually takes 2.5 hours to sync the full chain.
2. The current head with memory can process blocks up to 585K for ~4.2 hours + download time, so it usually takes 5.5 hours to sync the full chain.
3. The current head with lmdb can process blocks up to 585K for ~32 hours + download time and usually takes 36 hours to sync the full chain.
Average processing times (with full pow computation):
lmdb-optimized:
1. tx_ave = 2.5 ms / tx
2. block_ave = 5.87 ms / block
memory-official-repo:
1. tx_ave = 8.85 ms / tx
2. block_ave = 19.68 ms / block
lmdb-official-repo (0f4a036437fd41a5498ee5e74e2422ea6177aa3e)
1. tx_ave = 47.8 ms / tx
2. block_ave = 64.2 ms / block
**Note: The following data denotes processing times only (does not include p2p download time)
lmdb-optimized processing times (with full pow computation):
1. Desktop, Quad-core / 8-threads 2600k (8Mb) - 1.25 hours processing time (--db-sync-mode=fastest:async:1000).
2. Laptop, Dual-core / 4-threads U4200 (3Mb) - 4.90 hours processing time (--db-sync-mode=fastest:async:1000).
3. Embedded, Quad-core / 4-threads Z3735F (2x1Mb) - 12.0 hours processing time (--db-sync-mode=fastest:async:1000).
lmdb-optimized processing times (with per-block-checkpoint)
1. Desktop, Quad-core / 8-threads 2600k (8Mb) - 10 minutes processing time (--db-sync-mode=fastest:async:1000).
berkeley-db optimized processing times (with full pow computation)
1. Desktop, Quad-core / 8-threads 2600k (8Mb) - 1.8 hours processing time (--db-sync-mode=fastest:async:1000).
2. RPI2. Improved from estimated 3 months(???) into 2.5 days (*Need 2AMP supply + Clock:1Ghz + [usb+ssd] to achieve this speed) (--db-sync-mode=fastest:async:1000).
berkeley-db optimized processing times (with per-block-checkpoint)
1. RPI2. 12-15 hours (*Need 2AMP supply + Clock:1Ghz + [usb+ssd] to achieve this speed) (--db-sync-mode=fastest:async:1000).
2015-07-10 20:09:32 +00:00
|
|
|
/**
 * @brief whether it is safe to run bulk index queries from multiple threads
 *
 * NOTE(review): undocumented in the original; from the name this appears to
 * report whether the backing database supports multi-threaded bulk index
 * lookups — confirm against the concrete implementations.
 *
 * @return true if bulk index queries may be multi-threaded, otherwise false
 */
virtual bool can_thread_bulk_indices() const = 0;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief gets output indices (amount-specific) for a transaction's outputs
|
|
|
|
*
|
|
|
|
* The subclass should fetch the amount-specific output indices for each
|
2016-04-05 20:13:16 +00:00
|
|
|
* output in the transaction with the given ID.
|
2016-03-25 06:22:06 +00:00
|
|
|
*
|
|
|
|
* If the transaction does not exist, the subclass should throw TX_DNE.
|
|
|
|
*
|
|
|
|
* If an output cannot be found, the subclass should throw OUTPUT_DNE.
|
|
|
|
*
|
2016-04-05 20:13:16 +00:00
|
|
|
* @param tx_id a transaction ID
|
2018-12-16 13:28:49 +00:00
|
|
|
* @param n_txes how many txes to get data for, starting with tx_id
|
2016-03-25 06:22:06 +00:00
|
|
|
*
|
|
|
|
* @return a list of amount-specific output indices
|
|
|
|
*/
|
2018-12-16 13:28:49 +00:00
|
|
|
virtual std::vector<std::vector<uint64_t>> get_tx_amount_output_indices(const uint64_t tx_id, size_t n_txes = 1) const = 0;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief check if a key image is stored as spent
|
|
|
|
*
|
|
|
|
* @param img the key image to check for
|
|
|
|
*
|
|
|
|
* @return true if the image is present, otherwise false
|
|
|
|
*/
|
2014-12-06 21:37:22 +00:00
|
|
|
virtual bool has_key_image(const crypto::key_image& img) const = 0;
|
2014-10-06 23:46:25 +00:00
|
|
|
|
2017-05-14 13:06:55 +00:00
|
|
|
/**
|
|
|
|
* @brief add a txpool transaction
|
|
|
|
*
|
|
|
|
* @param details the details of the transaction to add
|
|
|
|
*/
|
2018-10-31 08:31:13 +00:00
|
|
|
virtual void add_txpool_tx(const crypto::hash &txid, const cryptonote::blobdata &blob, const txpool_tx_meta_t& details) = 0;
|
2017-05-14 13:06:55 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief update a txpool transaction's metadata
|
|
|
|
*
|
|
|
|
* @param txid the txid of the transaction to update
|
|
|
|
* @param details the details of the transaction to update
|
|
|
|
*/
|
|
|
|
virtual void update_txpool_tx(const crypto::hash &txid, const txpool_tx_meta_t& details) = 0;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief get the number of transactions in the txpool
|
|
|
|
*/
|
2017-11-08 12:06:41 +00:00
|
|
|
virtual uint64_t get_txpool_tx_count(bool include_unrelayed_txes = true) const = 0;
|
2017-05-14 13:06:55 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief check whether a txid is in the txpool
|
|
|
|
*/
|
|
|
|
virtual bool txpool_has_tx(const crypto::hash &txid) const = 0;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief remove a txpool transaction
|
|
|
|
*
|
|
|
|
* @param txid the transaction id of the transaction to remove
|
|
|
|
*/
|
|
|
|
virtual void remove_txpool_tx(const crypto::hash& txid) = 0;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief get a txpool transaction's metadata
|
|
|
|
*
|
|
|
|
* @param txid the transaction id of the transaction to look up
|
2017-12-14 17:09:30 +00:00
|
|
|
* @param meta the metadata to return
|
2017-05-14 13:06:55 +00:00
|
|
|
*
|
2017-12-14 17:09:30 +00:00
|
|
|
* @return true if the tx meta was found, false otherwise
|
2017-05-14 13:06:55 +00:00
|
|
|
*/
|
2017-12-14 17:09:30 +00:00
|
|
|
virtual bool get_txpool_tx_meta(const crypto::hash& txid, txpool_tx_meta_t &meta) const = 0;
|
2017-05-14 13:06:55 +00:00
|
|
|
|
2017-06-11 14:10:18 +00:00
|
|
|
/**
|
|
|
|
* @brief get a txpool transaction's blob
|
|
|
|
*
|
|
|
|
* @param txid the transaction id of the transaction to look up
|
|
|
|
* @param bd the blob to return
|
|
|
|
*
|
|
|
|
* @return true if the txid was in the txpool, false otherwise
|
|
|
|
*/
|
|
|
|
virtual bool get_txpool_tx_blob(const crypto::hash& txid, cryptonote::blobdata &bd) const = 0;
|
|
|
|
|
2017-05-14 13:06:55 +00:00
|
|
|
/**
|
|
|
|
* @brief get a txpool transaction's blob
|
|
|
|
*
|
|
|
|
* @param txid the transaction id of the transaction to look up
|
|
|
|
*
|
|
|
|
* @return the blob for that transaction
|
|
|
|
*/
|
|
|
|
virtual cryptonote::blobdata get_txpool_tx_blob(const crypto::hash& txid) const = 0;
|
|
|
|
|
2018-11-24 14:49:04 +00:00
|
|
|
/**
|
|
|
|
* @brief prune output data for the given amount
|
|
|
|
*
|
|
|
|
* @param amount the amount for which to prune data
|
|
|
|
*/
|
|
|
|
virtual void prune_outputs(uint64_t amount) = 0;
|
|
|
|
|
Pruning
The blockchain prunes seven eighths of prunable tx data.
This saves about two thirds of the blockchain size, while
keeping the node useful as a sync source for an eighth
of the blockchain.
No other data is currently pruned.
There are three ways to prune a blockchain:
- run monerod with --prune-blockchain
- run "prune_blockchain" in the monerod console
- run the monero-blockchain-prune utility
The first two will prune in place. Due to how LMDB works, this
will not reduce the blockchain size on disk. Instead, it will
mark parts of the file as free, so that future data will use
that free space, causing the file to not grow until free space
grows scarce.
The third way will create a second database, a pruned copy of
the original one. Since this is a new file, this one will be
smaller than the original one.
Once the database is pruned, it will stay pruned as it syncs.
That is, there is no need to use --prune-blockchain again, etc.
2018-04-29 22:30:51 +00:00
|
|
|
/**
|
|
|
|
* @brief get the blockchain pruning seed
|
|
|
|
* @return the blockchain pruning seed
|
|
|
|
*/
|
|
|
|
virtual uint32_t get_blockchain_pruning_seed() const = 0;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief prunes the blockchain
|
|
|
|
* @param pruning_seed the seed to use, 0 for default (highly recommended)
|
|
|
|
* @return success iff true
|
|
|
|
*/
|
|
|
|
virtual bool prune_blockchain(uint32_t pruning_seed = 0) = 0;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief prunes recent blockchain changes as needed, iff pruning is enabled
|
|
|
|
* @return success iff true
|
|
|
|
*/
|
|
|
|
virtual bool update_pruning() = 0;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief checks pruning was done correctly, iff enabled
|
|
|
|
* @return success iff true
|
|
|
|
*/
|
|
|
|
virtual bool check_pruning() = 0;
|
|
|
|
|
2019-04-04 00:15:57 +00:00
|
|
|
/**
|
|
|
|
* @brief get the max block size
|
|
|
|
*/
|
|
|
|
virtual uint64_t get_max_block_size() = 0;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief add a new max block size
|
|
|
|
*
|
|
|
|
* The max block size will be the maximum of sz and the current block size
|
|
|
|
*
|
|
|
|
* @param sz the block size
|
|
|
|
*/
|
|
|
|
|
|
|
|
virtual void add_max_block_size(uint64_t sz) = 0;
|
2017-05-14 13:06:55 +00:00
|
|
|
/**
|
|
|
|
* @brief runs a function over all txpool transactions
|
|
|
|
*
|
|
|
|
* The subclass should run the passed function for each txpool tx it has
|
|
|
|
* stored, passing the tx id and metadata as its parameters.
|
|
|
|
*
|
|
|
|
* If any call to the function returns false, the subclass should return
|
|
|
|
* false. Otherwise, the subclass returns true.
|
|
|
|
*
|
|
|
|
* @param std::function fn the function to run
|
|
|
|
*
|
|
|
|
* @return false if the function returns false for any transaction, otherwise true
|
|
|
|
*/
|
2017-11-08 12:06:41 +00:00
|
|
|
virtual bool for_all_txpool_txes(std::function<bool(const crypto::hash&, const txpool_tx_meta_t&, const cryptonote::blobdata*)>, bool include_blob = false, bool include_unrelayed_txes = true) const = 0;
|
2017-05-14 13:06:55 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief runs a function over all key images stored
|
|
|
|
*
|
|
|
|
* The subclass should run the passed function for each key image it has
|
|
|
|
* stored, passing the key image as its parameter.
|
|
|
|
*
|
|
|
|
* If any call to the function returns false, the subclass should return
|
|
|
|
* false. Otherwise, the subclass returns true.
|
|
|
|
*
|
|
|
|
* @param std::function fn the function to run
|
|
|
|
*
|
|
|
|
* @return false if the function returns false for any key image, otherwise true
|
|
|
|
*/
|
2015-10-25 10:45:25 +00:00
|
|
|
virtual bool for_all_key_images(std::function<bool(const crypto::key_image&)>) const = 0;
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
2017-06-01 12:29:51 +00:00
|
|
|
* @brief runs a function over a range of blocks
|
2016-03-25 06:22:06 +00:00
|
|
|
*
|
2017-06-01 12:29:51 +00:00
|
|
|
* The subclass should run the passed function for each block in the
|
|
|
|
* specified range, passing (block_height, block_hash, block) as its parameters.
|
2016-03-25 06:22:06 +00:00
|
|
|
*
|
|
|
|
* If any call to the function returns false, the subclass should return
|
|
|
|
* false. Otherwise, the subclass returns true.
|
|
|
|
*
|
|
|
|
* The subclass should throw DB_ERROR if any of the expected values are
|
|
|
|
* not found. Current implementations simply return false.
|
|
|
|
*
|
2017-06-01 12:29:51 +00:00
|
|
|
* @param h1 the start height
|
|
|
|
* @param h2 the end height
|
2016-03-25 06:22:06 +00:00
|
|
|
* @param std::function fn the function to run
|
|
|
|
*
|
|
|
|
* @return false if the function returns false for any block, otherwise true
|
|
|
|
*/
|
2017-06-01 12:29:51 +00:00
|
|
|
virtual bool for_blocks_range(const uint64_t& h1, const uint64_t& h2, std::function<bool(uint64_t, const crypto::hash&, const cryptonote::block&)>) const = 0;
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief runs a function over all transactions stored
|
|
|
|
*
|
|
|
|
* The subclass should run the passed function for each transaction it has
|
|
|
|
* stored, passing (transaction_hash, transaction) as its parameters.
|
|
|
|
*
|
|
|
|
* If any call to the function returns false, the subclass should return
|
|
|
|
* false. Otherwise, the subclass returns true.
|
|
|
|
*
|
|
|
|
* The subclass should throw DB_ERROR if any of the expected values are
|
|
|
|
* not found. Current implementations simply return false.
|
|
|
|
*
|
|
|
|
* @param std::function fn the function to run
|
2017-10-01 10:24:33 +00:00
|
|
|
* @param bool pruned whether to only get pruned tx data, or the whole
|
2016-03-25 06:22:06 +00:00
|
|
|
*
|
|
|
|
* @return false if the function returns false for any transaction, otherwise true
|
|
|
|
*/
|
2017-10-01 10:24:33 +00:00
|
|
|
virtual bool for_all_transactions(std::function<bool(const crypto::hash&, const cryptonote::transaction&)>, bool pruned) const = 0;
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief runs a function over all outputs stored
|
|
|
|
*
|
|
|
|
* The subclass should run the passed function for each output it has
|
|
|
|
* stored, passing (amount, transaction_hash, tx_local_output_index)
|
|
|
|
* as its parameters.
|
|
|
|
*
|
|
|
|
* If any call to the function returns false, the subclass should return
|
|
|
|
* false. Otherwise, the subclass returns true.
|
|
|
|
*
|
|
|
|
* The subclass should throw DB_ERROR if any of the expected values are
|
|
|
|
* not found. Current implementations simply return false.
|
|
|
|
*
|
|
|
|
* @param std::function f the function to run
|
|
|
|
*
|
|
|
|
* @return false if the function returns false for any output, otherwise true
|
|
|
|
*/
|
2018-02-19 11:15:15 +00:00
|
|
|
virtual bool for_all_outputs(std::function<bool(uint64_t amount, const crypto::hash &tx_hash, uint64_t height, size_t tx_idx)> f) const = 0;
|
|
|
|
/**
 * @brief runs a function over all stored outputs of a given amount
 *
 * The subclass should run the passed function for each stored output of
 * the given amount, passing the output's block height as its parameter.
 *
 * If any call to the function returns false, the subclass should return
 * false. Otherwise, the subclass returns true.
 *
 * @param amount the amount whose outputs to iterate over
 * @param f the function to run
 *
 * @return false if the function returns false for any output, otherwise true
 */
virtual bool for_all_outputs(uint64_t amount, const std::function<bool(uint64_t height)> &f) const = 0;
|
|
|
|
|
2015-10-25 10:45:25 +00:00
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
//
|
2015-09-20 17:41:38 +00:00
|
|
|
// Hard fork related storage
|
2016-03-25 06:22:06 +00:00
|
|
|
//
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief sets which hardfork version a height is on
|
|
|
|
*
|
|
|
|
* @param height the height
|
|
|
|
* @param version the version
|
|
|
|
*/
|
2015-09-20 17:41:38 +00:00
|
|
|
virtual void set_hard_fork_version(uint64_t height, uint8_t version) = 0;
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief checks which hardfork version a height is on
|
|
|
|
*
|
|
|
|
* @param height the height
|
|
|
|
*
|
|
|
|
* @return the version
|
|
|
|
*/
|
2015-09-20 17:41:38 +00:00
|
|
|
virtual uint8_t get_hard_fork_version(uint64_t height) const = 0;
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief verify hard fork info in database
|
|
|
|
*/
|
2016-01-15 14:00:58 +00:00
|
|
|
virtual void check_hard_fork_info() = 0;
|
2016-03-25 06:22:06 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @brief delete hard fork info from database
|
|
|
|
*/
|
2016-02-05 01:15:37 +00:00
|
|
|
virtual void drop_hard_fork_info() = 0;
|
2015-09-20 17:41:38 +00:00
|
|
|
|
2016-03-26 14:30:23 +00:00
|
|
|
/**
|
|
|
|
* @brief return a histogram of outputs on the blockchain
|
|
|
|
*
|
|
|
|
* @param amounts optional set of amounts to lookup
|
2016-08-01 21:16:00 +00:00
|
|
|
* @param unlocked whether to restrict count to unlocked outputs
|
2016-09-17 14:45:51 +00:00
|
|
|
* @param recent_cutoff timestamp to determine whether an output is recent
|
2018-03-22 17:51:58 +00:00
|
|
|
* @param min_count return only amounts with at least that many instances
|
2016-03-26 14:30:23 +00:00
|
|
|
*
|
|
|
|
* @return a set of amount/instances
|
|
|
|
*/
|
2018-03-22 17:51:58 +00:00
|
|
|
virtual std::map<uint64_t, std::tuple<uint64_t, uint64_t, uint64_t>> get_output_histogram(const std::vector<uint64_t> &amounts, bool unlocked, uint64_t recent_cutoff, uint64_t min_count) const = 0;
|
2016-03-26 14:30:23 +00:00
|
|
|
|
2018-05-16 21:41:50 +00:00
|
|
|
/**
 * @brief gets the distribution of outputs of a given amount over a height range
 *
 * NOTE(review): undocumented in the original; semantics below are inferred
 * from the parameter names — confirm against the concrete implementations.
 *
 * @param amount the output amount to look up
 * @param from_height the first block height to consider
 * @param to_height the last block height to consider
 * @param distribution return-by-reference the per-height distribution data
 * @param base return-by-reference presumably the count of outputs below
 *        from_height — TODO confirm
 *
 * @return true on success, otherwise false
 */
virtual bool get_output_distribution(uint64_t amount, uint64_t from_height, uint64_t to_height, std::vector<uint64_t> &distribution, uint64_t &base) const = 0;
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief is BlockchainDB in read-only mode?
|
|
|
|
*
|
|
|
|
* @return true if in read-only mode, otherwise false
|
|
|
|
*/
|
2015-12-26 22:27:35 +00:00
|
|
|
virtual bool is_read_only() const = 0;
|
|
|
|
|
2018-06-17 21:07:15 +00:00
|
|
|
/**
|
|
|
|
* @brief get disk space requirements
|
|
|
|
*
|
|
|
|
* @return the size required
|
|
|
|
*/
|
|
|
|
virtual uint64_t get_database_size() const = 0;
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
// TODO: this should perhaps be (or call) a series of functions which
|
|
|
|
// progressively update through version updates
|
|
|
|
/**
|
|
|
|
* @brief fix up anything that may be wrong due to past bugs
|
|
|
|
*/
|
2015-12-06 20:48:17 +00:00
|
|
|
virtual void fixup();
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
/**
|
|
|
|
* @brief set whether or not to automatically remove logs
|
|
|
|
*
|
|
|
|
* This function is only relevant for one implementation (BlockchainBDB), but
|
|
|
|
* is here to keep BlockchainDB users implementation-agnostic.
|
|
|
|
*
|
|
|
|
* @param auto_remove whether or not to auto-remove logs
|
|
|
|
*/
|
** CHANGES ARE EXPERIMENTAL (FOR TESTING ONLY)
Blockchain:
1. Optim: Multi-thread long-hash computation when encountering groups of blocks.
2. Optim: Cache verified txs and return result from cache instead of re-checking whenever possible.
3. Optim: Preload output-keys when encoutering groups of blocks. Sort by amount and global-index before bulk querying database and multi-thread when possible.
4. Optim: Disable double spend check on block verification, double spend is already detected when trying to add blocks.
5. Optim: Multi-thread signature computation whenever possible.
6. Patch: Disable locking (recursive mutex) on called functions from check_tx_inputs which causes slowdowns (only seems to happen on ubuntu/VMs??? Reason: TBD)
7. Optim: Removed looped full-tx hash computation when retrieving transactions from pool (???).
8. Optim: Cache difficulty/timestamps (735 blocks) for next-difficulty calculations so that only 2 db reads per new block is needed when a new block arrives (instead of 1470 reads).
Berkeley-DB:
1. Fix: 32-bit data errors causing wrong output global indices and failure to send blocks to peers (etc).
2. Fix: Unable to pop blocks on reorganize due to transaction errors.
3. Patch: Large number of transaction aborts when running multi-threaded bulk queries.
4. Patch: Insufficient locks error when running full sync.
5. Patch: Incorrect db stats when returning from an immediate exit from "pop block" operation.
6. Optim: Add bulk queries to get output global indices.
7. Optim: Modified output_keys table to store public_key+unlock_time+height for single transaction lookup (vs 3)
8. Optim: Used output_keys table retrieve public_keys instead of going through output_amounts->output_txs+output_indices->txs->output:public_key
9. Optim: Added thread-safe buffers used when multi-threading bulk queries.
10. Optim: Added support for nosync/write_nosync options for improved performance (*see --db-sync-mode option for details)
11. Mod: Added checkpoint thread and auto-remove-logs option.
12. *Now usable on 32-bit systems like RPI2.
LMDB:
1. Optim: Added custom comparison for 256-bit key tables (minor speed-up, TBD: get actual effect)
2. Optim: Modified output_keys table to store public_key+unlock_time+height for single transaction lookup (vs 3)
3. Optim: Used output_keys table retrieve public_keys instead of going through output_amounts->output_txs+output_indices->txs->output:public_key
4. Optim: Added support for sync/writemap options for improved performance (*see --db-sync-mode option for details)
5. Mod: Auto resize to +1GB instead of multiplier x1.5
ETC:
1. Minor optimizations for slow-hash for ARM (RPI2). Incomplete.
2. Fix: 32-bit saturation bug when computing next difficulty on large blocks.
[PENDING ISSUES]
1. Berkely db has a very slow "pop-block" operation. This is very noticeable on the RPI2 as it sometimes takes > 10 MINUTES to pop a block during reorganization.
This does not happen very often however, most reorgs seem to take a few seconds but it possibly depends on the number of outputs present. TBD.
2. Berkeley db, possible bug "unable to allocate memory". TBD.
[NEW OPTIONS] (*Currently all enabled for testing purposes)
1. --fast-block-sync arg=[0:1] (default: 1)
a. 0 = Compute long hash per block (may take a while depending on CPU)
b. 1 = Skip long-hash and verify blocks based on embedded known good block hashes (faster, minimal CPU dependence)
2. --db-sync-mode arg=[[safe|fast|fastest]:[sync|async]:[nblocks_per_sync]] (default: fastest:async:1000)
a. safe = fdatasync/fsync (or equivalent) per stored block. Very slow, but safest option to protect against power-out/crash conditions.
b. fast/fastest = Enables asynchronous fdatasync/fsync (or equivalent). Useful for battery operated devices or STABLE systems with UPS and/or systems with battery backed write cache/solid state cache.
Fast - Write meta-data but defer data flush.
Fastest - Defer meta-data and data flush.
Sync - Flush data after nblocks_per_sync and wait.
Async - Flush data after nblocks_per_sync but do not wait for the operation to finish.
3. --prep-blocks-threads arg=[n] (default: 4 or system max threads, whichever is lower)
Max number of threads to use when computing long-hash in groups.
4. --show-time-stats arg=[0:1] (default: 1)
Show benchmark related time stats.
5. --db-auto-remove-logs arg=[0:1] (default: 1)
For berkeley-db only. Auto remove logs if enabled.
**Note: lmdb and berkeley-db have changes to the tables and are not compatible with official git head version.
At the moment, you need a full resync to use this optimized version.
[PERFORMANCE COMPARISON]
**Some figures are approximations only.
Using a baseline machine of an i7-2600K+SSD+(with full pow computation):
1. The optimized lmdb/blockhain core can process blocks up to 585K for ~1.25 hours + download time, so it usually takes 2.5 hours to sync the full chain.
2. The current head with memory can process blocks up to 585K for ~4.2 hours + download time, so it usually takes 5.5 hours to sync the full chain.
3. The current head with lmdb can process blocks up to 585K for ~32 hours + download time and usually takes 36 hours to sync the full chain.
Average processing times (with full pow computation):
lmdb-optimized:
1. tx_ave = 2.5 ms / tx
2. block_ave = 5.87 ms / block
memory-official-repo:
1. tx_ave = 8.85 ms / tx
2. block_ave = 19.68 ms / block
lmdb-official-repo (0f4a036437fd41a5498ee5e74e2422ea6177aa3e)
1. tx_ave = 47.8 ms / tx
2. block_ave = 64.2 ms / block
**Note: The following data denotes processing times only (does not include p2p download time)
lmdb-optimized processing times (with full pow computation):
1. Desktop, Quad-core / 8-threads 2600k (8Mb) - 1.25 hours processing time (--db-sync-mode=fastest:async:1000).
2. Laptop, Dual-core / 4-threads U4200 (3Mb) - 4.90 hours processing time (--db-sync-mode=fastest:async:1000).
3. Embedded, Quad-core / 4-threads Z3735F (2x1Mb) - 12.0 hours processing time (--db-sync-mode=fastest:async:1000).
lmdb-optimized processing times (with per-block-checkpoint)
1. Desktop, Quad-core / 8-threads 2600k (8Mb) - 10 minutes processing time (--db-sync-mode=fastest:async:1000).
berkeley-db optimized processing times (with full pow computation)
1. Desktop, Quad-core / 8-threads 2600k (8Mb) - 1.8 hours processing time (--db-sync-mode=fastest:async:1000).
2. RPI2. Improved from estimated 3 months(???) into 2.5 days (*Need 2AMP supply + Clock:1Ghz + [usb+ssd] to achieve this speed) (--db-sync-mode=fastest:async:1000).
berkeley-db optimized processing times (with per-block-checkpoint)
1. RPI2. 12-15 hours (*Need 2AMP supply + Clock:1Ghz + [usb+ssd] to achieve this speed) (--db-sync-mode=fastest:async:1000).
2015-07-10 20:09:32 +00:00
|
|
|
/**
 * @brief set whether or not to automatically remove logs
 *
 * Stores the flag in m_auto_remove_logs; only relevant for implementations
 * that keep log files (e.g. BlockchainBDB), but kept here so BlockchainDB
 * users stay implementation-agnostic.
 *
 * @param auto_remove whether or not to auto-remove logs
 */
void set_auto_remove_logs(bool auto_remove) { m_auto_remove_logs = auto_remove; }
|
|
|
|
|
2016-03-25 06:22:06 +00:00
|
|
|
bool m_open; //!< Whether or not the BlockchainDB is open/ready for use
|
|
|
|
mutable epee::critical_section m_synchronization_lock; //!< A lock, currently for when BlockchainLMDB needs to resize the backing db file
|
|
|
|
|
2014-10-06 23:46:25 +00:00
|
|
|
}; // class BlockchainDB
|
|
|
|
|
2017-08-19 14:27:13 +00:00
|
|
|
/**
 * @brief factory: creates a BlockchainDB instance for the given backend type
 *
 * NOTE(review): undocumented in the original; presumably returns a pointer
 * owned by the caller, and the accepted type strings match the available
 * backends (e.g. LMDB) — confirm against the implementation.
 *
 * @param db_type the name of the database backend to instantiate
 *
 * @return a pointer to the new BlockchainDB instance
 */
BlockchainDB *new_db(const std::string& db_type);
|
2014-10-06 23:46:25 +00:00
|
|
|
|
|
|
|
} // namespace cryptonote
|
2014-10-15 22:33:53 +00:00
|
|
|
|
|
|
|
#endif // BLOCKCHAIN_DB_H
|