# Copyright (c) 2014-2017, The Monero Project
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be
# used to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
# THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers

if ( IOS )
INCLUDE ( CmakeLists_IOS.txt )
endif ( )

cmake_minimum_required ( VERSION 2.8.7 )

project ( monero )

function ( die msg )
if ( NOT WIN32 )
string ( ASCII 27 Esc )
set ( ColourReset "${Esc}[m" )
set ( BoldRed "${Esc}[1;31m" )
else ( )
set ( ColourReset "" )
set ( BoldRed "" )
endif ( )
message ( FATAL_ERROR "${BoldRed}${msg}${ColourReset}" )
endfunction ( )
if ( NOT CMAKE_BUILD_TYPE )
set ( CMAKE_BUILD_TYPE Release CACHE STRING "Build type" FORCE )
message ( STATUS "Setting default build type: ${CMAKE_BUILD_TYPE}" )
endif ( )
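
# Usage note (standard CMake behaviour, shown here only as an example): override the
# Release default above with e.g. `cmake -DCMAKE_BUILD_TYPE=Debug ..`.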

string ( TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LOWER )

# ARCH defines the target architecture, either by an explicit identifier or
# one of the following two keywords. By default, ARCH has a value of 'native':
# target arch = host arch, and the binary is not portable. When ARCH is set to the
# string 'default', no -march argument is passed, which creates a binary that is
# portable across processors in the same family as the host processor. When ARCH
# is not set to an explicit identifier, CMake's built-in CMAKE_SYSTEM_PROCESSOR is
# used to identify the target architecture and to direct logic in this script.
# Since ARCH is a cached variable, it will not be set on the first cmake invocation.
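# Example invocations (illustrative values only):
#   cmake ..                  # ARCH=native: fastest, but the binary is not portable
#   cmake -DARCH=default ..   # portable within the host CPU family (no -march passed)
#   cmake -DARCH=armv7-a ..   # explicit architecture identifier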
if ( NOT ARCH OR ARCH STREQUAL "" OR ARCH STREQUAL "native" OR ARCH STREQUAL "default" )
set ( ARCH_ID "${CMAKE_SYSTEM_PROCESSOR}" )
else ( )
set ( ARCH_ID "${ARCH}" )
endif ( )
string ( TOLOWER "${ARCH_ID}" ARM_ID )
string ( SUBSTRING "${ARM_ID}" 0 3 ARM_TEST )

if ( ARM_TEST STREQUAL "arm" )
set ( ARM 1 )
string ( SUBSTRING "${ARM_ID}" 0 5 ARM_TEST )
if ( ARM_TEST STREQUAL "armv6" )
set ( ARM6 1 )
endif ( )
if ( ARM_TEST STREQUAL "armv7" )
set ( ARM7 1 )
endif ( )
endif ( )

if ( ARM_ID STREQUAL "aarch64" OR ARM_ID STREQUAL "arm64" OR ARM_ID STREQUAL "armv8-a" )
set ( ARM 1 )
set ( ARM8 1 )
set ( ARCH "armv8-a" )
endif ( )

if ( ARCH_ID STREQUAL "ppc64le" )
set ( PPC64LE 1 )
endif ( )

if ( WIN32 OR ARM )
set ( OPT_FLAGS_RELEASE "-O2" )
else ( )
set ( OPT_FLAGS_RELEASE "-Ofast" )
endif ( )
# BUILD_TAG is used to select the build type to check for a new version
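# Example (hypothetical tag value): `cmake -DBUILD_TAG=linux-x64 ..` embeds the tag so the
# binary can check for a new version of the same build flavour.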
if ( BUILD_TAG )
message ( STATUS "Building build tag ${BUILD_TAG}" )
add_definitions ( "-DBUILD_TAG=${BUILD_TAG}" )
else ( )
message ( STATUS "Building without build tag" )
endif ( )
set ( CMAKE_C_FLAGS_RELEASE "-DNDEBUG ${OPT_FLAGS_RELEASE}" )
set ( CMAKE_CXX_FLAGS_RELEASE "-DNDEBUG ${OPT_FLAGS_RELEASE}" )
# set this to 0 if per-block checkpoint needs to be disabled
set ( PER_BLOCK_CHECKPOINT 1 )
if ( PER_BLOCK_CHECKPOINT )
add_definitions ( "-DPER_BLOCK_CHECKPOINT" )
endif ( )
list ( INSERT CMAKE_MODULE_PATH 0
"${CMAKE_SOURCE_DIR}/cmake" )

if ( NOT DEFINED ENV{DEVELOPER_LOCAL_TOOLS} )
message ( STATUS "Could not find DEVELOPER_LOCAL_TOOLS in env (not required)" )
set ( BOOST_IGNORE_SYSTEM_PATHS_DEFAULT OFF )
elseif ( "$ENV{DEVELOPER_LOCAL_TOOLS}" EQUAL 1 )
message ( STATUS "Found: env DEVELOPER_LOCAL_TOOLS = 1" )
set ( BOOST_IGNORE_SYSTEM_PATHS_DEFAULT ON )
else ( )
message ( STATUS "Found: env DEVELOPER_LOCAL_TOOLS = 0" )
set ( BOOST_IGNORE_SYSTEM_PATHS_DEFAULT OFF )
endif ( )

message ( STATUS "BOOST_IGNORE_SYSTEM_PATHS defaults to ${BOOST_IGNORE_SYSTEM_PATHS_DEFAULT}" )
option ( BOOST_IGNORE_SYSTEM_PATHS "Ignore boost system paths for local boost installation" ${BOOST_IGNORE_SYSTEM_PATHS_DEFAULT} )
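
# Example (illustrative): with a locally built Boost, export DEVELOPER_LOCAL_TOOLS=1 before
# running cmake, or pass -DBOOST_IGNORE_SYSTEM_PATHS=ON directly, to skip the system Boost.
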
if ( NOT DEFINED ENV{DEVELOPER_LIBUNBOUND_OLD} )
message ( STATUS "Could not find DEVELOPER_LIBUNBOUND_OLD in env (not required)" )
elseif ( "$ENV{DEVELOPER_LIBUNBOUND_OLD}" EQUAL 1 )
message ( STATUS "Found: env DEVELOPER_LIBUNBOUND_OLD = 1, will use the work around" )
add_definitions ( -DDEVELOPER_LIBUNBOUND_OLD )
elseif ( "$ENV{DEVELOPER_LIBUNBOUND_OLD}" EQUAL 0 )
message ( STATUS "Found: env DEVELOPER_LIBUNBOUND_OLD = 0" )
else ( )
message ( STATUS "Found: env DEVELOPER_LIBUNBOUND_OLD with bad value. Will NOT use the work around" )
endif ( )

set_property ( GLOBAL PROPERTY USE_FOLDERS ON )
enable_testing ( )

option ( BUILD_DOCUMENTATION "Build the Doxygen documentation." ON )
option ( BUILD_TESTS "Build tests." OFF )

# Check whether we're on a 32-bit or 64-bit system
if ( CMAKE_SIZEOF_VOID_P EQUAL "8" )
set ( DEFAULT_BUILD_64 ON )
else ( )
set ( DEFAULT_BUILD_64 OFF )
endif ( )

option ( BUILD_64 "Build for 64-bit? 'OFF' builds for 32-bit." ${DEFAULT_BUILD_64} )

if ( BUILD_64 )
set ( ARCH_WIDTH "64" )
else ( )
set ( ARCH_WIDTH "32" )
endif ( )

message ( STATUS "Building for a ${ARCH_WIDTH}-bit system" )

# Check if we're on FreeBSD so we can exclude the local miniupnpc (it should be installed from ports instead)
# CMAKE_SYSTEM_NAME checks are commonly known, but specifically taken from libsdl's CMakeLists
if ( CMAKE_SYSTEM_NAME MATCHES "kFreeBSD.*|FreeBSD" )
set ( FREEBSD TRUE )
endif ( )

# Check if we're on DragonFly BSD. See the README.md for build instructions.
if ( CMAKE_SYSTEM_NAME MATCHES "DragonFly.*" )
set ( DRAGONFLY TRUE )
endif ( )

# Check if we're on OpenBSD. See the README.md for build instructions.
if ( CMAKE_SYSTEM_NAME MATCHES "kOpenBSD.*|OpenBSD.*" )
set ( OPENBSD TRUE )
endif ( )

# TODO: check bsdi, NetBSD, to see if they need the same FreeBSD changes
#
# elseif(CMAKE_SYSTEM_NAME MATCHES "kNetBSD.*|NetBSD.*")
# set(NETBSD TRUE)
# elseif(CMAKE_SYSTEM_NAME MATCHES ".*BSDI.*")
# set(BSDI TRUE)

include_directories ( external/easylogging++ src contrib/epee/include external )

if ( APPLE )
include_directories ( SYSTEM /usr/include/malloc )
if ( POLICY CMP0042 )
cmake_policy ( SET CMP0042 NEW )
endif ( )
endif ( )

if ( MSVC OR MINGW )
set ( DEFAULT_STATIC true )
else ( )
set ( DEFAULT_STATIC false )
endif ( )

option ( STATIC "Link libraries statically" ${DEFAULT_STATIC} )

# This is a CMake built-in switch that concerns internal libraries
if ( NOT DEFINED BUILD_SHARED_LIBS AND NOT STATIC AND CMAKE_BUILD_TYPE_LOWER STREQUAL "debug" )
set ( BUILD_SHARED_LIBS ON )
endif ( )

if ( BUILD_SHARED_LIBS )
message ( STATUS "Building internal libraries with position independent code" )
set ( PIC_FLAG "-fPIC" )
add_definitions ( "-DBUILD_SHARED_LIBS" )
else ( )
message ( STATUS "Building internal libraries as static" )
endif ( )
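
# Examples (illustrative):
#   cmake -DSTATIC=ON ..            # link libraries statically (default on MSVC/MinGW)
#   cmake -DBUILD_SHARED_LIBS=ON .. # build internal libraries as shared objects
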
if ( MINGW )
string ( REGEX MATCH "^[^/]:/[^/]*" msys2_install_path "${CMAKE_C_COMPILER}" )
message ( STATUS "MSYS location: ${msys2_install_path}" )
set ( CMAKE_INCLUDE_PATH "${msys2_install_path}/mingw${ARCH_WIDTH}/include" )
# This is necessary because otherwise CMake will make Boost libraries -lfoo
# rather than a full path. Unfortunately, this makes the shared libraries get
# linked due to a bug in CMake which misses putting -static flags around the
# -lfoo arguments.
set ( DEFLIB ${msys2_install_path}/mingw${ARCH_WIDTH}/lib )
list ( REMOVE_ITEM CMAKE_C_IMPLICIT_LINK_DIRECTORIES ${DEFLIB} )
list ( REMOVE_ITEM CMAKE_CXX_IMPLICIT_LINK_DIRECTORIES ${DEFLIB} )
endif ( )

if ( STATIC )
if ( MSVC )
set ( CMAKE_FIND_LIBRARY_SUFFIXES .lib .dll.a .a ${CMAKE_FIND_LIBRARY_SUFFIXES} )
else ( )
set ( CMAKE_FIND_LIBRARY_SUFFIXES .a ${CMAKE_FIND_LIBRARY_SUFFIXES} )
endif ( )
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DZMQ_STATIC" )
endif ( )

# Set default blockchain storage location:
# memory was the default in Cryptonote before Monero implemented LMDB; it still works but is unnecessary.
# set(DATABASE memory)
set ( DATABASE lmdb )
if ( DEFINED ENV{DATABASE} )
set ( DATABASE $ENV{DATABASE} )
message ( STATUS "DATABASE set: ${DATABASE}" )
else ( )
message ( STATUS "Could not find DATABASE in env (not required unless you want to change database type from default: ${DATABASE})" )
endif ( )
set ( BERKELEY_DB_OVERRIDE 0 )
if ( DEFINED ENV{BERKELEY_DB} )
set ( BERKELEY_DB_OVERRIDE 1 )
set ( BERKELEY_DB $ENV{BERKELEY_DB} )
else ( )
set ( BERKELEY_DB 0 )
endif ( )

if ( DATABASE STREQUAL "lmdb" )
message ( STATUS "Using LMDB as default DB type" )
set ( BLOCKCHAIN_DB DB_LMDB )
add_definitions ( "-DDEFAULT_DB_TYPE=\"lmdb\"" )
elseif ( DATABASE STREQUAL "berkeleydb" )
find_package ( BerkeleyDB )
if ( NOT BERKELEY_DB )
die ( "Found BerkeleyDB includes, but could not find BerkeleyDB library. Please make sure you have installed libdb and libdb-dev / libdb++-dev or the equivalent." )
else ( )
message ( STATUS "Found BerkeleyDB include (db.h) in ${BERKELEY_DB_INCLUDE_DIR}" )
if ( BERKELEY_DB_LIBRARIES )
message ( STATUS "Found BerkeleyDB shared library" )
set ( BDB_STATIC false CACHE BOOL "BDB Static flag" )
set ( BDB_INCLUDE ${BERKELEY_DB_INCLUDE_DIR} CACHE STRING "BDB include path" )
set ( BDB_LIBRARY ${BERKELEY_DB_LIBRARIES} CACHE STRING "BDB library name" )
set ( BDB_LIBRARY_DIRS "" CACHE STRING "BDB Library dirs" )
set ( BERKELEY_DB 1 )
else ( )
die ( "Found BerkeleyDB includes, but could not find BerkeleyDB library. Please make sure you have installed libdb and libdb-dev / libdb++-dev or the equivalent." )
endif ( )
endif ( )

message ( STATUS "Using Berkeley DB as default DB type" )
add_definitions ( "-DDEFAULT_DB_TYPE=\"berkeley\"" )
else ( )
die ( "Invalid database type: ${DATABASE}" )
endif ( )
if ( BERKELEY_DB )
add_definitions ( "-DBERKELEY_DB" )
endif ( )
add_definitions ( "-DBLOCKCHAIN_DB=${BLOCKCHAIN_DB}" )
# Can't install hook in static build on OSX, because OSX linker does not support --wrap
# On ARM, having the libunwind package (with .so's only) installed breaks static link.
# When possible, avoid stack tracing using libunwind in favor of using easylogging++.
if ( APPLE )
set ( DEFAULT_STACK_TRACE OFF )
set ( LIBUNWIND_LIBRARIES "" )
elseif ( CMAKE_C_COMPILER_ID STREQUAL "GNU" AND NOT MINGW )
set ( DEFAULT_STACK_TRACE ON )
set ( STACK_TRACE_LIB "easylogging++" ) # for diag output only
set ( LIBUNWIND_LIBRARIES "" )
elseif ( ARM AND STATIC )
set ( DEFAULT_STACK_TRACE OFF )
set ( LIBUNWIND_LIBRARIES "" )
else ( )
find_package ( Libunwind )
if ( LIBUNWIND_FOUND )
set ( DEFAULT_STACK_TRACE ON )
set ( STACK_TRACE_LIB "libunwind" ) # for diag output only
else ( )
set ( DEFAULT_STACK_TRACE OFF )
set ( LIBUNWIND_LIBRARIES "" )
endif ( )
endif ( )

option ( STACK_TRACE "Install a hook that dumps stack on exception" ${DEFAULT_STACK_TRACE} )
if ( STACK_TRACE )
message ( STATUS "Stack trace on exception enabled (using ${STACK_TRACE_LIB})" )
else ( )
message ( STATUS "Stack trace on exception disabled" )
endif ( )
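# Hedged usage note (not taken from this file's docs): the computed default can be
# overridden either way at configure time, e.g.
#   cmake -DSTACK_TRACE=OFF ..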
2017-09-20 00:38:43 +00:00
if ( UNIX AND NOT APPLE )
# Note that at the time of this writing the -Wstrict-prototypes flag added below will make this fail
set ( THREADS_PREFER_PTHREAD_FLAG ON )
find_package ( Threads )
endif ( )
2017-06-20 15:22:24 +00:00
# Handle OpenSSL, used for sha256sum on binary updates and light wallet ssl http
2017-04-02 10:19:25 +00:00
if ( APPLE AND NOT IOS )
2017-02-21 15:11:12 +00:00
if ( NOT OpenSSL_DIR )
EXECUTE_PROCESS ( COMMAND brew --prefix openssl
OUTPUT_VARIABLE OPENSSL_ROOT_DIR
OUTPUT_STRIP_TRAILING_WHITESPACE )
message ( STATUS "Using OpenSSL found at ${OPENSSL_ROOT_DIR}" )
endif ( )
endif ( )
find_package ( OpenSSL REQUIRED )
2017-08-05 12:28:50 +00:00
message ( STATUS "Using OpenSSL include dir at ${OPENSSL_INCLUDE_DIR}" )
include_directories ( ${ OPENSSL_INCLUDE_DIR } )
2017-04-02 10:19:25 +00:00
if ( STATIC AND NOT IOS )
2017-02-21 15:11:12 +00:00
if ( UNIX )
2017-09-20 00:38:43 +00:00
set ( OPENSSL_LIBRARIES "${OPENSSL_LIBRARIES};${CMAKE_DL_LIBS};${CMAKE_THREAD_LIBS_INIT}" )
2017-02-21 15:11:12 +00:00
endif ( )
endif ( )
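# Hedged usage note: when OpenSSL is installed outside the default search paths and the
# Homebrew lookup above does not apply, the standard FindOpenSSL hint can be passed
# instead (the path below is only an illustrative assumption):
#   cmake -DOPENSSL_ROOT_DIR=/opt/openssl ..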
2017-05-17 01:17:08 +00:00
add_definitions ( -DAUTO_INITIALIZE_EASYLOGGINGPP )
2014-10-06 13:00:06 +00:00
add_subdirectory ( external )
# Final setup for miniupnpc
2017-04-02 10:19:25 +00:00
if ( UPNP_STATIC OR IOS )
2014-10-06 13:00:06 +00:00
add_definitions ( "-DUPNP_STATIC" )
else ( )
add_definitions ( "-DUPNP_DYNAMIC" )
include_directories ( ${ UPNP_INCLUDE } )
endif ( )
# Final setup for libunbound
2014-09-24 18:38:24 +00:00
include_directories ( ${ UNBOUND_INCLUDE } )
2014-10-21 22:39:15 +00:00
link_directories ( ${ UNBOUND_LIBRARY_DIRS } )
2014-09-17 21:44:35 +00:00
2017-03-25 19:06:14 +00:00
# Final setup for easylogging++
include_directories ( ${ EASYLOGGING_INCLUDE } )
link_directories ( ${ EASYLOGGING_LIBRARY_DIRS } )
2014-12-01 19:15:50 +00:00
2015-01-18 23:30:31 +00:00
# Final setup for liblmdb
2014-12-06 15:47:49 +00:00
include_directories ( ${ LMDB_INCLUDE } )
2014-10-23 19:35:49 +00:00
2015-03-16 07:12:54 +00:00
# Final setup for Berkeley DB
2015-07-10 20:09:32 +00:00
if ( BERKELEY_DB )
2015-04-07 18:27:37 +00:00
include_directories ( ${ BDB_INCLUDE } )
endif ( )
2015-03-16 07:12:54 +00:00
2016-03-28 18:00:18 +00:00
# Final setup for libunwind
include_directories ( ${ LIBUNWIND_INCLUDE } )
link_directories ( ${ LIBUNWIND_LIBRARY_DIRS } )
2014-03-03 22:07:58 +00:00
if ( MSVC )
add_definitions ( "/bigobj /MP /W3 /GS- /D_CRT_SECURE_NO_WARNINGS /wd4996 /wd4345 /D_WIN32_WINNT=0x0600 /DWIN32_LEAN_AND_MEAN /DGTEST_HAS_TR1_TUPLE=0 /FIinline_c.h /D__SSE4_1__" )
2016-07-29 01:19:01 +00:00
# set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /Dinline=__inline")
set ( CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /STACK:10485760" )
2014-03-03 22:07:58 +00:00
if ( STATIC )
foreach ( VAR CMAKE_C_FLAGS_DEBUG CMAKE_CXX_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE CMAKE_CXX_FLAGS_RELEASE )
string ( REPLACE "/MD" "/MT" ${ VAR } "${${VAR}}" )
endforeach ( )
endif ( )
include_directories ( SYSTEM src/platform/msc )
else ( )
2016-09-18 02:23:15 +00:00
include ( TestCXXAcceptsFlag )
2016-08-28 05:37:34 +00:00
set ( ARCH native CACHE STRING "CPU to build for: -march value or 'default' to not pass -march at all" )
message ( STATUS "Building on ${CMAKE_SYSTEM_PROCESSOR} for ${ARCH}" )
if ( ARCH STREQUAL "default" )
2014-03-03 22:07:58 +00:00
set ( ARCH_FLAG "" )
2017-06-27 17:06:01 +00:00
elseif ( PPC64LE )
set ( ARCH_FLAG "-mcpu=${ARCH}" )
2017-06-23 08:02:07 +00:00
elseif ( IOS AND ARCH STREQUAL "arm64" )
message ( STATUS "IOS: Changing arch from arm64 to armv8" )
set ( ARCH_FLAG "-march=armv8" )
2014-03-03 22:07:58 +00:00
else ( )
2016-08-28 05:42:59 +00:00
set ( ARCH_FLAG "-march=${ARCH}" )
2014-03-03 22:07:58 +00:00
endif ( )
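# Hedged usage sketch for the ARCH cache entry above (values are illustrative):
#   cmake -DARCH=default ..   # do not pass any -march flag
#   cmake -DARCH=armv7-a ..   # pass an explicit -march value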
2016-07-29 03:34:05 +00:00
set ( WARNINGS "-Wall -Wextra -Wpointer-arith -Wundef -Wvla -Wwrite-strings -Wno-error=extra -Wno-error=deprecated-declarations -Wno-unused-parameter -Wno-unused-variable -Wno-error=unused-variable -Wno-error=undef -Wno-error=uninitialized" )
2014-11-17 23:10:07 +00:00
if ( NOT MINGW )
# cmake: do not pass -Werror when building tests
# The test harnesses generated by the Google Test framework currently trigger
#   "warning: -fassociative-math disabled; other options take precedence"
# (the optimization is turned on indirectly by -Ofast and then forcibly disabled),
# and there is no -Wno-error=* flag to downgrade this particular warning
# (see gcc --help=warnings).
# Alternatives considered:
#   - disabling the optimization explicitly with -fno-associative-math (seems worse);
#   - not passing -Ofast for the tests build, but the tests should be built with
#     exactly the same optimization flags as the code under test;
#   - dropping -Werror from the entire build, but keeping it stops warnings from
#     being left in.
# Implementation note: rather than filtering -Werror out of CMAKE_{C,CXX}_FLAGS
# (surprising to anyone reading the code that adds -Werror) or relying on the order
# in which subdirectories are included (cryptic), CMAKE_{C,CXX}_FLAGS are explicitly
# set to different values before including the respective subdirectory.
# Testing done: compared the compiler invocations for non-test sources via
# `make VERBOSE=1` with and without this change; the only difference is the position
# of -Werror, so the produced binaries are unchanged.
2016-07-05 01:21:02 +00:00
set ( WARNINGS_AS_ERRORS_FLAG "-Werror" )
2014-11-17 23:10:07 +00:00
endif ( )
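# A minimal sketch of the approach described above (the subdirectory names are
# assumptions for illustration, not this file's actual wiring): -Werror is appended
# only for non-test subdirectories and the flags are restored before the tests
# subdirectory is included.
#   set ( SAVED_CXX_FLAGS "${CMAKE_CXX_FLAGS}" )
#   set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${WARNINGS_AS_ERRORS_FLAG}" )
#   add_subdirectory ( src )    # built with -Werror
#   set ( CMAKE_CXX_FLAGS "${SAVED_CXX_FLAGS}" )
#   add_subdirectory ( tests )  # built without -Werror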
2014-03-03 22:07:58 +00:00
if ( CMAKE_C_COMPILER_ID STREQUAL "Clang" )
2016-08-28 11:28:05 +00:00
if ( ARM )
2016-07-11 01:11:18 +00:00
set ( WARNINGS "${WARNINGS} -Wno-error=inline-asm" )
endif ( )
2014-03-03 22:07:58 +00:00
else ( )
# cmake: make warning about headers not fatal
# On older Boost and/or OS releases, including <boost/asio.hpp> (e.g. from
# src/common/download.cpp) eventually drags in /usr/include/sys/poll.h, which emits:
#   /usr/include/sys/poll.h:1:2: warning: #warning redirecting incorrect
#     #include <sys/poll.h> to <poll.h> [-Wcpp]
# so -Wcpp must not be treated as an error.
2017-09-09 02:20:44 +00:00
set ( WARNINGS "${WARNINGS} -Wlogical-op -Wno-error=maybe-uninitialized -Wno-error=cpp" )
2014-03-03 22:07:58 +00:00
endif ( )
if ( MINGW )
2014-08-06 16:32:59 +00:00
set ( WARNINGS "${WARNINGS} -Wno-error=unused-value -Wno-error=unused-but-set-variable" )
2016-06-21 16:16:25 +00:00
set ( MINGW_FLAG "${MINGW_FLAG} -DWIN32_LEAN_AND_MEAN" )
2014-08-06 16:43:01 +00:00
set ( Boost_THREADAPI win32 )
2014-03-03 22:07:58 +00:00
include_directories ( SYSTEM src/platform/mingw )
2014-11-17 23:10:52 +00:00
# mingw doesn't support LTO (multiple definition errors at link time)
set ( USE_LTO_DEFAULT false )
2016-07-29 01:19:01 +00:00
set ( CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--stack,10485760" )
2016-01-02 19:39:03 +00:00
if ( NOT BUILD_64 )
add_definitions ( -DWINVER=0x0501 -D_WIN32_WINNT=0x0501 )
endif ( )
2014-03-03 22:07:58 +00:00
endif ( )
set ( C_WARNINGS "-Waggregate-return -Wnested-externs -Wold-style-definition -Wstrict-prototypes" )
set ( CXX_WARNINGS "-Wno-reorder -Wno-missing-field-initializers" )
2015-05-31 13:39:56 +00:00
try_compile ( STATIC_ASSERT_RES "${CMAKE_CURRENT_BINARY_DIR}/static-assert" "${CMAKE_CURRENT_SOURCE_DIR}/cmake/test-static-assert.c" COMPILE_DEFINITIONS "-std=c11" )
2014-03-03 22:07:58 +00:00
if ( STATIC_ASSERT_RES )
set ( STATIC_ASSERT_FLAG "" )
else ( )
set ( STATIC_ASSERT_FLAG "-Dstatic_assert=_Static_assert" )
endif ( )
2015-01-26 21:19:53 +00:00
2017-01-05 01:11:05 +00:00
try_compile ( STATIC_ASSERT_CPP_RES "${CMAKE_CURRENT_BINARY_DIR}/static-assert" "${CMAKE_CURRENT_SOURCE_DIR}/cmake/test-static-assert.cpp" COMPILE_DEFINITIONS "-std=c++11" )
if ( STATIC_ASSERT_CPP_RES )
set ( STATIC_ASSERT_CPP_FLAG "" )
else ( )
set ( STATIC_ASSERT_CPP_FLAG "-Dstatic_assert=_Static_assert" )
endif ( )
2016-08-29 18:23:57 +00:00
option ( COVERAGE "Enable profiling for test coverage report" 0 )
if ( COVERAGE )
message ( STATUS "Building with profiling for test coverage report" )
set ( COVERAGE_FLAGS "-fprofile-arcs -ftest-coverage --coverage" )
endif ( )
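# Hedged workflow sketch (the coverage tooling below is an assumption, not part of
# this build system):
#   cmake -DCOVERAGE=ON .. && make
#   # run the test binaries, then collect the .gcda data, e.g. with lcov:
#   lcov --capture --directory . --output-file coverage.info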
2016-07-05 01:36:55 +00:00
# With GCC 6.1.1 the compiled binary malfunctions due to aliasing. Until that
# is fixed in the code (Issue #847), force compiler to be conservative.
2016-07-29 01:19:01 +00:00
set ( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-strict-aliasing" )
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-strict-aliasing" )
2016-07-05 01:36:55 +00:00
2015-01-26 21:19:53 +00:00
option ( NO_AES "Explicitly disable AES support" ${ NO_AES } )
2017-10-05 16:04:41 +00:00
if ( NO_AES )
message ( STATUS "AES support explicitly disabled" )
set ( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DNO_AES" )
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DNO_AES" )
elseif ( NOT ARM AND NOT PPC64LE )
2016-06-21 23:53:07 +00:00
message ( STATUS "AES support enabled" )
2016-07-29 01:19:01 +00:00
set ( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -maes" )
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -maes" )
2017-06-27 17:06:01 +00:00
elseif ( PPC64LE )
message ( STATUS "AES support not available on ppc64le" )
2016-09-18 02:23:15 +00:00
elseif ( ARM6 )
message ( STATUS "AES support not available on ARMv6" )
elseif ( ARM7 )
message ( STATUS "AES support not available on ARMv7" )
elseif ( ARM8 )
CHECK_CXX_ACCEPTS_FLAG ( "-march=${ARCH}+crypto" ARCH_PLUS_CRYPTO )
if ( ARCH_PLUS_CRYPTO )
2016-09-18 02:26:18 +00:00
message ( STATUS "Crypto extensions enabled for ARMv8" )
2016-09-18 02:23:15 +00:00
set ( ARCH_FLAG "-march=${ARCH}+crypto" )
else ( )
2016-09-18 02:26:18 +00:00
message ( STATUS "Crypto extensions unavailable on your ARMv8 device" )
2016-09-18 02:23:15 +00:00
endif ( )
2015-01-26 21:19:53 +00:00
else ( )
2016-06-21 23:53:07 +00:00
message ( STATUS "AES support disabled" )
2015-01-26 21:19:53 +00:00
endif ( )
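# Hedged example: AES support can also be switched off explicitly, regardless of the
# detection above:
#   cmake -DNO_AES=ON ..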
2015-04-06 12:00:09 +00:00
2016-11-11 00:35:58 +00:00
set ( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c11 -D_GNU_SOURCE ${MINGW_FLAG} ${STATIC_ASSERT_FLAG} ${WARNINGS} ${C_WARNINGS} ${ARCH_FLAG} ${COVERAGE_FLAGS} ${PIC_FLAG}" )
2017-01-05 01:11:05 +00:00
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -D_GNU_SOURCE ${MINGW_FLAG} ${STATIC_ASSERT_CPP_FLAG} ${WARNINGS} ${CXX_WARNINGS} ${ARCH_FLAG} ${COVERAGE_FLAGS} ${PIC_FLAG}" )
2016-09-18 02:23:15 +00:00
2016-08-29 10:13:55 +00:00
if ( ARM )
message ( STATUS "Setting FPU Flags for ARM Processors" )
#NB NEON hardware does not fully implement the IEEE 754 standard for floating-point arithmetic
#Need custom assembly code to take full advantage of NEON SIMD
#Cortex-A5/9 -mfpu=neon-fp16
#Cortex-A7/15 -mfpu=neon-vfpv4
#Cortex-A8 -mfpu=neon
2016-09-18 02:23:15 +00:00
#ARMv8 - FP and SIMD on by default for all ARMv8-A series, NO -mfpu setting needed
2016-08-29 10:13:55 +00:00
#For custom -mtune, processor IDs for ARMv8-A series:
#0xd04 - Cortex-A35
#0xd07 - Cortex-A57
#0xd08 - Cortex-A72
#0xd09 - Cortex-A73
if ( NOT ARM8 )
CHECK_CXX_ACCEPTS_FLAG ( -mfpu=vfp3-d16 CXX_ACCEPTS_VFP3_D16 )
CHECK_CXX_ACCEPTS_FLAG ( -mfpu=vfp4 CXX_ACCEPTS_VFP4 )
CHECK_CXX_ACCEPTS_FLAG ( -mfloat-abi=hard CXX_ACCEPTS_MFLOAT_HARD )
CHECK_CXX_ACCEPTS_FLAG ( -mfloat-abi=softfp CXX_ACCEPTS_MFLOAT_SOFTFP )
endif ( )
2015-04-06 12:00:09 +00:00
2016-08-29 10:13:55 +00:00
if ( ARM8 )
CHECK_CXX_ACCEPTS_FLAG ( -mfix-cortex-a53-835769 CXX_ACCEPTS_MFIX_CORTEX_A53_835769 )
CHECK_CXX_ACCEPTS_FLAG ( -mfix-cortex-a53-843419 CXX_ACCEPTS_MFIX_CORTEX_A53_843419 )
endif ( )
if ( ARM6 )
message ( STATUS "Selecting VFP for ARMv6" )
set ( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=vfp" )
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfpu=vfp" )
endif ( ARM6 )
if ( ARM7 )
if ( CXX_ACCEPTS_VFP3_D16 AND NOT CXX_ACCEPTS_VFP4 )
message ( STATUS "Selecting VFP3 for ARMv7" )
set ( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=vfp3-d16" )
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfpu=vfp3-d16" )
endif ( )
if ( CXX_ACCEPTS_VFP4 )
message ( STATUS "Selecting VFP4 for ARMv7" )
set ( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=vfp4" )
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfpu=vfp4" )
endif ( )
if ( CXX_ACCEPTS_MFLOAT_HARD )
message ( STATUS "Setting Hardware ABI for Floating Point" )
set ( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfloat-abi=hard" )
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfloat-abi=hard" )
endif ( )
if ( CXX_ACCEPTS_MFLOAT_SOFTFP AND NOT CXX_ACCEPTS_MFLOAT_HARD )
message ( STATUS "Setting Software ABI for Floating Point" )
set ( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfloat-abi=softfp" )
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfloat-abi=softfp" )
endif ( )
endif ( ARM7 )
if ( ARM8 )
if ( CXX_ACCEPTS_MFIX_CORTEX_A53_835769 )
message ( STATUS "Enabling Cortex-A53 workaround 835769" )
set ( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfix-cortex-a53-835769" )
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfix-cortex-a53-835769" )
endif ( )
if ( CXX_ACCEPTS_MFIX_CORTEX_A53_843419 )
message ( STATUS "Enabling Cortex-A53 workaround 843419" )
set ( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfix-cortex-a53-843419" )
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfix-cortex-a53-843419" )
endif ( )
endif ( ARM8 )
endif ( ARM )
2015-07-10 20:09:32 +00:00
2017-04-02 10:19:25 +00:00
if ( ANDROID AND NOT BUILD_GUI_DEPS STREQUAL "ON" OR IOS )
2017-01-05 01:11:05 +00:00
#From Android 5: "only position independent executables (PIE) are supported"
2017-04-02 10:19:25 +00:00
message ( STATUS "Enabling PIE executable" )
2017-01-05 01:11:05 +00:00
set ( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIE" )
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIE" )
set ( CMAKE_EXE_LINKER_FLAGS "${CMAKE_CXX_FLAGS} -fPIE -pie" )
endif ( )
2014-04-30 20:50:06 +00:00
if ( APPLE )
2016-07-29 01:19:01 +00:00
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DGTEST_HAS_TR1_TUPLE=0" )
2014-04-30 20:50:06 +00:00
endif ( )
2016-08-29 18:23:57 +00:00
set ( DEBUG_FLAGS "-g3" )
2014-03-03 22:07:58 +00:00
if ( CMAKE_C_COMPILER_ID STREQUAL "GNU" AND NOT ( CMAKE_C_COMPILER_VERSION VERSION_LESS 4.8 ) )
2016-08-29 18:23:57 +00:00
set ( DEBUG_FLAGS "${DEBUG_FLAGS} -Og " )
2014-03-03 22:07:58 +00:00
else ( )
2016-08-29 18:23:57 +00:00
set ( DEBUG_FLAGS "${DEBUG_FLAGS} -O0 " )
2014-03-03 22:07:58 +00:00
endif ( )
2014-08-06 16:52:25 +00:00
2014-08-07 23:34:10 +00:00
if ( NOT DEFINED USE_LTO_DEFAULT )
2017-03-20 12:24:30 +00:00
set ( USE_LTO_DEFAULT false )
2014-08-07 23:34:10 +00:00
endif ( )
set ( USE_LTO ${ USE_LTO_DEFAULT } CACHE BOOL "Use Link-Time Optimization (Release mode only)" )
2014-09-15 20:47:26 +00:00
if ( CMAKE_CXX_COMPILER_ID STREQUAL "Clang" )
2015-05-26 09:07:58 +00:00
# There is a clang bug that prevents compiling code that uses AES-NI intrinsics when -flto is enabled, so explicitly disable LTO
2014-10-21 18:24:49 +00:00
set ( USE_LTO false )
2014-09-15 20:47:26 +00:00
endif ( )
2015-05-26 09:07:58 +00:00
2014-08-06 16:52:25 +00:00
if ( USE_LTO )
2014-05-22 11:00:48 +00:00
set ( RELEASE_FLAGS "${RELEASE_FLAGS} -flto" )
2014-08-06 16:52:25 +00:00
if ( STATIC )
set ( RELEASE_FLAGS "${RELEASE_FLAGS} -ffat-lto-objects" )
endif ( )
# Since gcc 4.9 the LTO format is non-standard (slim), so we need the gcc-specific ar and ranlib binaries
2016-12-15 10:27:53 +00:00
if ( CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND NOT ( CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9.0 ) AND NOT OPENBSD AND NOT DRAGONFLY )
2016-09-17 03:59:46 +00:00
# When invoking cmake on distributions on which gcc's binaries are prefixed
# with an arch-specific triplet, the user must specify -DCHOST=<prefix>
2016-09-01 02:21:22 +00:00
if ( DEFINED CHOST )
2016-09-17 03:59:46 +00:00
set ( CMAKE_AR "${CHOST}-gcc-ar" )
set ( CMAKE_RANLIB "${CHOST}-gcc-ranlib" )
2016-09-10 20:56:40 +00:00
else ( )
set ( CMAKE_AR "gcc-ar" )
set ( CMAKE_RANLIB "gcc-ranlib" )
2016-09-01 02:21:22 +00:00
endif ( )
2014-08-06 16:52:25 +00:00
endif ( )
2014-05-22 11:00:48 +00:00
endif ( )
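# Hedged examples for the LTO knobs above (the CHOST triplet is illustrative only):
#   cmake -DUSE_LTO=ON ..                             # opt into -flto for Release builds
#   cmake -DUSE_LTO=ON -DCHOST=arm-linux-gnueabihf .. # use <prefix>-gcc-ar / <prefix>-gcc-ranlib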
2014-08-06 16:52:25 +00:00
2014-03-03 22:07:58 +00:00
set ( CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${DEBUG_FLAGS}" )
set ( CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${DEBUG_FLAGS}" )
set ( CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${RELEASE_FLAGS}" )
set ( CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${RELEASE_FLAGS}" )
2016-07-26 01:06:12 +00:00
2016-08-27 22:47:07 +00:00
if ( STATIC )
# STATIC already configures most deps to be linked in statically,
# here we make more deps static if the platform permits it
if ( MINGW )
# On Windows, this is as close to fully-static as we get:
# this leaves only deps on /c/Windows/system32/*.dll
set ( STATIC_FLAGS "-static" )
2016-12-15 10:27:53 +00:00
elseif ( NOT ( APPLE OR FREEBSD OR OPENBSD OR DRAGONFLY ) )
2016-08-27 22:47:07 +00:00
# On Linux, we don't support fully static build, but these can be static
set ( STATIC_FLAGS "-static-libgcc -static-libstdc++" )
endif ( )
set ( CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${STATIC_FLAGS} " )
2016-07-26 02:30:59 +00:00
endif ( )
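# Hedged example: a mostly-static binary is requested at configure time with
#   cmake -DSTATIC=ON ..
# (on MinGW this leaves only dependencies on DLLs shipped with Windows itself).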
2014-03-03 22:07:58 +00:00
endif ( )
2015-01-05 19:30:17 +00:00
if ( ${ BOOST_IGNORE_SYSTEM_PATHS } STREQUAL "ON" )
2014-10-21 18:24:49 +00:00
set ( Boost_NO_SYSTEM_PATHS TRUE )
2014-09-22 10:30:53 +00:00
endif ( )
2015-03-22 09:26:30 +00:00
set ( OLD_LIB_SUFFIXES ${ CMAKE_FIND_LIBRARY_SUFFIXES } )
2014-03-03 22:07:58 +00:00
if ( STATIC )
2015-03-22 09:26:30 +00:00
if ( MINGW )
set ( CMAKE_FIND_LIBRARY_SUFFIXES .a )
endif ( )
2014-03-03 22:07:58 +00:00
set ( Boost_USE_STATIC_LIBS ON )
set ( Boost_USE_STATIC_RUNTIME ON )
endif ( )
2016-08-16 12:59:15 +00:00
find_package ( Boost 1.58 QUIET REQUIRED COMPONENTS system filesystem thread date_time chrono regex serialization program_options )
2014-09-29 18:13:15 +00:00
2015-03-22 09:26:30 +00:00
set ( CMAKE_FIND_LIBRARY_SUFFIXES ${ OLD_LIB_SUFFIXES } )
2014-09-29 18:13:15 +00:00
if ( NOT Boost_FOUND )
2016-08-16 12:59:15 +00:00
die ( "Could not find Boost libraries, please make sure you have installed Boost or libboost-all-dev (1.58) or the equivalent" )
2016-10-27 20:38:29 +00:00
elseif ( Boost_FOUND )
message ( STATUS "Found Boost Version: ${Boost_VERSION}" )
2014-03-03 22:07:58 +00:00
endif ( )
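# Hedged example for a Boost that is not in the system paths (BOOST_ROOT is the
# standard FindBoost hint; the path is an illustrative assumption):
#   cmake -DBOOST_IGNORE_SYSTEM_PATHS=ON -DBOOST_ROOT=/opt/boost_1_58_0 ..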
2014-09-29 18:13:15 +00:00
2014-03-03 22:07:58 +00:00
include_directories ( SYSTEM ${ Boost_INCLUDE_DIRS } )
if ( MINGW )
2014-10-23 18:04:08 +00:00
set ( EXTRA_LIBRARIES mswsock;ws2_32;iphlpapi )
2017-02-08 22:50:14 +00:00
elseif ( APPLE OR OPENBSD OR ANDROID )
2014-10-06 20:29:07 +00:00
set ( EXTRA_LIBRARIES "" )
2017-02-08 22:50:14 +00:00
elseif ( FREEBSD )
set ( EXTRA_LIBRARIES execinfo )
2016-12-15 10:27:53 +00:00
elseif ( DRAGONFLY )
find_library ( COMPAT compat )
2017-02-08 22:50:14 +00:00
set ( EXTRA_LIBRARIES execinfo ${ COMPAT } )
2014-04-30 20:50:06 +00:00
elseif ( NOT MSVC )
2014-10-06 20:29:07 +00:00
find_library ( RT rt )
2016-02-12 21:25:39 +00:00
set ( EXTRA_LIBRARIES ${ RT } )
2014-03-03 22:07:58 +00:00
endif ( )
2016-07-10 22:57:53 +00:00
list ( APPEND EXTRA_LIBRARIES ${ CMAKE_DL_LIBS } )
2016-03-28 18:00:18 +00:00
2017-05-29 22:39:49 +00:00
option ( USE_READLINE "Build with GNU readline support." ON )
if ( USE_READLINE )
find_package ( Readline )
if ( READLINE_FOUND AND GNU_READLINE_FOUND )
add_definitions ( -DHAVE_READLINE )
include_directories ( ${ Readline_INCLUDE_DIR } )
list ( APPEND EXTRA_LIBRARIES ${ Readline_LIBRARY } )
message ( STATUS "Found readline library at: ${Readline_ROOT_DIR}" )
else ( )
message ( STATUS "Could not find GNU readline library so building without readline support" )
endif ( )
endif ( )
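# Hedged example: readline support defaults to ON and can be disabled explicitly:
#   cmake -DUSE_READLINE=OFF ..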
2017-01-21 04:15:00 +00:00
if ( ANDROID )
set ( ATOMIC libatomic.a )
endif ( )
2017-04-02 10:19:25 +00:00
if ( CMAKE_C_COMPILER_ID STREQUAL "Clang" AND ARCH_WIDTH EQUAL "32" AND NOT IOS )
2016-07-29 03:26:51 +00:00
find_library ( ATOMIC atomic )
list ( APPEND EXTRA_LIBRARIES ${ ATOMIC } )
2016-03-28 18:00:18 +00:00
endif ( )
2014-10-21 17:52:24 +00:00
include ( version.cmake )
2014-03-03 22:07:58 +00:00
2017-09-05 16:20:40 +00:00
find_path ( ZMQ_INCLUDE_PATH zmq.hpp )
2017-09-05 16:20:27 +00:00
find_library ( ZMQ_LIB zmq )
2017-09-19 23:47:24 +00:00
find_library ( SODIUM_LIBRARY sodium )
2017-09-05 16:20:27 +00:00
2017-09-05 16:20:40 +00:00
if ( NOT ZMQ_INCLUDE_PATH )
message ( FATAL_ERROR "Could not find required header zmq.hpp" )
endif ( )
if ( NOT ZMQ_LIB )
2017-09-19 23:47:24 +00:00
message ( FATAL_ERROR "Could not find required libzmq" )
endif ( )
if ( SODIUM_LIBRARY )
set ( ZMQ_LIB "${ZMQ_LIB};${SODIUM_LIBRARY}" )
2017-09-05 16:20:40 +00:00
endif ( )
2017-10-09 00:45:45 +00:00
add_subdirectory ( contrib )
add_subdirectory ( src )
2016-05-27 07:42:31 +00:00
2014-12-01 18:00:22 +00:00
if ( BUILD_TESTS )
add_subdirectory ( tests )
2015-01-02 16:52:46 +00:00
endif ( )
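# Hedged example (where the test executables end up is an assumption, not documented
# here):
#   cmake -DBUILD_TESTS=ON .. && make
# then run the resulting test binaries from the build tree's tests/ directory.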
2015-04-01 17:00:45 +00:00
if ( BUILD_DOCUMENTATION )
set ( DOC_GRAPHS "YES" CACHE STRING "Create dependency graphs (needs graphviz)" )
set ( DOC_FULLGRAPHS "NO" CACHE STRING "Create call/callee graphs (large)" )
find_program ( DOT_PATH dot )
if ( DOT_PATH STREQUAL "DOT_PATH-NOTFOUND" )
message ( "Doxygen: graphviz not found - graphs disabled" )
set ( DOC_GRAPHS "NO" )
endif ( )
find_package ( Doxygen )
if ( DOXYGEN_FOUND )
configure_file ( "cmake/Doxyfile.in" "Doxyfile" @ONLY )
configure_file ( "cmake/Doxygen.extra.css.in" "Doxygen.extra.css" @ONLY )
add_custom_target ( doc
${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Generating API documentation with Doxygen.." VERBATIM )
endif ( )
endif ( )
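# Hedged example: with Doxygen (and optionally graphviz) installed, the documentation
# is generated via the custom 'doc' target defined above:
#   cmake -DBUILD_DOCUMENTATION=ON .. && make doc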
2016-09-03 12:11:26 +00:00
# when ON, libwallet_merged will be installed into "lib"
2016-09-03 10:32:06 +00:00
option ( BUILD_GUI_DEPS "Build GUI dependencies." OFF )
2016-09-03 12:11:26 +00:00
# This is not nice; distribution packagers should not enable this, but should depend
# on the libunbound shipped with their distribution instead
option ( INSTALL_VENDORED_LIBUNBOUND "Install libunbound binary built from source vendored with this repo." OFF )
2016-09-03 10:32:06 +00:00