// Copyright (c) 2014-2017, The Monero Project, The Danicoin Project
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
//    conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
//    of conditions and the following disclaimer in the documentation and/or other
//    materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
//    used to endorse or promote products derived from this software without specific
//    prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# include "db_lmdb.h"
2014-10-23 19:37:10 +00:00
# include <boost/filesystem.hpp>
2015-08-04 21:59:42 +00:00
# include <boost/format.hpp>
2015-12-14 18:47:13 +00:00
# include <boost/current_function.hpp>
2014-10-23 19:37:10 +00:00
# include <memory> // std::unique_ptr
# include <cstring> // memcpy
2015-07-12 04:24:42 +00:00
# include <random>
2014-10-23 19:37:10 +00:00
2017-01-26 15:07:23 +00:00
# include "cryptonote_basic/cryptonote_format_utils.h"
2014-10-28 00:45:33 +00:00
# include "crypto/crypto.h"
2015-02-11 23:55:53 +00:00
# include "profile_tools.h"
2017-07-31 15:36:52 +00:00
# include "ringct/rctOps.h"
2014-10-23 19:37:10 +00:00
#undef MONERO_DEFAULT_LOG_CATEGORY
#define MONERO_DEFAULT_LOG_CATEGORY "blockchain.db.lmdb"

#if defined(__i386) || defined(__x86_64)
#define MISALIGNED_OK 1
#endif

using epee::string_tools::pod_to_hex;

// Increase when the DB changes in a non backward compatible way, and there
// is no automatic conversion, so that a full resync is needed.
#define VERSION 1

namespace
{

#pragma pack(push, 1)
// This MUST be identical to output_data_t, without the extra rct data at the end
struct pre_rct_output_data_t
{
  crypto::public_key pubkey;  //!< the output's public key (for spend verification)
  uint64_t unlock_time;       //!< the output's unlock time (or height)
  uint64_t height;            //!< the height of the block which created the output
};
#pragma pack(pop)

template<typename T>
inline void throw0(const T &e)
{
  LOG_PRINT_L0(e.what());
  throw e;
}

template<typename T>
inline void throw1(const T &e)
{
  LOG_PRINT_L1(e.what());
  throw e;
}

#define MDB_val_set(var, val)   MDB_val var = {sizeof(val), (void *)&val}
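// MDB_val_set points an MDB_val at an existing value without copying it, so the
// referenced value must outlive the MDB_val; the MDB_val_copy variants below own
// a private copy of the data instead.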

template<typename T>
struct MDB_val_copy: public MDB_val
{
  MDB_val_copy(const T &t) :
    t_copy(t)
  {
    mv_size = sizeof(T);
    mv_data = &t_copy;
  }
private:
  T t_copy;
};

template<>
struct MDB_val_copy<cryptonote::blobdata>: public MDB_val
{
  MDB_val_copy(const cryptonote::blobdata &bd) :
    data(new char[bd.size()])
  {
    memcpy(data.get(), bd.data(), bd.size());
    mv_size = bd.size();
    mv_data = data.get();
  }
private:
  std::unique_ptr<char[]> data;
};

template<>
struct MDB_val_copy<const char*>: public MDB_val
{
  MDB_val_copy(const char *s) :
    size(strlen(s)+1), // include the NUL, makes it easier for compares
    data(new char[size])
  {
    mv_size = size;
    mv_data = data.get();
    memcpy(mv_data, s, size);
  }
private:
  size_t size;
  std::unique_ptr<char[]> data;
};

int compare_uint64(const MDB_val *a, const MDB_val *b)
{
  const uint64_t va = *(const uint64_t *)a->mv_data;
  const uint64_t vb = *(const uint64_t *)b->mv_data;
  return (va < vb) ? -1 : va > vb;
}
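// compare_uint64 above is an MDB_cmp_func-style comparator (suitable for
// mdb_set_compare()/mdb_set_dupsort()) for tables whose keys, or DUPSORT data
// prefixes, are native-endian 64-bit integers; the hash and string comparators
// below follow the same convention.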

int compare_hash32(const MDB_val *a, const MDB_val *b)
{
  uint32_t *va = (uint32_t*) a->mv_data;
  uint32_t *vb = (uint32_t*) b->mv_data;
  for (int n = 7; n >= 0; n--)
  {
    if (va[n] == vb[n])
      continue;
    return va[n] < vb[n] ? -1 : 1;
  }

  return 0;
}
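// compare_hash32 above orders 32-byte keys by comparing their eight 32-bit words
// from the last word down to the first, i.e. it treats them (word-wise) as
// little-endian 256-bit values.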

int compare_string(const MDB_val *a, const MDB_val *b)
{
  const char *va = (const char*) a->mv_data;
  const char *vb = (const char*) b->mv_data;
  return strcmp(va, vb);
}

/* DB schema:
 *
 * Table            Key          Data
 * -----            ---          ----
 * blocks           block ID     block blob
 * block_heights    block hash   block height
 * block_info       block ID     {block metadata}
 *
 * txs              txn ID       txn blob
 * tx_indices       txn hash     {txn ID, metadata}
 * tx_outputs       txn ID       [txn amount output indices]
 *
 * output_txs       output ID    {txn hash, local index}
 * output_amounts   amount       [{amount output index, metadata}...]
 *
 * spent_keys       input hash   -
 *
 * txpool_meta      txn hash     txn metadata
 * txpool_blob      txn hash     txn blob
 *
 * Note: where the data items are of uniform size, DUPFIXED tables have
 * been used to save space. In most of these cases, a dummy "zerokval"
 * key is used when accessing the table; the Key listed above will be
 * attached as a prefix on the Data to serve as the DUPSORT key.
 * (DUPFIXED saves 8 bytes per record.)
 *
 * The output_amounts table doesn't use a dummy key, but uses DUPSORT.
 */
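
/* Illustrative sketch (not code from this file) of the dummy-key lookup the
 * note above describes, using zerokval (defined below) and the MDB_val_set
 * macro (defined above); the cursor and logical_key names are placeholders.
 * Because such a table's DUPSORT comparator orders records by their key
 * prefix, positioning the cursor with just that prefix finds the record:
 *
 *   MDB_val_set(v, logical_key);  // the key that was stored as the data prefix
 *   int r = mdb_cursor_get(cursor, (MDB_val *)&zerokval, &v, MDB_GET_BOTH);
 *   // on success, v.mv_data points at the stored {prefix, payload} record
 */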

const char* const LMDB_BLOCKS = "blocks";
const char* const LMDB_BLOCK_HEIGHTS = "block_heights";
const char* const LMDB_BLOCK_INFO = "block_info";
const char* const LMDB_TXS = "txs";
const char* const LMDB_TX_INDICES = "tx_indices";
const char* const LMDB_TX_OUTPUTS = "tx_outputs";
const char* const LMDB_OUTPUT_TXS = "output_txs";
const char* const LMDB_OUTPUT_AMOUNTS = "output_amounts";
const char* const LMDB_SPENT_KEYS = "spent_keys";

const char* const LMDB_TXPOOL_META = "txpool_meta";
const char* const LMDB_TXPOOL_BLOB = "txpool_blob";

const char* const LMDB_HF_STARTING_HEIGHTS = "hf_starting_heights";
const char* const LMDB_HF_VERSIONS = "hf_versions";

const char* const LMDB_PROPERTIES = "properties";

const char zerokey[8] = {0};
const MDB_val zerokval = { sizeof(zerokey), (void *)zerokey };
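// zerokval is the shared dummy key used for the DUPFIXED tables described in
// the schema comment; the real per-record key travels as a prefix of the data.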

const std::string lmdb_error(const std::string& error_string, int mdb_res)
{
  const std::string full_string = error_string + mdb_strerror(mdb_res);
  return full_string;
}

inline void lmdb_db_open(MDB_txn* txn, const char* name, int flags, MDB_dbi& dbi, const std::string& error_string)
{
  if (auto res = mdb_dbi_open(txn, name, flags, &dbi))
    throw0(cryptonote::DB_OPEN_FAILURE((lmdb_error(error_string + " : ", res) + std::string(" - you may want to start with --db-salvage")).c_str()));
}

}  // anonymous namespace

#define CURSOR(name) \
  if (!m_cur_ ## name) { \
    int result = mdb_cursor_open(*m_write_txn, m_ ## name, &m_cur_ ## name); \
    if (result) \
      throw0(DB_ERROR(lmdb_error("Failed to open cursor: ", result).c_str())); \
  }

#define RCURSOR(name) \
  if (!m_cur_ ## name) { \
    int result = mdb_cursor_open(m_txn, m_ ## name, (MDB_cursor **)&m_cur_ ## name); \
    if (result) \
      throw0(DB_ERROR(lmdb_error("Failed to open cursor: ", result).c_str())); \
    if (m_cursors != &m_wcursors) \
      m_tinfo->m_ti_rflags.m_rf_ ## name = true; \
  } else if (m_cursors != &m_wcursors && !m_tinfo->m_ti_rflags.m_rf_ ## name) { \
    int result = mdb_cursor_renew(m_txn, m_cur_ ## name); \
    if (result) \
      throw0(DB_ERROR(lmdb_error("Failed to renew cursor: ", result).c_str())); \
    m_tinfo->m_ti_rflags.m_rf_ ## name = true; \
  }
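// The CURSOR/RCURSOR macros above lazily open the cursor m_cur_<name> for table
// m_<name>: CURSOR on the current write transaction, RCURSOR on the active read
// transaction, additionally renewing a previously opened per-thread cursor when
// its flag shows it has not yet been refreshed for the current read txn.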

namespace cryptonote
{

typedef struct mdb_block_info
{
  uint64_t bi_height;
  uint64_t bi_timestamp;
  uint64_t bi_coins;
  uint64_t bi_size; // a size_t really but we need 32-bit compat
  difficulty_type bi_diff;
  crypto::hash bi_hash;
} mdb_block_info;

typedef struct blk_height {
    crypto::hash bh_hash;
    uint64_t bh_height;
} blk_height;

typedef struct txindex {
    crypto::hash key;
    tx_data_t data;
} txindex;

typedef struct pre_rct_outkey {
    uint64_t amount_index;
    uint64_t output_id;
    pre_rct_output_data_t data;
} pre_rct_outkey;

typedef struct outkey {
    uint64_t amount_index;
    uint64_t output_id;
    output_data_t data;
} outkey;

typedef struct outtx {
    uint64_t output_id;
    crypto::hash tx_hash;
    uint64_t local_index;
} outtx;
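// In each record struct above, the leading field is the logical key that serves
// as the DUPSORT prefix for its table (see the schema comment); the remaining
// fields are the payload stored alongside it.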

std::atomic<uint64_t> mdb_txn_safe::num_active_txns{0};
std::atomic_flag mdb_txn_safe::creation_gate = ATOMIC_FLAG_INIT;

mdb_threadinfo::~mdb_threadinfo()
{
  MDB_cursor **cur = &m_ti_rcursors.m_txc_blocks;
  unsigned i;
  for (i = 0; i < sizeof(mdb_txn_cursors)/sizeof(MDB_cursor *); i++)
    if (cur[i])
      mdb_cursor_close(cur[i]);
  if (m_ti_rtxn)
    mdb_txn_abort(m_ti_rtxn);
}

mdb_txn_safe::mdb_txn_safe(const bool check) : m_txn(NULL), m_tinfo(NULL), m_check(check)
{
  if (check)
  {
    while (creation_gate.test_and_set());
    num_active_txns++;
    creation_gate.clear();
  }
}

mdb_txn_safe::~mdb_txn_safe()
{
  if (!m_check)
    return;
  LOG_PRINT_L3("mdb_txn_safe: destructor");
  if (m_tinfo != nullptr)
  {
    mdb_txn_reset(m_tinfo->m_ti_rtxn);
    memset(&m_tinfo->m_ti_rflags, 0, sizeof(m_tinfo->m_ti_rflags));
  } else if (m_txn != nullptr)
  {
    if (m_batch_txn) // this is a batch txn and should have been handled before this point for safety
    {
      LOG_PRINT_L0("WARNING: mdb_txn_safe: m_txn is a batch txn and it's not NULL in destructor - calling mdb_txn_abort()");
    }
    else
    {
      // Example of when this occurs: a lookup fails, so a read-only txn is
      // aborted through this destructor. However, successful read-only txns
      // ideally should have been committed when done and not end up here.
      //
      // NOTE: not sure if this is ever reached for a non-batch write
      // transaction, but it's probably not ideal if it did.
      LOG_PRINT_L3("mdb_txn_safe: m_txn not NULL in destructor - calling mdb_txn_abort()");
    }
    mdb_txn_abort(m_txn);
  }
  num_active_txns--;
}
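// The destructor above only cleans up: a per-thread read transaction is reset
// (kept for later renewal) rather than aborted, any other transaction still
// owned at this point is aborted, and the active-transaction count is dropped.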

void mdb_txn_safe::commit(std::string message)
{
  if (message.size() == 0)
  {
    message = "Failed to commit a transaction to the db";
  }

  if (auto result = mdb_txn_commit(m_txn))
  {
    m_txn = nullptr;
    throw0(DB_ERROR(lmdb_error(message + ": ", result).c_str()));
  }
  m_txn = nullptr;
}

void mdb_txn_safe::abort()
{
  LOG_PRINT_L3("mdb_txn_safe: abort()");
  if (m_txn != nullptr)
  {
    mdb_txn_abort(m_txn);
    m_txn = nullptr;
  }
  else
  {
    LOG_PRINT_L0("WARNING: mdb_txn_safe: abort() called, but m_txn is NULL");
  }
}

uint64_t mdb_txn_safe::num_active_tx() const
{
  return num_active_txns;
}

void mdb_txn_safe::prevent_new_txns()
{
  while (creation_gate.test_and_set());
}

void mdb_txn_safe::wait_no_active_txns()
{
  while (num_active_txns > 0);
}

void mdb_txn_safe::allow_new_txns()
{
  creation_gate.clear();
}
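// prevent_new_txns() spins on the same creation gate the constructor takes, so
// no new tracked mdb_txn_safe can start; wait_no_active_txns() then busy-waits
// for the active count to drain, and allow_new_txns() reopens the gate.
// lmdb_resized() below uses this trio to quiesce the DB before a map resize.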

void lmdb_resized(MDB_env *env)
{
  mdb_txn_safe::prevent_new_txns();

  MGINFO("LMDB map resize detected.");

  MDB_envinfo mei;

  mdb_env_info(env, &mei);
  uint64_t old = mei.me_mapsize;

  mdb_txn_safe::wait_no_active_txns();

  int result = mdb_env_set_mapsize(env, 0);
  if (result)
    throw0(DB_ERROR(lmdb_error("Failed to set new mapsize: ", result).c_str()));

  mdb_env_info(env, &mei);
  uint64_t new_mapsize = mei.me_mapsize;

  MGINFO("LMDB Mapsize increased." << "  Old: " << old / (1024 * 1024) << "MiB" << ", New: " << new_mapsize / (1024 * 1024) << "MiB");

  mdb_txn_safe::allow_new_txns();
}
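// Note: mdb_env_set_mapsize(env, 0) adopts the map size the environment already
// has on disk (e.g. one grown by another process), so lmdb_resized() above does
// not need to compute a new size itself.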

inline int lmdb_txn_begin(MDB_env *env, MDB_txn *parent, unsigned int flags, MDB_txn **txn)
{
  int res = mdb_txn_begin(env, parent, flags, txn);
  if (res == MDB_MAP_RESIZED) {
    lmdb_resized(env);
    res = mdb_txn_begin(env, parent, flags, txn);
  }
  return res;
}

inline int lmdb_txn_renew(MDB_txn *txn)
{
  int res = mdb_txn_renew(txn);
  if (res == MDB_MAP_RESIZED) {
    lmdb_resized(mdb_txn_env(txn));
    res = mdb_txn_renew(txn);
  }
  return res;
}
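// lmdb_txn_begin()/lmdb_txn_renew() wrap the underlying LMDB calls: when the
// memory map was grown elsewhere (MDB_MAP_RESIZED), they adopt the new size via
// lmdb_resized() and retry the operation once.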

void BlockchainLMDB::do_resize(uint64_t increase_size)
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  CRITICAL_REGION_LOCAL(m_synchronization_lock);
  const uint64_t add_size = 1LL << 30;

  // check disk capacity
  try
  {
    boost::filesystem::path path(m_folder);
    boost::filesystem::space_info si = boost::filesystem::space(path);
    if(si.available < add_size)
    {
      MERROR("!! WARNING: Insufficient free space to extend database !!: " << si.available / (1LL << 20L));
      return;
    }
  }
  catch(...)
  {
    // print something but proceed.
MWARNING("Unable to query free disk space.");
** CHANGES ARE EXPERIMENTAL (FOR TESTING ONLY)
Blockchain:
1. Optim: Multi-thread long-hash computation when encountering groups of blocks.
2. Optim: Cache verified txs and return the result from cache instead of re-checking whenever possible.
3. Optim: Preload output-keys when encountering groups of blocks. Sort by amount and global-index before bulk querying the database, and multi-thread when possible.
4. Optim: Disable the double-spend check on block verification; double spends are already detected when trying to add blocks.
5. Optim: Multi-thread signature computation whenever possible.
6. Patch: Disable locking (recursive mutex) on functions called from check_tx_inputs, which causes slowdowns (only seems to happen on Ubuntu/VMs; reason TBD).
7. Optim: Removed looped full-tx hash computation when retrieving transactions from the pool (???).
8. Optim: Cache difficulty/timestamps (735 blocks) for next-difficulty calculations so that only 2 db reads are needed when a new block arrives (instead of 1470 reads).
Berkeley-DB:
1. Fix: 32-bit data errors causing wrong output global indices and failure to send blocks to peers (etc.).
2. Fix: Unable to pop blocks on reorganize due to transaction errors.
3. Patch: Large number of transaction aborts when running multi-threaded bulk queries.
4. Patch: Insufficient locks error when running full sync.
5. Patch: Incorrect db stats when returning from an immediate exit from the "pop block" operation.
6. Optim: Add bulk queries to get output global indices.
7. Optim: Modified output_keys table to store public_key+unlock_time+height for a single lookup per transaction (instead of 3).
8. Optim: Used the output_keys table to retrieve public_keys instead of going through output_amounts->output_txs+output_indices->txs->output:public_key.
9. Optim: Added thread-safe buffers used when multi-threading bulk queries.
10. Optim: Added support for nosync/write_nosync options for improved performance (*see --db-sync-mode option for details).
11. Mod: Added checkpoint thread and auto-remove-logs option.
12. *Now usable on 32-bit systems like RPI2.
LMDB:
1. Optim: Added custom comparison for 256-bit key tables (minor speed-up; TBD: measure actual effect).
2. Optim: Modified output_keys table to store public_key+unlock_time+height for a single lookup per transaction (instead of 3).
3. Optim: Used the output_keys table to retrieve public_keys instead of going through output_amounts->output_txs+output_indices->txs->output:public_key.
4. Optim: Added support for sync/writemap options for improved performance (*see --db-sync-mode option for details).
5. Mod: Auto-resize by +1GB instead of a x1.5 multiplier.
ETC:
1. Minor optimizations of slow-hash for ARM (RPI2). Incomplete.
2. Fix: 32-bit saturation bug when computing next difficulty on large blocks.
[PENDING ISSUES]
1. Berkeley DB has a very slow "pop-block" operation. This is very noticeable on the RPI2, as it sometimes takes > 10 MINUTES to pop a block during reorganization.
This does not happen very often, however; most reorgs seem to take a few seconds, but it possibly depends on the number of outputs present. TBD.
2. Berkeley DB: possible "unable to allocate memory" bug. TBD.
[NEW OPTIONS] (*Currently all enabled for testing purposes)
1. --fast-block-sync arg=[0:1] (default: 1)
a. 0 = Compute the long hash per block (may take a while depending on CPU).
b. 1 = Skip the long hash and verify blocks based on embedded known good block hashes (faster, minimal CPU dependence).
2. --db-sync-mode arg=[[safe|fast|fastest]:[sync|async]:[nblocks_per_sync]] (default: fastest:async:1000); a minimal parsing sketch follows this section.
a. safe = fdatasync/fsync (or equivalent) per stored block. Very slow, but the safest option to protect against power-outage/crash conditions.
b. fast/fastest = Enables asynchronous fdatasync/fsync (or equivalent). Useful for battery-operated devices or STABLE systems with UPS and/or systems with battery-backed write cache/solid state cache.
Fast - Write meta-data but defer data flush.
Fastest - Defer meta-data and data flush.
Sync - Flush data after nblocks_per_sync and wait.
Async - Flush data after nblocks_per_sync but do not wait for the operation to finish.
3. --prep-blocks-threads arg=[n] (default: 4 or system max threads, whichever is lower)
Max number of threads to use when computing long hashes in groups.
4. --show-time-stats arg=[0:1] (default: 1)
Show benchmark-related time stats.
5. --db-auto-remove-logs arg=[0:1] (default: 1)
For berkeley-db only. Auto-remove logs if enabled.
**Note: the lmdb and berkeley-db table layouts have changed and are not compatible with the official git head version.
At the moment, you need a full resync to use this optimized version.
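As a minimal illustration of the --db-sync-mode argument above, the sketch below splits a "mode:sync_type:nblocks_per_sync" string such as "fastest:async:1000" into its three fields. The names db_sync_options, sync_mode, sync_type and parse_db_sync_mode are hypothetical helpers introduced only for this example; this is not the daemon's actual option-handling code, just the expected shape of the argument.
#include <cstdint>
#include <sstream>
#include <string>

enum class sync_mode { safe, fast, fastest };
enum class sync_type { sync, async };

struct db_sync_options
{
  sync_mode mode = sync_mode::fastest;   // safe | fast | fastest
  sync_type type = sync_type::async;     // sync | async
  uint64_t nblocks_per_sync = 1000;      // flush interval in blocks
};

// Split "mode:sync_type:nblocks_per_sync" into its fields; unrecognized values
// return false, empty fields keep the "fastest:async:1000" defaults.
static bool parse_db_sync_mode(const std::string& arg, db_sync_options& out)
{
  std::stringstream ss(arg);
  std::string mode, type, count;
  std::getline(ss, mode, ':');
  std::getline(ss, type, ':');
  std::getline(ss, count, ':');

  if (mode == "safe")         out.mode = sync_mode::safe;
  else if (mode == "fast")    out.mode = sync_mode::fast;
  else if (mode == "fastest") out.mode = sync_mode::fastest;
  else if (!mode.empty())     return false;

  if (type == "sync")         out.type = sync_type::sync;
  else if (type == "async")   out.type = sync_type::async;
  else if (!type.empty())     return false;

  if (!count.empty())
  {
    try { out.nblocks_per_sync = std::stoull(count); }
    catch (...) { return false; }
  }
  return true;
}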
2015-07-10 20:09:32 +00:00
}
2015-05-17 02:05:54 +00:00
MDB_envinfo mei;
mdb_env_info(m_env, &mei);
MDB_stat mst;
mdb_env_stat(m_env, &mst);
2015-07-15 05:47:07 +00:00
// add 1Gb per resize, instead of doing a percentage increase
uint64_t new_mapsize = (double)mei.me_mapsize + add_size;
2015-07-12 05:46:16 +00:00
// If given, use increase_size instead of above way of resizing.
// This is currently used for increasing by an estimated size at start of new
// batch txn.
if (increase_size > 0)
new_mapsize = mei.me_mapsize + increase_size;
2015-05-17 02:05:54 +00:00
new_mapsize += (new_mapsize % mst.ms_psize);
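// Added note (descriptive comment, not original source text): LMDB only allows the
// map size to be changed while no transactions are active in this process, hence the
// prevent/wait barrier below before mdb_env_set_mapsize and the allow_new_txns()
// release afterwards.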
mdb_txn_safe::prevent_new_txns();
if (m_write_txn != nullptr)
{
if (m_batch_active)
2015-05-16 00:42:47 +00:00
{
2015-05-17 02:05:54 +00:00
throw0(DB_ERROR("lmdb resizing not yet supported when batch transactions enabled!"));
2015-05-16 00:42:47 +00:00
}
else
{
2015-05-17 02:05:54 +00:00
throw0(DB_ERROR("attempting resize with write transaction in progress, this should not happen!"));
2015-05-16 00:42:47 +00:00
}
}
2014-10-28 00:45:33 +00:00
2015-05-17 02:05:54 +00:00
mdb_txn_safe::wait_no_active_txns();
2016-03-30 19:36:26 +00:00
int result = mdb_env_set_mapsize(m_env, new_mapsize);
if (result)
throw0(DB_ERROR(lmdb_error("Failed to set new mapsize: ", result).c_str()));
2015-05-17 02:05:54 +00:00
2017-01-16 20:14:56 +00:00
MGINFO("LMDB Mapsize increased." << "  Old: " << mei.me_mapsize / (1024 * 1024) << "MiB" << ", New: " << new_mapsize / (1024 * 1024) << "MiB");
2015-05-17 02:05:54 +00:00
mdb_txn_safe::allow_new_txns();
}
2015-07-12 05:46:16 +00:00
// threshold_size is used for batch transactions
bool BlockchainLMDB::need_resize(uint64_t threshold_size) const
2015-05-17 02:05:54 +00:00
{
2015-08-04 21:59:42 +00:00
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
2015-07-10 20:09:32 +00:00
#if defined(ENABLE_AUTO_RESIZE)
2015-05-17 02:05:54 +00:00
MDB_envinfo mei;
mdb_env_info(m_env, &mei);
MDB_stat mst;
mdb_env_stat(m_env, &mst);
2015-07-12 05:46:16 +00:00
// size_used doesn't include data yet to be committed, which can be
// significant size during batch transactions. For that, we estimate the size
// needed at the beginning of the batch transaction and pass in the
// additional size needed.
2015-05-17 02:05:54 +00:00
uint64_t size_used = mst.ms_psize * mei.me_last_pgno;
2015-07-12 05:46:16 +00:00
LOG_PRINT_L1("DB map size:     " << mei.me_mapsize);
LOG_PRINT_L1("Space used:      " << size_used);
LOG_PRINT_L1("Space remaining: " << mei.me_mapsize - size_used);
LOG_PRINT_L1("Size threshold:  " << threshold_size);
2015-08-04 21:59:42 +00:00
float resize_percent_old = RESIZE_PERCENT;
LOG_PRINT_L1(boost::format("Percent used: %.04f  Percent threshold: %.04f") % ((double)size_used / mei.me_mapsize) % resize_percent_old);
2015-07-12 05:46:16 +00:00
if (threshold_size > 0)
{
if (mei.me_mapsize - size_used < threshold_size)
{
LOG_PRINT_L1("Threshold met (size-based)");
return true;
}
else
return false;
}
2015-07-10 20:09:32 +00:00
std::mt19937 engine(std::random_device{}());
std::uniform_real_distribution<double> fdis(0.6, 0.9);
double resize_percent = fdis(engine);
if ((double)size_used / mei.me_mapsize > resize_percent)
2015-05-17 02:05:54 +00:00
{
2015-07-12 05:46:16 +00:00
LOG_PRINT_L1("Threshold met (percent-based)");
2015-05-17 02:05:54 +00:00
return true;
}
return false;
2015-07-10 20:09:32 +00:00
#else
2015-12-14 04:54:39 +00:00
return false;
2015-07-10 20:09:32 +00:00
#endif
2015-05-17 02:05:54 +00:00
}
2015-03-03 21:09:49 +00:00
2015-07-12 05:46:16 +00:00
void BlockchainLMDB::check_and_resize_for_batch(uint64_t batch_num_blocks)
{
2015-08-04 21:59:42 +00:00
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
LOG_PRINT_L1("[" << __func__ << "] " << "checking DB size");
2016-02-14 19:31:52 +00:00
const uint64_t min_increase_size = 512 * (1 << 20);
2015-07-12 05:46:16 +00:00
uint64_t threshold_size = 0;
uint64_t increase_size = 0;
if (batch_num_blocks > 0)
{
threshold_size = get_estimated_batch_size(batch_num_blocks);
LOG_PRINT_L1("calculated batch size: " << threshold_size);
// The increased DB size could be a multiple of threshold_size, a fixed
// size increase (> threshold_size), or other variations.
//
// Currently we use the greater of threshold size and a minimum size. The
// minimum size increase is used to avoid frequent resizes when the batch
// size is set to a very small number of blocks.
increase_size = (threshold_size > min_increase_size) ? threshold_size : min_increase_size;
LOG_PRINT_L1("increase size: " << increase_size);
}
// if threshold_size is 0 (i.e. number of blocks for batch not passed in), it
// will fall back to the percent-based threshold check instead of the
// size-based check
if (need_resize(threshold_size))
{
2017-01-16 20:14:56 +00:00
MGINFO("[batch] DB resize needed");
2015-07-12 05:46:16 +00:00
do_resize(increase_size);
}
}
uint64_t BlockchainLMDB::get_estimated_batch_size(uint64_t batch_num_blocks) const
{
2015-08-04 21:59:42 +00:00
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
2015-07-12 05:46:16 +00:00
uint64_t threshold_size = 0;
// batch size estimate * batch safety factor = final size estimate
// Takes into account "reasonable" block size increases in batch.
float batch_safety_factor = 1.7f;
2016-02-14 19:31:52 +00:00
float batch_fudge_factor = batch_safety_factor * batch_num_blocks;
2015-07-12 05:46:16 +00:00
// estimate of stored block expanded from raw block, including denormalization and db overhead.
// Note that this probably doesn't grow linearly with block size.
float db_expand_factor = 4.5f;
uint64_t num_prev_blocks = 500;
// For resizing purposes, allow for at least 4k average block size.
uint64_t min_block_size = 4 * 1024;
2015-08-04 21:59:42 +00:00
uint64_t block_stop = 0;
2017-01-14 19:27:52 +00:00
uint64_t m_height = height();
2015-08-04 21:59:42 +00:00
if (m_height > 1)
block_stop = m_height - 1;
2015-07-12 05:46:16 +00:00
uint64_t block_start = 0;
if (block_stop >= num_prev_blocks)
block_start = block_stop - num_prev_blocks + 1;
uint32_t num_blocks_used = 0;
uint64_t total_block_size = 0;
2015-08-04 21:59:42 +00:00
LOG_PRINT_L1("[" << __func__ << "] " << "m_height: " << m_height << "  block_start: " << block_start << "  block_stop: " << block_stop);
size_t avg_block_size = 0;
if (m_height == 0)
2015-07-12 05:46:16 +00:00
{
2015-08-04 21:59:42 +00:00
LOG_PRINT_L1("No existing blocks to check for average block size");
}
2017-02-18 17:27:33 +00:00
else if (m_cum_count >= num_prev_blocks)
2016-02-14 20:27:43 +00:00
{
avg_block_size = m_cum_size / m_cum_count;
LOG_PRINT_L1("average block size across recent " << m_cum_count << " blocks: " << avg_block_size);
m_cum_size = 0;
m_cum_count = 0;
}
2015-08-04 21:59:42 +00:00
else
{
2017-02-18 17:27:33 +00:00
MDB_txn *rtxn;
mdb_txn_cursors *rcurs;
block_rtxn_start(&rtxn, &rcurs);
2015-08-04 21:59:42 +00:00
for (uint64_t block_num = block_start; block_num <= block_stop; ++block_num)
{
uint32_t block_size = get_block_size(block_num);
total_block_size += block_size;
// Track number of blocks being totalled here instead of assuming, in case
// some blocks were to be skipped for being outliers.
++num_blocks_used;
}
2017-02-18 17:27:33 +00:00
block_rtxn_stop();
2015-08-04 21:59:42 +00:00
avg_block_size = total_block_size / num_blocks_used;
LOG_PRINT_L1("average block size across recent " << num_blocks_used << " blocks: " << avg_block_size);
2015-07-12 05:46:16 +00:00
}
if (avg_block_size < min_block_size)
avg_block_size = min_block_size;
LOG_PRINT_L1("estimated average block size for batch: " << avg_block_size);
2016-02-14 19:31:52 +00:00
// bigger safety margin on smaller block sizes
if (batch_fudge_factor < 5000.0)
batch_fudge_factor = 5000.0;
threshold_size = avg_block_size * db_expand_factor * batch_fudge_factor;
2015-07-12 05:46:16 +00:00
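// Added note (illustrative arithmetic only, not original source text): with the 4 KiB
// floor on avg_block_size and the 5000.0 floor on batch_fudge_factor above, the
// smallest estimate this can return is roughly 4096 * 4.5 * 5000 ≈ 92 MB, which
// check_and_resize_for_batch then raises to its 512 MiB min_increase_size when
// choosing how much to grow the map.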
return threshold_size;
}
2015-07-10 20:09:32 +00:00
void BlockchainLMDB::add_block(const block& blk, const size_t& block_size, const difficulty_type& cumulative_difficulty, const uint64_t& coins_generated,
2015-12-14 04:54:39 +00:00
const crypto::hash& blk_hash)
2014-10-21 20:33:43 +00:00
{
2014-10-30 04:58:14 +00:00
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
2014-10-23 19:37:10 +00:00
check_open();
2016-02-18 12:09:57 +00:00
mdb_txn_cursors *m_cursors = &m_wcursors;
2017-01-14 19:27:52 +00:00
uint64_t m_height = height();
2014-10-23 19:37:10 +00:00
2016-01-07 06:33:22 +00:00
CURSOR(block_heights)
2016-03-06 06:21:15 +00:00
blk_height bh = {blk_hash, m_height};
2016-04-04 01:10:58 +00:00
MDB_val_set(val_h, bh);
2016-03-06 06:21:15 +00:00
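// Added note (descriptive comment, not original source text): block_heights, like
// block_info and tx_indices below, stores its records as sorted duplicates under the
// single dummy key zerokval, so existence checks use MDB_GET_BOTH with the record
// itself as the search value and inserts go in as duplicate data items rather than
// under per-record keys.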
if (mdb_cursor_get(m_cur_block_heights, (MDB_val *)&zerokval, &val_h, MDB_GET_BOTH) == 0)
2014-12-12 21:34:45 +00:00
throw1(BLOCK_EXISTS("Attempting to add block that's already in the db"));
2014-10-23 19:37:10 +00:00
if (m_height > 0)
{
2016-04-04 01:10:58 +00:00
MDB_val_set(parent_key, blk.prev_id);
2016-03-27 21:43:16 +00:00
int result = mdb_cursor_get(m_cur_block_heights, (MDB_val *)&zerokval, &parent_key, MDB_GET_BOTH);
2016-03-20 18:06:04 +00:00
if (result)
2015-02-11 23:55:53 +00:00
{
LOG_PRINT_L3("m_height: " << m_height);
LOG_PRINT_L3("parent_key: " << blk.prev_id);
2016-03-20 18:06:04 +00:00
throw0(DB_ERROR(lmdb_error("Failed to get top block hash to check for new block's parent: ", result).c_str()));
2015-02-11 23:55:53 +00:00
}
2016-03-06 06:21:15 +00:00
blk_height *prev = (blk_height *)parent_key.mv_data;
2016-04-04 01:10:58 +00:00
if (prev->bh_height != m_height - 1)
2014-12-12 21:34:45 +00:00
throw0(BLOCK_PARENT_DNE("Top block is not new block's parent"));
2014-10-23 19:37:10 +00:00
}
2015-05-30 04:07:54 +00:00
int result = 0;
2016-04-04 01:10:58 +00:00
MDB_val_set(key, m_height);
2014-10-23 19:37:10 +00:00
2016-01-07 06:33:22 +00:00
CURSOR(blocks)
2016-03-03 04:03:04 +00:00
CURSOR(block_info)
2016-01-07 06:33:22 +00:00
2017-02-11 10:16:18 +00:00
// this call to mdb_cursor_put will change height()
2014-12-12 13:27:05 +00:00
MDB_val_copy<blobdata> blob(block_to_blob(blk));
2016-01-07 06:33:22 +00:00
result = mdb_cursor_put(m_cur_blocks, &key, &blob, MDB_APPEND);
2015-05-30 04:07:54 +00:00
if (result)
2016-03-04 05:27:13 +00:00
throw0(DB_ERROR(lmdb_error("Failed to add block blob to db transaction: ", result).c_str()));
2014-10-23 19:37:10 +00:00
2016-03-03 04:03:04 +00:00
mdb_block_info bi;
2016-03-28 17:26:37 +00:00
bi.bi_height = m_height;
2016-03-03 04:03:04 +00:00
bi.bi_timestamp = blk.timestamp;
bi.bi_coins = coins_generated;
bi.bi_size = block_size;
bi.bi_diff = cumulative_difficulty;
bi.bi_hash = blk_hash;
2014-10-28 00:45:33 +00:00
2016-04-04 01:10:58 +00:00
MDB_val_set(val, bi);
2016-03-28 17:26:37 +00:00
result = mdb_cursor_put(m_cur_block_info, (MDB_val *)&zerokval, &val, MDB_APPENDDUP);
2015-05-30 04:07:54 +00:00
if (result)
2016-03-03 04:03:04 +00:00
throw0(DB_ERROR(lmdb_error("Failed to add block info to db transaction: ", result).c_str()));
2014-10-28 00:45:33 +00:00
2016-03-06 06:21:15 +00:00
result = mdb_cursor_put(m_cur_block_heights, (MDB_val *)&zerokval, &val_h, 0);
2015-05-30 04:07:54 +00:00
if (result)
2016-03-04 05:27:13 +00:00
throw0(DB_ERROR(lmdb_error("Failed to add block height by hash to db transaction: ", result).c_str()));
2014-10-28 00:45:33 +00:00
2016-02-14 20:27:43 +00:00
m_cum_size += block_size;
m_cum_count++;
2014-10-21 20:33:43 +00:00
}
2014-10-23 19:37:10 +00:00
void BlockchainLMDB::remove_block()
2014-10-21 20:33:43 +00:00
{
2016-03-20 18:06:04 +00:00
int result;
2014-10-30 04:58:14 +00:00
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
2014-10-23 19:37:10 +00:00
check_open();
2017-01-14 19:27:52 +00:00
uint64_t m_height = height();
2014-10-28 00:45:33 +00:00
2014-12-12 23:20:41 +00:00
if (m_height == 0)
throw0(BLOCK_DNE("Attempting to remove block from an empty blockchain"));
2016-03-03 04:03:04 +00:00
mdb_txn_cursors *m_cursors = &m_wcursors;
CURSOR(block_info)
2016-03-31 19:55:16 +00:00
CURSOR(block_heights)
CURSOR(blocks)
2014-12-12 13:27:05 +00:00
MDB_val_copy<uint64_t> k(m_height - 1);
2016-03-28 17:26:37 +00:00
MDB_val h = k;
if ((result = mdb_cursor_get(m_cur_block_info, (MDB_val *)&zerokval, &h, MDB_GET_BOTH)))
2016-03-20 18:06:04 +00:00
throw1(BLOCK_DNE(lmdb_error("Attempting to remove block that's not in the db: ", result).c_str()));
2014-10-28 00:45:33 +00:00
2016-03-03 04:03:04 +00:00
// must use h now; deleting from m_block_info will invalidate it
mdb_block_info *bi = (mdb_block_info *)h.mv_data;
2016-03-06 06:21:15 +00:00
blk_height bh = {bi->bi_hash, 0};
h.mv_data = (void *)&bh;
h.mv_size = sizeof(bh);
2016-03-31 19:55:16 +00:00
if ((result = mdb_cursor_get(m_cur_block_heights, (MDB_val *)&zerokval, &h, MDB_GET_BOTH)))
throw1(DB_ERROR(lmdb_error("Failed to locate block height by hash for removal: ", result).c_str()));
if ((result = mdb_cursor_del(m_cur_block_heights, 0)))
2016-03-20 18:06:04 +00:00
throw1(DB_ERROR(lmdb_error("Failed to add removal of block height by hash to db transaction: ", result).c_str()));
2014-10-28 00:45:33 +00:00
2016-03-31 19:55:16 +00:00
if ((result = mdb_cursor_get(m_cur_blocks, &k, NULL, MDB_SET)))
throw1(DB_ERROR(lmdb_error("Failed to locate block for removal: ", result).c_str()));
if ((result = mdb_cursor_del(m_cur_blocks, 0)))
2016-03-27 21:43:16 +00:00
throw1(DB_ERROR(lmdb_error("Failed to add removal of block to db transaction: ", result).c_str()));
2016-03-03 04:03:04 +00:00
2016-03-27 21:43:16 +00:00
if ((result = mdb_cursor_del(m_cur_block_info, 0)))
throw1(DB_ERROR(lmdb_error("Failed to add removal of block info to db transaction: ", result).c_str()));
2014-10-21 20:33:43 +00:00
}

uint64_t BlockchainLMDB::add_transaction_data(const crypto::hash& blk_hash, const transaction& tx, const crypto::hash& tx_hash)
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();
  mdb_txn_cursors *m_cursors = &m_wcursors;
  uint64_t m_height = height();

  int result;
  uint64_t tx_id = get_tx_count();

  CURSOR(txs)
  CURSOR(tx_indices)

  MDB_val_set(val_tx_id, tx_id);
  MDB_val_set(val_h, tx_hash);
  result = mdb_cursor_get(m_cur_tx_indices, (MDB_val *)&zerokval, &val_h, MDB_GET_BOTH);
  if (result == 0) {
    txindex *tip = (txindex *)val_h.mv_data;
    throw1(TX_EXISTS(std::string("Attempting to add transaction that's already in the db (tx id ").append(boost::lexical_cast<std::string>(tip->data.tx_id)).append(")").c_str()));
  } else if (result != MDB_NOTFOUND) {
    throw1(DB_ERROR(lmdb_error(std::string("Error checking if tx index exists for tx hash ") + epee::string_tools::pod_to_hex(tx_hash) + ": ", result).c_str()));
  }

  txindex ti;
  ti.key = tx_hash;
  ti.data.tx_id = tx_id;
  ti.data.unlock_time = tx.unlock_time;
  ti.data.block_id = m_height;  // we don't need blk_hash since we know m_height

  val_h.mv_size = sizeof(ti);
  val_h.mv_data = (void *)&ti;

  result = mdb_cursor_put(m_cur_tx_indices, (MDB_val *)&zerokval, &val_h, 0);
  if (result)
    throw0(DB_ERROR(lmdb_error("Failed to add tx data to db transaction: ", result).c_str()));

  MDB_val_copy<blobdata> blob(tx_to_blob(tx));
  result = mdb_cursor_put(m_cur_txs, &val_tx_id, &blob, MDB_APPEND);
  if (result)
    throw0(DB_ERROR(lmdb_error("Failed to add tx blob to db transaction: ", result).c_str()));

  return tx_id;
}
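
// NOTE (illustrative): the tx_indices entry stores a txindex struct as the
// duplicate value, roughly { key = tx_hash, data = { tx_id, unlock_time,
// block_id } }, so a later lookup by tx hash yields the compact integer tx_id
// that keys m_txs and m_tx_outputs.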

// TODO: compare pros and cons of looking up the tx hash's tx index once and
// passing it in to functions like this
void BlockchainLMDB::remove_transaction_data(const crypto::hash& tx_hash, const transaction& tx)
{
  int result;

  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  mdb_txn_cursors *m_cursors = &m_wcursors;
  CURSOR(tx_indices)
  CURSOR(txs)
  CURSOR(tx_outputs)

  MDB_val_set(val_h, tx_hash);

  if (mdb_cursor_get(m_cur_tx_indices, (MDB_val *)&zerokval, &val_h, MDB_GET_BOTH))
      throw1(TX_DNE("Attempting to remove transaction that isn't in the db"));
  txindex *tip = (txindex *)val_h.mv_data;
  MDB_val_set(val_tx_id, tip->data.tx_id);

  if ((result = mdb_cursor_get(m_cur_txs, &val_tx_id, NULL, MDB_SET)))
      throw1(DB_ERROR(lmdb_error("Failed to locate tx for removal: ", result).c_str()));
  result = mdb_cursor_del(m_cur_txs, 0);
  if (result)
      throw1(DB_ERROR(lmdb_error("Failed to add removal of tx to db transaction: ", result).c_str()));

  remove_tx_outputs(tip->data.tx_id, tx);

  result = mdb_cursor_get(m_cur_tx_outputs, &val_tx_id, NULL, MDB_SET);
  if (result == MDB_NOTFOUND)
    LOG_PRINT_L1("tx has no outputs to remove: " << tx_hash);
  else if (result)
    throw1(DB_ERROR(lmdb_error("Failed to locate tx outputs for removal: ", result).c_str()));
  if (!result)
  {
    result = mdb_cursor_del(m_cur_tx_outputs, 0);
    if (result)
      throw1(DB_ERROR(lmdb_error("Failed to add removal of tx outputs to db transaction: ", result).c_str()));
  }

  // Don't delete the tx_indices entry until the end, after we're done with val_tx_id
  if (mdb_cursor_del(m_cur_tx_indices, 0))
      throw1(DB_ERROR("Failed to add removal of tx index to db transaction"));
}

uint64_t BlockchainLMDB::add_output(const crypto::hash& tx_hash,
    const tx_out& tx_output,
    const uint64_t& local_index,
    const uint64_t unlock_time,
    const rct::key *commitment)
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();
  mdb_txn_cursors *m_cursors = &m_wcursors;
  uint64_t m_height = height();
  uint64_t m_num_outputs = num_outputs();

  int result = 0;

  CURSOR(output_txs)
  CURSOR(output_amounts)

  if (tx_output.target.type() != typeid(txout_to_key))
    throw0(DB_ERROR("Wrong output type: expected txout_to_key"));
  if (tx_output.amount == 0 && !commitment)
    throw0(DB_ERROR("RCT output without commitment"));

  outtx ot = {m_num_outputs, tx_hash, local_index};
  MDB_val_set(vot, ot);

  result = mdb_cursor_put(m_cur_output_txs, (MDB_val *)&zerokval, &vot, MDB_APPENDDUP);
  if (result)
    throw0(DB_ERROR(lmdb_error("Failed to add output tx hash to db transaction: ", result).c_str()));

  outkey ok;
  MDB_val data;
  MDB_val_copy<uint64_t> val_amount(tx_output.amount);
  result = mdb_cursor_get(m_cur_output_amounts, &val_amount, &data, MDB_SET);
  if (!result)
  {
    mdb_size_t num_elems = 0;
    result = mdb_cursor_count(m_cur_output_amounts, &num_elems);
    if (result)
      throw0(DB_ERROR(std::string("Failed to get number of outputs for amount: ").append(mdb_strerror(result)).c_str()));
    ok.amount_index = num_elems;
  }
  else if (result != MDB_NOTFOUND)
    throw0(DB_ERROR(lmdb_error("Failed to get output amount in db transaction: ", result).c_str()));
  else
    ok.amount_index = 0;
  ok.output_id = m_num_outputs;
  ok.data.pubkey = boost::get<txout_to_key>(tx_output.target).key;
  ok.data.unlock_time = unlock_time;
  ok.data.height = m_height;
  if (tx_output.amount == 0)
  {
    ok.data.commitment = *commitment;
    data.mv_size = sizeof(ok);
  }
  else
  {
    data.mv_size = sizeof(pre_rct_outkey);
  }
  data.mv_data = &ok;
  if ((result = mdb_cursor_put(m_cur_output_amounts, &val_amount, &data, MDB_APPENDDUP)))
      throw0(DB_ERROR(lmdb_error("Failed to add output pubkey to db transaction: ", result).c_str()));

  return ok.amount_index;
}
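
// NOTE (illustrative): pre-RingCT outputs are stored without a commitment, so
// the value size written above is sizeof(pre_rct_outkey) when amount != 0 and
// sizeof(outkey) (which additionally carries the rct commitment) when
// amount == 0; readers can tell the two layouts apart purely by mv_size.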

void BlockchainLMDB::add_tx_amount_output_indices(const uint64_t tx_id,
    const std::vector<uint64_t>& amount_output_indices)
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();
  mdb_txn_cursors *m_cursors = &m_wcursors;
  CURSOR(tx_outputs)

  int result = 0;

  int num_outputs = amount_output_indices.size();

  MDB_val_set(k_tx_id, tx_id);
  MDB_val v;
  v.mv_data = (void *)amount_output_indices.data();
  v.mv_size = sizeof(uint64_t) * num_outputs;
  // LOG_PRINT_L1("tx_outputs[tx_hash] size: " << v.mv_size);

  result = mdb_cursor_put(m_cur_tx_outputs, &k_tx_id, &v, MDB_APPEND);
  if (result)
    throw0(DB_ERROR(std::string("Failed to add <tx hash, amount output index array> to db transaction: ").append(mdb_strerror(result)).c_str()));
}
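
// NOTE (illustrative): the value written above is the raw uint64_t array, so a
// tx whose outputs sit at per-amount indices {5, 0, 12} is stored as 24
// contiguous bytes under its tx_id; get_tx_amount_output_indices() is expected
// to read it back by dividing mv_size by sizeof(uint64_t).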

void BlockchainLMDB::remove_tx_outputs(const uint64_t tx_id, const transaction& tx)
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);

  std::vector<uint64_t> amount_output_indices = get_tx_amount_output_indices(tx_id);

  if (amount_output_indices.empty())
  {
    if (tx.vout.empty())
      LOG_PRINT_L2("tx has no outputs, so no output indices");
    else
      throw0(DB_ERROR("tx has outputs, but no output indices found"));
  }

  bool is_pseudo_rct = tx.version >= 2 && tx.vin.size() == 1 && tx.vin[0].type() == typeid(txin_gen);
  for (size_t i = tx.vout.size(); i-- > 0;)
  {
    uint64_t amount = is_pseudo_rct ? 0 : tx.vout[i].amount;
    remove_output(amount, amount_output_indices[i]);
  }
}

void BlockchainLMDB::remove_output(const uint64_t amount, const uint64_t& out_index)
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();
  mdb_txn_cursors *m_cursors = &m_wcursors;
  CURSOR(output_amounts);
  CURSOR(output_txs);

  MDB_val_set(k, amount);
  MDB_val_set(v, out_index);

  auto result = mdb_cursor_get(m_cur_output_amounts, &k, &v, MDB_GET_BOTH);
  if (result == MDB_NOTFOUND)
    throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but amount not found"));
  else if (result)
    throw0(DB_ERROR(lmdb_error("DB error attempting to get an output", result).c_str()));

  const pre_rct_outkey *ok = (const pre_rct_outkey *)v.mv_data;
  MDB_val_set(otxk, ok->output_id);
  result = mdb_cursor_get(m_cur_output_txs, (MDB_val *)&zerokval, &otxk, MDB_GET_BOTH);
  if (result == MDB_NOTFOUND)
  {
    throw0(DB_ERROR("Unexpected: global output index not found in m_output_txs"));
  }
  else if (result)
  {
    throw1(DB_ERROR(lmdb_error("Error adding removal of output tx to db transaction", result).c_str()));
  }
  result = mdb_cursor_del(m_cur_output_txs, 0);
  if (result)
    throw0(DB_ERROR(lmdb_error(std::string("Error deleting output index ").append(boost::lexical_cast<std::string>(out_index).append(": ")).c_str(), result).c_str()));
  // now delete the amount
  result = mdb_cursor_del(m_cur_output_amounts, 0);
  if (result)
    throw0(DB_ERROR(lmdb_error(std::string("Error deleting amount for output index ").append(boost::lexical_cast<std::string>(out_index).append(": ")).c_str(), result).c_str()));
}

void BlockchainLMDB::add_spent_key(const crypto::key_image& k_image)
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();
  mdb_txn_cursors *m_cursors = &m_wcursors;

  CURSOR(spent_keys)

  MDB_val k = {sizeof(k_image), (void *)&k_image};
  if (auto result = mdb_cursor_put(m_cur_spent_keys, (MDB_val *)&zerokval, &k, MDB_NODUPDATA)) {
    if (result == MDB_KEYEXIST)
      throw1(KEY_IMAGE_EXISTS("Attempting to add spent key image that's already in the db"));
    else
      throw1(DB_ERROR(lmdb_error("Error adding spent key image to db transaction: ", result).c_str()));
  }
}

void BlockchainLMDB::remove_spent_key(const crypto::key_image& k_image)
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();
  mdb_txn_cursors *m_cursors = &m_wcursors;
  CURSOR(spent_keys)

  MDB_val k = {sizeof(k_image), (void *)&k_image};
  auto result = mdb_cursor_get(m_cur_spent_keys, (MDB_val *)&zerokval, &k, MDB_GET_BOTH);
  if (result != 0 && result != MDB_NOTFOUND)
    throw1(DB_ERROR(lmdb_error("Error finding spent key to remove", result).c_str()));
  if (!result)
  {
    result = mdb_cursor_del(m_cur_spent_keys, 0);
    if (result)
      throw1(DB_ERROR(lmdb_error("Error adding removal of key image to db transaction", result).c_str()));
  }
}
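
// NOTE (illustrative): because m_spent_keys is a dup-sorted table under the
// dummy zerokval key, MDB_NODUPDATA in add_spent_key() makes LMDB itself flag
// a repeated key image as MDB_KEYEXIST, so, roughly:
//
//   add_spent_key(ki);   // ok
//   add_spent_key(ki);   // throws KEY_IMAGE_EXISTS via the MDB_KEYEXIST branch
//
// and no separate existence check is needed before inserting.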

blobdata BlockchainLMDB::output_to_blob(const tx_out& output) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  blobdata b;
  if (!t_serializable_object_to_blob(output, b))
    throw1(DB_ERROR("Error serializing output to blob"));
  return b;
}

tx_out BlockchainLMDB::output_from_blob(const blobdata& blob) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  std::stringstream ss;
  ss << blob;
  binary_archive<false> ba(ss);
  tx_out o;
  if (!(::serialization::serialize(ba, o)))
    throw1(DB_ERROR("Error deserializing tx output blob"));
  return o;
}

void BlockchainLMDB::check_open() const
{
//  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  if (!m_open)
    throw0(DB_ERROR("DB operation attempted on a not-open DB instance"));
}
BlockchainLMDB::~BlockchainLMDB()
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);

  // batch transaction shouldn't be active at this point. If it is, consider it aborted.
  if (m_batch_active)
    batch_abort();
  if (m_open)
    close();
}

BlockchainLMDB::BlockchainLMDB(bool batch_transactions)
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  // initialize folder to something "safe" just in case
  // someone accidentally misuses this class...
  m_folder = "thishsouldnotexistbecauseitisgibberish";
  m_open = false;

  m_batch_transactions = batch_transactions;
  m_write_txn = nullptr;
  m_write_batch_txn = nullptr;
  m_batch_active = false;
  m_cum_size = 0;
  m_cum_count = 0;

  m_hardfork = nullptr;
}

void BlockchainLMDB::open(const std::string& filename, const int db_flags)
{
  int result;
  int mdb_flags = MDB_NORDAHEAD;

  LOG_PRINT_L3("BlockchainLMDB::" << __func__);

  if (m_open)
    throw0(DB_OPEN_FAILURE("Attempted to open db, but it's already open"));

  boost::filesystem::path direc(filename);
  if (boost::filesystem::exists(direc))
  {
    if (!boost::filesystem::is_directory(direc))
      throw0(DB_OPEN_FAILURE("LMDB needs a directory path, but a file was passed"));
  }
  else
  {
    if (!boost::filesystem::create_directories(direc))
      throw0(DB_OPEN_FAILURE(std::string("Failed to create directory ").append(filename).c_str()));
  }

  // check for existing LMDB files in base directory
  boost::filesystem::path old_files = direc.parent_path();
  if (boost::filesystem::exists(old_files / CRYPTONOTE_BLOCKCHAINDATA_FILENAME)
    || boost::filesystem::exists(old_files / CRYPTONOTE_BLOCKCHAINDATA_LOCK_FILENAME))
  {
    LOG_PRINT_L0("Found existing LMDB files in " << old_files.string());
    LOG_PRINT_L0("Move " << CRYPTONOTE_BLOCKCHAINDATA_FILENAME << " and/or " << CRYPTONOTE_BLOCKCHAINDATA_LOCK_FILENAME << " to " << filename << ", or delete them, and then restart");
    throw DB_ERROR("Database could not be opened");
  }
  m_folder = filename;

  // set up lmdb environment
  if ((result = mdb_env_create(&m_env)))
    throw0(DB_ERROR(lmdb_error("Failed to create lmdb environment: ", result).c_str()));
  if ((result = mdb_env_set_maxdbs(m_env, 20)))
    throw0(DB_ERROR(lmdb_error("Failed to set max number of dbs: ", result).c_str()));

  size_t mapsize = DEFAULT_MAPSIZE;

  if (db_flags & DBF_FAST)
    mdb_flags |= MDB_NOSYNC;
  if (db_flags & DBF_FASTEST)
    mdb_flags |= MDB_NOSYNC | MDB_WRITEMAP | MDB_MAPASYNC;
  if (db_flags & DBF_RDONLY)
    mdb_flags = MDB_RDONLY;
  if (db_flags & DBF_SALVAGE)
    mdb_flags |= MDB_PREVSNAPSHOT;

  if (auto result = mdb_env_open(m_env, filename.c_str(), mdb_flags, 0644))
    throw0(DB_ERROR(lmdb_error("Failed to open lmdb environment: ", result).c_str()));

  MDB_envinfo mei;
  mdb_env_info(m_env, &mei);
  uint64_t cur_mapsize = (double)mei.me_mapsize;

  if (cur_mapsize < mapsize)
  {
    if (auto result = mdb_env_set_mapsize(m_env, mapsize))
      throw0(DB_ERROR(lmdb_error("Failed to set max memory map size: ", result).c_str()));
    mdb_env_info(m_env, &mei);
    cur_mapsize = (double)mei.me_mapsize;
    LOG_PRINT_L1("LMDB memory map size: " << cur_mapsize);
  }

  if (need_resize())
  {
    LOG_PRINT_L0("LMDB memory map needs to be resized, doing that now.");
    do_resize();
  }

  int txn_flags = 0;
  if (mdb_flags & MDB_RDONLY)
    txn_flags |= MDB_RDONLY;

  // get a read/write MDB_txn, depending on mdb_flags
  mdb_txn_safe txn;
  if (auto mdb_res = mdb_txn_begin(m_env, NULL, txn_flags, txn))
    throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", mdb_res).c_str()));
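  // NOTE (illustrative): the db_flags passed by the caller are the daemon-level
  // DBF_* switches, which the block above maps onto LMDB environment flags,
  // roughly:
  //
  //   BlockchainLMDB db(/*batch_transactions=*/false);
  //   db.open("/path/to/lmdb", DBF_FAST);   // MDB_NORDAHEAD | MDB_NOSYNC
  //
  // DBF_FASTEST additionally enables MDB_WRITEMAP | MDB_MAPASYNC, and
  // DBF_RDONLY forces a read-only environment; the fast modes trade durability
  // on crash for write throughput.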

  // open necessary databases, and set properties as needed
  // uses macros to avoid having to change things too many places
  lmdb_db_open(txn, LMDB_BLOCKS, MDB_INTEGERKEY | MDB_CREATE, m_blocks, "Failed to open db handle for m_blocks");

  lmdb_db_open(txn, LMDB_BLOCK_INFO, MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_block_info, "Failed to open db handle for m_block_info");
  lmdb_db_open(txn, LMDB_BLOCK_HEIGHTS, MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_block_heights, "Failed to open db handle for m_block_heights");

  lmdb_db_open(txn, LMDB_TXS, MDB_INTEGERKEY | MDB_CREATE, m_txs, "Failed to open db handle for m_txs");
  lmdb_db_open(txn, LMDB_TX_INDICES, MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_tx_indices, "Failed to open db handle for m_tx_indices");
  lmdb_db_open(txn, LMDB_TX_OUTPUTS, MDB_INTEGERKEY | MDB_CREATE, m_tx_outputs, "Failed to open db handle for m_tx_outputs");

  lmdb_db_open(txn, LMDB_OUTPUT_TXS, MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_output_txs, "Failed to open db handle for m_output_txs");
  lmdb_db_open(txn, LMDB_OUTPUT_AMOUNTS, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_output_amounts, "Failed to open db handle for m_output_amounts");

  lmdb_db_open(txn, LMDB_SPENT_KEYS, MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_spent_keys, "Failed to open db handle for m_spent_keys");

  lmdb_db_open(txn, LMDB_TXPOOL_META, MDB_CREATE, m_txpool_meta, "Failed to open db handle for m_txpool_meta");
  lmdb_db_open(txn, LMDB_TXPOOL_BLOB, MDB_CREATE, m_txpool_blob, "Failed to open db handle for m_txpool_blob");

  // this subdb is dropped on sight, so it may not be present when we open the DB.
  // Since we use MDB_CREATE, we'll get an exception if we open read-only and it does not exist.
  // So we don't open for read-only, and also not drop below. It is not used elsewhere.
  if (!(mdb_flags & MDB_RDONLY))
    lmdb_db_open(txn, LMDB_HF_STARTING_HEIGHTS, MDB_CREATE, m_hf_starting_heights, "Failed to open db handle for m_hf_starting_heights");

  lmdb_db_open(txn, LMDB_HF_VERSIONS, MDB_INTEGERKEY | MDB_CREATE, m_hf_versions, "Failed to open db handle for m_hf_versions");

  lmdb_db_open(txn, LMDB_PROPERTIES, MDB_CREATE, m_properties, "Failed to open db handle for m_properties");

  mdb_set_dupsort(txn, m_spent_keys, compare_hash32);
  mdb_set_dupsort(txn, m_block_heights, compare_hash32);
  mdb_set_dupsort(txn, m_tx_indices, compare_hash32);
  mdb_set_dupsort(txn, m_output_amounts, compare_uint64);
  mdb_set_dupsort(txn, m_output_txs, compare_uint64);
  mdb_set_dupsort(txn, m_block_info, compare_uint64);

  mdb_set_compare(txn, m_txpool_meta, compare_hash32);
  mdb_set_compare(txn, m_txpool_blob, compare_hash32);
  mdb_set_compare(txn, m_properties, compare_string);

  if (!(mdb_flags & MDB_RDONLY))
  {
    result = mdb_drop(txn, m_hf_starting_heights, 1);
    if (result && result != MDB_NOTFOUND)
      throw0(DB_ERROR(lmdb_error("Failed to drop m_hf_starting_heights: ", result).c_str()));
  }
  // get and keep current height
  MDB_stat db_stats;
  if ((result = mdb_stat(txn, m_blocks, &db_stats)))
    throw0(DB_ERROR(lmdb_error("Failed to query m_blocks: ", result).c_str()));
  LOG_PRINT_L2("Setting m_height to: " << db_stats.ms_entries);
  uint64_t m_height = db_stats.ms_entries;

  bool compatible = true;

  MDB_val_copy<const char*> k("version");
  MDB_val v;
  auto get_result = mdb_get(txn, m_properties, &k, &v);
  if (get_result == MDB_SUCCESS)
  {
    if (*(const uint32_t*)v.mv_data > VERSION)
    {
      MWARNING("Existing lmdb database was made by a later version. We don't know how it will change yet.");
      compatible = false;
    }
#if VERSION > 0
    else if (*(const uint32_t*)v.mv_data < VERSION)
    {
      // Note that there was a schema change within version 0 as well.
      // See commit e5d2680094ee15889934fe28901e4e133cda56f2 2015/07/10
      // We don't handle the old format previous to that commit.
      txn.commit();
      m_open = true;
      migrate(*(const uint32_t *)v.mv_data);
      return;
    }
#endif
  }
  else
  {
    // if not found, and the DB is non-empty, this is probably
    // an "old" version 0, which we don't handle. If the DB is
    // empty it's fine.
    if (VERSION > 0 && m_height > 0)
      compatible = false;
  }

  if (!compatible)
  {
    txn.abort();
    mdb_env_close(m_env);
    m_open = false;
    MFATAL("Existing lmdb database is incompatible with this version.");
    MFATAL("Please delete the existing database and resync.");
    return;
  }

  if (!(mdb_flags & MDB_RDONLY))
  {
    // only write version on an empty DB
    if (m_height == 0)
    {
      MDB_val_copy<const char*> k("version");
      MDB_val_copy<uint32_t> v(VERSION);
      auto put_result = mdb_put(txn, m_properties, &k, &v, 0);
      if (put_result != MDB_SUCCESS)
      {
        txn.abort();
        mdb_env_close(m_env);
        m_open = false;
        MERROR("Failed to write version to database.");
        return;
      }
    }
  }

  // commit the transaction
  txn.commit();

  m_open = true;
  // from here, init should be finished
}
void BlockchainLMDB::close()
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  if (m_batch_active)
  {
    LOG_PRINT_L3("close() first calling batch_abort() due to active batch transaction");
    batch_abort();
  }
  this->sync();
  m_tinfo.reset();

  // FIXME: not yet thread safe!!! Use with care.
  mdb_env_close(m_env);
  m_open = false;
}

void BlockchainLMDB::sync()
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  // Does nothing unless LMDB environment was opened with MDB_NOSYNC or in part
  // MDB_NOMETASYNC. Force flush to be synchronous.
  if (auto result = mdb_env_sync(m_env, true))
  {
    throw0(DB_ERROR(lmdb_error("Failed to sync database: ", result).c_str()));
  }
}

void BlockchainLMDB::safesyncmode(const bool onoff)
{
  mdb_env_set_flags(m_env, MDB_NOSYNC | MDB_MAPASYNC, !onoff);
}
void BlockchainLMDB::reset()
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  mdb_txn_safe txn;
  if (auto result = lmdb_txn_begin(m_env, NULL, 0, txn))
    throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str()));

  if (auto result = mdb_drop(txn, m_blocks, 0))
    throw0(DB_ERROR(lmdb_error("Failed to drop m_blocks: ", result).c_str()));
  if (auto result = mdb_drop(txn, m_block_info, 0))
    throw0(DB_ERROR(lmdb_error("Failed to drop m_block_info: ", result).c_str()));
  if (auto result = mdb_drop(txn, m_block_heights, 0))
    throw0(DB_ERROR(lmdb_error("Failed to drop m_block_heights: ", result).c_str()));
  if (auto result = mdb_drop(txn, m_txs, 0))
    throw0(DB_ERROR(lmdb_error("Failed to drop m_txs: ", result).c_str()));
  if (auto result = mdb_drop(txn, m_tx_indices, 0))
    throw0(DB_ERROR(lmdb_error("Failed to drop m_tx_indices: ", result).c_str()));
  if (auto result = mdb_drop(txn, m_tx_outputs, 0))
    throw0(DB_ERROR(lmdb_error("Failed to drop m_tx_outputs: ", result).c_str()));
  if (auto result = mdb_drop(txn, m_output_txs, 0))
    throw0(DB_ERROR(lmdb_error("Failed to drop m_output_txs: ", result).c_str()));
  if (auto result = mdb_drop(txn, m_output_amounts, 0))
    throw0(DB_ERROR(lmdb_error("Failed to drop m_output_amounts: ", result).c_str()));
  if (auto result = mdb_drop(txn, m_spent_keys, 0))
    throw0(DB_ERROR(lmdb_error("Failed to drop m_spent_keys: ", result).c_str()));
  (void)mdb_drop(txn, m_hf_starting_heights, 0); // this one is dropped in new code
  if (auto result = mdb_drop(txn, m_hf_versions, 0))
    throw0(DB_ERROR(lmdb_error("Failed to drop m_hf_versions: ", result).c_str()));
  if (auto result = mdb_drop(txn, m_properties, 0))
    throw0(DB_ERROR(lmdb_error("Failed to drop m_properties: ", result).c_str()));

  // init with current version
  MDB_val_copy<const char*> k("version");
  MDB_val_copy<uint32_t> v(VERSION);
  if (auto result = mdb_put(txn, m_properties, &k, &v, 0))
    throw0(DB_ERROR(lmdb_error("Failed to write version to database: ", result).c_str()));

  txn.commit();
  m_cum_size = 0;
  m_cum_count = 0;
}

std::vector<std::string> BlockchainLMDB::get_filenames() const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  std::vector<std::string> filenames;

  boost::filesystem::path datafile(m_folder);
  datafile /= CRYPTONOTE_BLOCKCHAINDATA_FILENAME;
  boost::filesystem::path lockfile(m_folder);
  lockfile /= CRYPTONOTE_BLOCKCHAINDATA_LOCK_FILENAME;

  filenames.push_back(datafile.string());
  filenames.push_back(lockfile.string());

  return filenames;
}

std::string BlockchainLMDB::get_db_name() const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);

  return std::string("lmdb");
}

// TODO: this?
bool BlockchainLMDB::lock()
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();
  return false;
}

// TODO: this?
void BlockchainLMDB::unlock()
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();
}

#define TXN_PREFIX(flags); \
  mdb_txn_safe auto_txn; \
  mdb_txn_safe* txn_ptr = &auto_txn; \
  if (m_batch_active) \
    txn_ptr = m_write_txn; \
  else \
  { \
    if (auto mdb_res = lmdb_txn_begin(m_env, NULL, flags, auto_txn)) \
      throw0(DB_ERROR(lmdb_error(std::string("Failed to create a transaction for the db in ")+__FUNCTION__+": ", mdb_res).c_str())); \
  } \

#define TXN_PREFIX_RDONLY() \
  MDB_txn *m_txn; \
  mdb_txn_cursors *m_cursors; \
  bool my_rtxn = block_rtxn_start(&m_txn, &m_cursors); \
  mdb_txn_safe auto_txn(my_rtxn); \
  if (my_rtxn) auto_txn.m_tinfo = m_tinfo.get()
#define TXN_POSTFIX_RDONLY()

#define TXN_POSTFIX_SUCCESS() \
  do { \
    if (!m_batch_active) \
      auto_txn.commit(); \
  } while(0)


// The below two macros are for DB access within block add/remove, whether
// regular batch txn is in use or not. m_write_txn is used as a batch txn, even
// if it's only within block add/remove.
//
// DB access functions that may be called both within block add/remove and
// without should use these. If the function will be called ONLY within block
// add/remove, m_write_txn alone may be used instead of these macros.

#define TXN_BLOCK_PREFIX(flags); \
  mdb_txn_safe auto_txn; \
  mdb_txn_safe* txn_ptr = &auto_txn; \
  if (m_batch_active || m_write_txn) \
    txn_ptr = m_write_txn; \
  else \
  { \
    if (auto mdb_res = lmdb_txn_begin(m_env, NULL, flags, auto_txn)) \
      throw0(DB_ERROR(lmdb_error(std::string("Failed to create a transaction for the db in ")+__FUNCTION__+": ", mdb_res).c_str())); \
  } \

#define TXN_BLOCK_POSTFIX_SUCCESS() \
  do { \
    if (!m_batch_active && !m_write_txn) \
      auto_txn.commit(); \
  } while(0)
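
// NOTE (illustrative): a typical read-only accessor in this file is built from
// these macros plus RCURSOR, roughly:
//
//   TXN_PREFIX_RDONLY();     // joins or starts the shared read transaction
//   RCURSOR(block_info);     // lazily opens/renews the m_cur_block_info cursor
//   ... mdb_cursor_get(...) and copy the result out ...
//   TXN_POSTFIX_RDONLY();    // currently a no-op; the read txn is managed elsewhere
//
// Write paths instead take m_wcursors via `mdb_txn_cursors *m_cursors = &m_wcursors;`
// and the CURSOR() macro, inside the active write/batch transaction.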

void BlockchainLMDB::add_txpool_tx(const transaction &tx, const txpool_tx_meta_t& meta)
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();
  mdb_txn_cursors *m_cursors = &m_wcursors;

  CURSOR(txpool_meta)
  CURSOR(txpool_blob)

  const crypto::hash txid = get_transaction_hash(tx);

  MDB_val k = {sizeof(txid), (void *)&txid};
  MDB_val v = {sizeof(meta), (void *)&meta};
  if (auto result = mdb_cursor_put(m_cur_txpool_meta, &k, &v, MDB_NODUPDATA)) {
    if (result == MDB_KEYEXIST)
      throw1(DB_ERROR("Attempting to add txpool tx metadata that's already in the db"));
    else
      throw1(DB_ERROR(lmdb_error("Error adding txpool tx metadata to db transaction: ", result).c_str()));
  }
  MDB_val_copy<cryptonote::blobdata> blob_val(tx_to_blob(tx));
  if (auto result = mdb_cursor_put(m_cur_txpool_blob, &k, &blob_val, MDB_NODUPDATA)) {
    if (result == MDB_KEYEXIST)
      throw1(DB_ERROR("Attempting to add txpool tx blob that's already in the db"));
    else
      throw1(DB_ERROR(lmdb_error("Error adding txpool tx blob to db transaction: ", result).c_str()));
  }
}

void BlockchainLMDB::update_txpool_tx(const crypto::hash &txid, const txpool_tx_meta_t& meta)
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();
  mdb_txn_cursors *m_cursors = &m_wcursors;

  CURSOR(txpool_meta)
  CURSOR(txpool_blob)

  MDB_val k = {sizeof(txid), (void *)&txid};
  MDB_val v;
  auto result = mdb_cursor_get(m_cur_txpool_meta, &k, &v, MDB_SET);
  if (result != 0)
    throw1(DB_ERROR(lmdb_error("Error finding txpool tx meta to update: ", result).c_str()));
  result = mdb_cursor_del(m_cur_txpool_meta, 0);
  if (result)
    throw1(DB_ERROR(lmdb_error("Error adding removal of txpool tx metadata to db transaction: ", result).c_str()));
  v = MDB_val({sizeof(meta), (void *)&meta});
  if ((result = mdb_cursor_put(m_cur_txpool_meta, &k, &v, MDB_NODUPDATA)) != 0) {
    if (result == MDB_KEYEXIST)
      throw1(DB_ERROR("Attempting to add txpool tx metadata that's already in the db"));
    else
      throw1(DB_ERROR(lmdb_error("Error adding txpool tx metadata to db transaction: ", result).c_str()));
  }
}

uint64_t BlockchainLMDB::get_txpool_tx_count() const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  TXN_PREFIX_RDONLY();
  int result;

  MDB_stat db_stats;
  if ((result = mdb_stat(m_txn, m_txpool_meta, &db_stats)))
    throw0(DB_ERROR(lmdb_error("Failed to query m_txpool_meta: ", result).c_str()));

  TXN_POSTFIX_RDONLY();

  return db_stats.ms_entries;
}

bool BlockchainLMDB::txpool_has_tx(const crypto::hash& txid) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  TXN_PREFIX_RDONLY();
  RCURSOR(txpool_meta)

  MDB_val k = {sizeof(txid), (void *)&txid};
  auto result = mdb_cursor_get(m_cur_txpool_meta, &k, NULL, MDB_SET);
  if (result != 0 && result != MDB_NOTFOUND)
    throw1(DB_ERROR(lmdb_error("Error finding txpool tx meta: ", result).c_str()));
  TXN_POSTFIX_RDONLY();
  return result != MDB_NOTFOUND;
}

void BlockchainLMDB::remove_txpool_tx(const crypto::hash& txid)
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();
  mdb_txn_cursors *m_cursors = &m_wcursors;

  CURSOR(txpool_meta)
  CURSOR(txpool_blob)

  MDB_val k = {sizeof(txid), (void *)&txid};
  auto result = mdb_cursor_get(m_cur_txpool_meta, &k, NULL, MDB_SET);
  if (result != 0 && result != MDB_NOTFOUND)
    throw1(DB_ERROR(lmdb_error("Error finding txpool tx meta to remove: ", result).c_str()));
  if (!result)
  {
    result = mdb_cursor_del(m_cur_txpool_meta, 0);
    if (result)
      throw1(DB_ERROR(lmdb_error("Error adding removal of txpool tx metadata to db transaction: ", result).c_str()));
  }
  result = mdb_cursor_get(m_cur_txpool_blob, &k, NULL, MDB_SET);
  if (result != 0 && result != MDB_NOTFOUND)
    throw1(DB_ERROR(lmdb_error("Error finding txpool tx blob to remove: ", result).c_str()));
  if (!result)
  {
    result = mdb_cursor_del(m_cur_txpool_blob, 0);
    if (result)
      throw1(DB_ERROR(lmdb_error("Error adding removal of txpool tx blob to db transaction: ", result).c_str()));
  }
}

txpool_tx_meta_t BlockchainLMDB::get_txpool_tx_meta(const crypto::hash& txid) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  TXN_PREFIX_RDONLY();
  RCURSOR(txpool_meta)

  MDB_val k = {sizeof(txid), (void *)&txid};
  MDB_val v;
  auto result = mdb_cursor_get(m_cur_txpool_meta, &k, &v, MDB_SET);
  if (result != 0)
    throw1(DB_ERROR(lmdb_error("Error finding txpool tx meta: ", result).c_str()));

  const txpool_tx_meta_t meta = *(const txpool_tx_meta_t*)v.mv_data;
  TXN_POSTFIX_RDONLY();
  return meta;
}

bool BlockchainLMDB::get_txpool_tx_blob(const crypto::hash& txid, cryptonote::blobdata &bd) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  TXN_PREFIX_RDONLY();
  RCURSOR(txpool_blob)

  MDB_val k = {sizeof(txid), (void *)&txid};
  MDB_val v;
  auto result = mdb_cursor_get(m_cur_txpool_blob, &k, &v, MDB_SET);
  if (result == MDB_NOTFOUND)
    return false;
  if (result != 0)
    throw1(DB_ERROR(lmdb_error("Error finding txpool tx blob: ", result).c_str()));

  bd.assign(reinterpret_cast<const char*>(v.mv_data), v.mv_size);
  TXN_POSTFIX_RDONLY();
  return true;
}

cryptonote::blobdata BlockchainLMDB::get_txpool_tx_blob(const crypto::hash& txid) const
{
  cryptonote::blobdata bd;
  if (!get_txpool_tx_blob(txid, bd))
    throw1(DB_ERROR("Tx not found in txpool: "));
  return bd;
}

bool BlockchainLMDB::for_all_txpool_txes(std::function<bool(const crypto::hash&, const txpool_tx_meta_t&, const cryptonote::blobdata*)> f, bool include_blob) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  TXN_PREFIX_RDONLY();
  RCURSOR(txpool_meta);
  RCURSOR(txpool_blob);

  MDB_val k;
  MDB_val v;
  bool ret = true;

  MDB_cursor_op op = MDB_FIRST;
  while (1)
  {
    int result = mdb_cursor_get(m_cur_txpool_meta, &k, &v, op);
    op = MDB_NEXT;
    if (result == MDB_NOTFOUND)
      break;
    if (result)
      throw0(DB_ERROR(lmdb_error("Failed to enumerate txpool tx metadata: ", result).c_str()));
    const crypto::hash txid = *(const crypto::hash*)k.mv_data;
    const txpool_tx_meta_t &meta = *(const txpool_tx_meta_t*)v.mv_data;
    const cryptonote::blobdata *passed_bd = NULL;
    cryptonote::blobdata bd;
    if (include_blob)
    {
      MDB_val b;
      result = mdb_cursor_get(m_cur_txpool_blob, &k, &b, MDB_SET);
      if (result == MDB_NOTFOUND)
        throw0(DB_ERROR("Failed to find txpool tx blob to match metadata"));
      if (result)
        throw0(DB_ERROR(lmdb_error("Failed to enumerate txpool tx blob: ", result).c_str()));
      bd.assign(reinterpret_cast<const char*>(b.mv_data), b.mv_size);
      passed_bd = &bd;
    }

    if (!f(txid, meta, passed_bd)) {
      ret = false;
      break;
    }
  }

  TXN_POSTFIX_RDONLY();

  return ret;
}
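
// NOTE (illustrative): callers enumerate the pool by passing a callable that
// returns false to stop early, e.g. roughly (hypothetical filter):
//
//   db.for_all_txpool_txes([](const crypto::hash &txid, const txpool_tx_meta_t &meta,
//                             const cryptonote::blobdata *blob) {
//     return meta.blob_size < 100000;   // stop at the first large tx
//   }, /*include_blob=*/false);
//
// When include_blob is false the third argument is NULL and the blob table is
// never touched.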

bool BlockchainLMDB::block_exists(const crypto::hash& h, uint64_t *height) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  TXN_PREFIX_RDONLY();
  RCURSOR(block_heights);

  bool ret = false;
  MDB_val_set(key, h);
  auto get_result = mdb_cursor_get(m_cur_block_heights, (MDB_val *)&zerokval, &key, MDB_GET_BOTH);
  if (get_result == MDB_NOTFOUND)
  {
    LOG_PRINT_L3("Block with hash " << epee::string_tools::pod_to_hex(h) << " not found in db");
  }
  else if (get_result)
    throw0(DB_ERROR(lmdb_error("DB error attempting to fetch block index from hash", get_result).c_str()));
  else
  {
    if (height)
    {
      const blk_height *bhp = (const blk_height *)key.mv_data;
      *height = bhp->bh_height;
    }
    ret = true;
  }

  TXN_POSTFIX_RDONLY();
  return ret;
}
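
// NOTE (illustrative): the optional out-parameter avoids a second lookup when
// the caller also wants the height, e.g. roughly:
//
//   uint64_t height;
//   if (db.block_exists(hash, &height))
//     MDEBUG("block " << hash << " is at height " << height);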

cryptonote::blobdata BlockchainLMDB::get_block_blob(const crypto::hash& h) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  return get_block_blob_from_height(get_block_height(h));
}

uint64_t BlockchainLMDB::get_block_height(const crypto::hash& h) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  TXN_PREFIX_RDONLY();
  RCURSOR(block_heights);

  MDB_val_set(key, h);
  auto get_result = mdb_cursor_get(m_cur_block_heights, (MDB_val *)&zerokval, &key, MDB_GET_BOTH);
  if (get_result == MDB_NOTFOUND)
    throw1(BLOCK_DNE("Attempted to retrieve non-existent block height"));
  else if (get_result)
    throw0(DB_ERROR("Error attempting to retrieve a block height from the db"));

  blk_height *bhp = (blk_height *)key.mv_data;
  uint64_t ret = bhp->bh_height;
  TXN_POSTFIX_RDONLY();
  return ret;
}

block_header BlockchainLMDB::get_block_header(const crypto::hash& h) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  // block_header object is automatically cast from block object
  return get_block(h);
}

cryptonote::blobdata BlockchainLMDB::get_block_blob_from_height(const uint64_t& height) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  TXN_PREFIX_RDONLY();
  RCURSOR(blocks);

  MDB_val_copy<uint64_t> key(height);
  MDB_val result;
  auto get_result = mdb_cursor_get(m_cur_blocks, &key, &result, MDB_SET);
  if (get_result == MDB_NOTFOUND)
  {
    throw0(BLOCK_DNE(std::string("Attempt to get block from height ").append(boost::lexical_cast<std::string>(height)).append(" failed -- block not in db").c_str()));
  }
  else if (get_result)
    throw0(DB_ERROR("Error attempting to retrieve a block from the db"));

  blobdata bd;
  bd.assign(reinterpret_cast<char*>(result.mv_data), result.mv_size);

  TXN_POSTFIX_RDONLY();

  return bd;
}

uint64_t BlockchainLMDB::get_block_timestamp(const uint64_t& height) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  TXN_PREFIX_RDONLY();
  RCURSOR(block_info);

  MDB_val_set(result, height);
  auto get_result = mdb_cursor_get(m_cur_block_info, (MDB_val *)&zerokval, &result, MDB_GET_BOTH);
  if (get_result == MDB_NOTFOUND)
  {
    throw0(BLOCK_DNE(std::string("Attempt to get timestamp from height ").append(boost::lexical_cast<std::string>(height)).append(" failed -- timestamp not in db").c_str()));
  }
  else if (get_result)
    throw0(DB_ERROR("Error attempting to retrieve a timestamp from the db"));

  mdb_block_info *bi = (mdb_block_info *)result.mv_data;
  uint64_t ret = bi->bi_timestamp;
  TXN_POSTFIX_RDONLY();
  return ret;
}

uint64_t BlockchainLMDB::get_top_block_timestamp() const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();
  uint64_t m_height = height();

  // if no blocks, return 0
  if (m_height == 0)
  {
    return 0;
  }

  return get_block_timestamp(m_height - 1);
}

size_t BlockchainLMDB::get_block_size(const uint64_t& height) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  TXN_PREFIX_RDONLY();
  RCURSOR(block_info);

  MDB_val_set(result, height);
  auto get_result = mdb_cursor_get(m_cur_block_info, (MDB_val *)&zerokval, &result, MDB_GET_BOTH);
  if (get_result == MDB_NOTFOUND)
  {
    throw0(BLOCK_DNE(std::string("Attempt to get block size from height ").append(boost::lexical_cast<std::string>(height)).append(" failed -- block size not in db").c_str()));
  }
  else if (get_result)
    throw0(DB_ERROR("Error attempting to retrieve a block size from the db"));

  mdb_block_info *bi = (mdb_block_info *)result.mv_data;
  size_t ret = bi->bi_size;
  TXN_POSTFIX_RDONLY();
  return ret;
}

difficulty_type BlockchainLMDB::get_block_cumulative_difficulty(const uint64_t& height) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__ << " height: " << height);
  check_open();

  TXN_PREFIX_RDONLY();
  RCURSOR(block_info);

  MDB_val_set(result, height);
  auto get_result = mdb_cursor_get(m_cur_block_info, (MDB_val *)&zerokval, &result, MDB_GET_BOTH);
  if (get_result == MDB_NOTFOUND)
  {
    throw0(BLOCK_DNE(std::string("Attempt to get cumulative difficulty from height ").append(boost::lexical_cast<std::string>(height)).append(" failed -- difficulty not in db").c_str()));
  }
  else if (get_result)
    throw0(DB_ERROR("Error attempting to retrieve a cumulative difficulty from the db"));

  mdb_block_info *bi = (mdb_block_info *)result.mv_data;
  difficulty_type ret = bi->bi_diff;
  TXN_POSTFIX_RDONLY();
  return ret;
}

difficulty_type BlockchainLMDB::get_block_difficulty(const uint64_t& height) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  difficulty_type diff1 = 0;
  difficulty_type diff2 = 0;

  diff1 = get_block_cumulative_difficulty(height);
  if (height != 0)
  {
    diff2 = get_block_cumulative_difficulty(height - 1);
  }

  return diff1 - diff2;
}
uint64_t BlockchainLMDB::get_block_already_generated_coins(const uint64_t& height) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  TXN_PREFIX_RDONLY();
  RCURSOR(block_info);

  MDB_val_set(result, height);
  auto get_result = mdb_cursor_get(m_cur_block_info, (MDB_val *)&zerokval, &result, MDB_GET_BOTH);
  if (get_result == MDB_NOTFOUND)
  {
    throw0(BLOCK_DNE(std::string("Attempt to get generated coins from height ").append(boost::lexical_cast<std::string>(height)).append(" failed -- generated coins not in db").c_str()));
  }
  else if (get_result)
    throw0(DB_ERROR("Error attempting to retrieve total generated coins from the db"));

  mdb_block_info *bi = (mdb_block_info *)result.mv_data;
  uint64_t ret = bi->bi_coins;
  TXN_POSTFIX_RDONLY();
  return ret;
}

crypto::hash BlockchainLMDB::get_block_hash_from_height(const uint64_t& height) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  TXN_PREFIX_RDONLY();
  RCURSOR(block_info);

  MDB_val_set(result, height);
  auto get_result = mdb_cursor_get(m_cur_block_info, (MDB_val *)&zerokval, &result, MDB_GET_BOTH);
  if (get_result == MDB_NOTFOUND)
  {
    throw0(BLOCK_DNE(std::string("Attempt to get hash from height ").append(boost::lexical_cast<std::string>(height)).append(" failed -- hash not in db").c_str()));
  }
  else if (get_result)
    throw0(DB_ERROR(lmdb_error("Error attempting to retrieve a block hash from the db: ", get_result).c_str()));

  mdb_block_info *bi = (mdb_block_info *)result.mv_data;
  crypto::hash ret = bi->bi_hash;
  TXN_POSTFIX_RDONLY();
  return ret;
}

std::vector<block> BlockchainLMDB::get_blocks_range(const uint64_t& h1, const uint64_t& h2) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();
  std::vector<block> v;

  for (uint64_t height = h1; height <= h2; ++height)
  {
    v.push_back(get_block_from_height(height));
  }

  return v;
}

std::vector<crypto::hash> BlockchainLMDB::get_hashes_range(const uint64_t& h1, const uint64_t& h2) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();
  std::vector<crypto::hash> v;

  for (uint64_t height = h1; height <= h2; ++height)
  {
    v.push_back(get_block_hash_from_height(height));
  }

  return v;
}

crypto::hash BlockchainLMDB::top_block_hash() const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();
  uint64_t m_height = height();
  if (m_height != 0)
  {
    return get_block_hash_from_height(m_height - 1);
  }

  return null_hash;
}

block BlockchainLMDB::get_top_block() const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();
  uint64_t m_height = height();

  if (m_height != 0)
  {
    return get_block_from_height(m_height - 1);
  }

  block b;
  return b;
}

uint64_t BlockchainLMDB::height() const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();
  TXN_PREFIX_RDONLY();
  int result;

  // get current height
  MDB_stat db_stats;
  if ((result = mdb_stat(m_txn, m_blocks, &db_stats)))
    throw0(DB_ERROR(lmdb_error("Failed to query m_blocks: ", result).c_str()));
  return db_stats.ms_entries;
}

uint64_t BlockchainLMDB::num_outputs() const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();
  TXN_PREFIX_RDONLY();
  int result;

  // get current number of outputs
  MDB_stat db_stats;
  if ((result = mdb_stat(m_txn, m_output_txs, &db_stats)))
    throw0(DB_ERROR(lmdb_error("Failed to query m_output_txs: ", result).c_str()));
  return db_stats.ms_entries;
}

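// The tx lookups below are two-stage: tx_indices maps a tx hash to its metadata
// (including the internal tx_id), and the txs table is keyed by that tx_id. A pure
// existence check only needs the first stage.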
bool BlockchainLMDB::tx_exists(const crypto::hash& h) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  TXN_PREFIX_RDONLY();
  RCURSOR(tx_indices);
  RCURSOR(txs);

  MDB_val_set(key, h);
  bool tx_found = false;

  TIME_MEASURE_START(time1);
  auto get_result = mdb_cursor_get(m_cur_tx_indices, (MDB_val *)&zerokval, &key, MDB_GET_BOTH);
  if (get_result == 0)
    tx_found = true;
  else if (get_result != MDB_NOTFOUND)
    throw0(DB_ERROR(lmdb_error(std::string("DB error attempting to fetch transaction index from hash ") + epee::string_tools::pod_to_hex(h) + ": ", get_result).c_str()));

  // This isn't needed as part of the check; we're not checking consistency of the db.
  // get_result = mdb_cursor_get(m_cur_txs, &val_tx_index, &result, MDB_SET);

  TIME_MEASURE_FINISH(time1);
  time_tx_exists += time1;

  TXN_POSTFIX_RDONLY();

  if (! tx_found)
  {
    LOG_PRINT_L1("transaction with hash " << epee::string_tools::pod_to_hex(h) << " not found in db");
    return false;
  }

  // Below not needed due to above comment.
  // if (get_result == MDB_NOTFOUND)
  //   throw0(DB_ERROR(std::string("transaction with hash ").append(epee::string_tools::pod_to_hex(h)).append(" not found at index").c_str()));
  // else if (get_result)
  //   throw0(DB_ERROR(lmdb_error(std::string("DB error attempting to fetch transaction ") + epee::string_tools::pod_to_hex(h) + " at index: ", get_result).c_str()));

  return true;
}

bool BlockchainLMDB::tx_exists(const crypto::hash& h, uint64_t& tx_id) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  TXN_PREFIX_RDONLY();
  RCURSOR(tx_indices);

  MDB_val_set(v, h);

  TIME_MEASURE_START(time1);
  auto get_result = mdb_cursor_get(m_cur_tx_indices, (MDB_val *)&zerokval, &v, MDB_GET_BOTH);
  TIME_MEASURE_FINISH(time1);
  time_tx_exists += time1;
  if (!get_result) {
    txindex *tip = (txindex *)v.mv_data;
    tx_id = tip->data.tx_id;
  }

  TXN_POSTFIX_RDONLY();

  bool ret = false;
  if (get_result == MDB_NOTFOUND)
  {
    LOG_PRINT_L1("transaction with hash " << epee::string_tools::pod_to_hex(h) << " not found in db");
  }
  else if (get_result)
    throw0(DB_ERROR(lmdb_error("DB error attempting to fetch transaction from hash", get_result).c_str()));
  else
    ret = true;

  return ret;
}

uint64_t BlockchainLMDB::get_tx_unlock_time(const crypto::hash& h) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  TXN_PREFIX_RDONLY();
  RCURSOR(tx_indices);

  MDB_val_set(v, h);
  auto get_result = mdb_cursor_get(m_cur_tx_indices, (MDB_val *)&zerokval, &v, MDB_GET_BOTH);
  if (get_result == MDB_NOTFOUND)
    throw1(TX_DNE(lmdb_error(std::string("tx data with hash ") + epee::string_tools::pod_to_hex(h) + " not found in db: ", get_result).c_str()));
  else if (get_result)
    throw0(DB_ERROR(lmdb_error("DB error attempting to fetch tx data from hash: ", get_result).c_str()));

  txindex *tip = (txindex *)v.mv_data;
  uint64_t ret = tip->data.unlock_time;
  TXN_POSTFIX_RDONLY();
  return ret;
}

bool BlockchainLMDB::get_tx_blob(const crypto::hash& h, cryptonote::blobdata &bd) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  TXN_PREFIX_RDONLY();
  RCURSOR(tx_indices);
  RCURSOR(txs);

  MDB_val_set(v, h);
  MDB_val result;
  auto get_result = mdb_cursor_get(m_cur_tx_indices, (MDB_val *)&zerokval, &v, MDB_GET_BOTH);
  if (get_result == 0)
  {
    txindex *tip = (txindex *)v.mv_data;
    MDB_val_set(val_tx_id, tip->data.tx_id);
    get_result = mdb_cursor_get(m_cur_txs, &val_tx_id, &result, MDB_SET);
  }
  if (get_result == MDB_NOTFOUND)
    return false;
  else if (get_result)
    throw0(DB_ERROR(lmdb_error("DB error attempting to fetch tx from hash", get_result).c_str()));

  bd.assign(reinterpret_cast<char*>(result.mv_data), result.mv_size);

  TXN_POSTFIX_RDONLY();

  return true;
}

uint64_t BlockchainLMDB::get_tx_count() const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  TXN_PREFIX_RDONLY();
  int result;

  MDB_stat db_stats;
  if ((result = mdb_stat(m_txn, m_txs, &db_stats)))
    throw0(DB_ERROR(lmdb_error("Failed to query m_txs: ", result).c_str()));

  TXN_POSTFIX_RDONLY();

  return db_stats.ms_entries;
}

std::vector<transaction> BlockchainLMDB::get_tx_list(const std::vector<crypto::hash>& hlist) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();
  std::vector<transaction> v;

  for (auto& h : hlist)
  {
    v.push_back(get_tx(h));
  }

  return v;
}

uint64_t BlockchainLMDB::get_tx_block_height(const crypto::hash& h) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  TXN_PREFIX_RDONLY();
  RCURSOR(tx_indices);

  MDB_val_set(v, h);
  auto get_result = mdb_cursor_get(m_cur_tx_indices, (MDB_val *)&zerokval, &v, MDB_GET_BOTH);
  if (get_result == MDB_NOTFOUND)
  {
    throw1(TX_DNE(std::string("tx_data_t with hash ").append(epee::string_tools::pod_to_hex(h)).append(" not found in db").c_str()));
  }
  else if (get_result)
    throw0(DB_ERROR(lmdb_error("DB error attempting to fetch tx height from hash", get_result).c_str()));

  txindex *tip = (txindex *)v.mv_data;
  uint64_t ret = tip->data.block_id;
  TXN_POSTFIX_RDONLY();
  return ret;
}

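// output_amounts groups outputs by amount (dup-sorted under the amount key), so
// positioning the cursor on an amount with MDB_SET and calling mdb_cursor_count
// yields the number of outputs with that amount.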
uint64_t BlockchainLMDB::get_num_outputs(const uint64_t& amount) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  TXN_PREFIX_RDONLY();
  RCURSOR(output_amounts);

  MDB_val_copy<uint64_t> k(amount);
  MDB_val v;
  mdb_size_t num_elems = 0;
  auto result = mdb_cursor_get(m_cur_output_amounts, &k, &v, MDB_SET);
  if (result == MDB_SUCCESS)
  {
    mdb_cursor_count(m_cur_output_amounts, &num_elems);
  }
  else if (result != MDB_NOTFOUND)
    throw0(DB_ERROR("DB error attempting to get number of outputs of an amount"));

  TXN_POSTFIX_RDONLY();

  return num_elems;
}

// This is a lot harder now that we've removed the output_keys index
output_data_t BlockchainLMDB::get_output_key(const uint64_t &global_index) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__ << " (unused version - does nothing)");
  check_open();
  TXN_PREFIX_RDONLY();
  RCURSOR(output_txs);
  RCURSOR(tx_indices);
  RCURSOR(txs);

  output_data_t od;
  MDB_val_set(v, global_index);
  auto get_result = mdb_cursor_get(m_cur_output_txs, (MDB_val *)&zerokval, &v, MDB_GET_BOTH);
  if (get_result == MDB_NOTFOUND)
    throw1(OUTPUT_DNE("output with given index not in db"));
  else if (get_result)
    throw0(DB_ERROR("DB error attempting to fetch output tx hash"));

  outtx *ot = (outtx *)v.mv_data;

  MDB_val_set(val_h, ot->tx_hash);
  get_result = mdb_cursor_get(m_cur_tx_indices, (MDB_val *)&zerokval, &val_h, MDB_GET_BOTH);
  if (get_result)
    throw0(DB_ERROR(lmdb_error(std::string("DB error attempting to fetch transaction index from hash ") + epee::string_tools::pod_to_hex(ot->tx_hash) + ": ", get_result).c_str()));
  txindex *tip = (txindex *)val_h.mv_data;

  MDB_val_set(val_tx_id, tip->data.tx_id);
  MDB_val result;
  get_result = mdb_cursor_get(m_cur_txs, &val_tx_id, &result, MDB_SET);
  if (get_result == MDB_NOTFOUND)
    throw1(TX_DNE(std::string("tx with hash ").append(epee::string_tools::pod_to_hex(ot->tx_hash)).append(" not found in db").c_str()));
  else if (get_result)
    throw0(DB_ERROR(lmdb_error("DB error attempting to fetch tx from hash", get_result).c_str()));

  blobdata bd;
  bd.assign(reinterpret_cast<char*>(result.mv_data), result.mv_size);

  transaction tx;
  if (!parse_and_validate_tx_from_blob(bd, tx))
    throw0(DB_ERROR("Failed to parse tx from blob retrieved from the db"));

  const tx_out tx_output = tx.vout[ot->local_index];
  od.unlock_time = tip->data.unlock_time;
  od.height = tip->data.block_id;
  od.pubkey = boost::get<txout_to_key>(tx_output.target).key;

  TXN_POSTFIX_RDONLY();
  return od;
}

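// For the amount/index variant below: RingCT outputs are stored under amount 0 with
// their commitment kept in the record, while pre-RingCT outputs store no commitment
// and get a deterministic one recomputed via rct::zeroCommit(amount).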
output_data_t BlockchainLMDB::get_output_key(const uint64_t& amount, const uint64_t& index)
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  TXN_PREFIX_RDONLY();
  RCURSOR(output_amounts);

  MDB_val_set(k, amount);
  MDB_val_set(v, index);
  auto get_result = mdb_cursor_get(m_cur_output_amounts, &k, &v, MDB_GET_BOTH);
  if (get_result == MDB_NOTFOUND)
    throw1(OUTPUT_DNE("Attempting to get output pubkey by index, but key does not exist"));
  else if (get_result)
    throw0(DB_ERROR("Error attempting to retrieve an output pubkey from the db"));

  output_data_t ret;
  if (amount == 0)
  {
    const outkey *okp = (const outkey *)v.mv_data;
    ret = okp->data;
  }
  else
  {
    const pre_rct_outkey *okp = (const pre_rct_outkey *)v.mv_data;
    memcpy(&ret, &okp->data, sizeof(pre_rct_output_data_t));
    ret.commitment = rct::zeroCommit(amount);
  }
  TXN_POSTFIX_RDONLY();
  return ret;
}

tx_out_index BlockchainLMDB::get_output_tx_and_index_from_global(const uint64_t& output_id) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  TXN_PREFIX_RDONLY();
  RCURSOR(output_txs);

  MDB_val_set(v, output_id);

  auto get_result = mdb_cursor_get(m_cur_output_txs, (MDB_val *)&zerokval, &v, MDB_GET_BOTH);
  if (get_result == MDB_NOTFOUND)
    throw1(OUTPUT_DNE("output with given index not in db"));
  else if (get_result)
    throw0(DB_ERROR("DB error attempting to fetch output tx hash"));

  outtx *ot = (outtx *)v.mv_data;
  tx_out_index ret = tx_out_index(ot->tx_hash, ot->local_index);

  TXN_POSTFIX_RDONLY();
  return ret;
}

tx_out_index BlockchainLMDB::get_output_tx_and_index(const uint64_t& amount, const uint64_t& index) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  std::vector<uint64_t> offsets;
  std::vector<tx_out_index> indices;
  offsets.push_back(index);
  get_output_tx_and_index(amount, offsets, indices);
  if (!indices.size())
    throw1(OUTPUT_DNE("Attempting to get an output index by amount and amount index, but amount not found"));

  return indices[0];
}

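// tx_outputs stores, per tx_id, a flat array of uint64_t amount output indices (one per
// output of the transaction), so the output count below is simply mv_size / sizeof(uint64_t).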
std::vector<uint64_t> BlockchainLMDB::get_tx_amount_output_indices(const uint64_t tx_id) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);

  check_open();

  TXN_PREFIX_RDONLY();
  RCURSOR(tx_outputs);

  int result = 0;
  MDB_val_set(k_tx_id, tx_id);
  MDB_val v;
  std::vector<uint64_t> amount_output_indices;

  result = mdb_cursor_get(m_cur_tx_outputs, &k_tx_id, &v, MDB_SET);
  if (result == MDB_NOTFOUND)
    LOG_PRINT_L0("WARNING: Unexpected: tx has no amount indices stored in "
        "tx_outputs, but it should have an empty entry even if it's a tx without "
        "outputs");
  else if (result)
    throw0(DB_ERROR(lmdb_error("DB error attempting to get data for tx_outputs[tx_index]", result).c_str()));

  const uint64_t* indices = (const uint64_t*)v.mv_data;
  int num_outputs = v.mv_size / sizeof(uint64_t);

  amount_output_indices.reserve(num_outputs);
  for (int i = 0; i < num_outputs; ++i)
  {
    // LOG_PRINT_L0("amount output index[" << 2*i << "]" << ": " << paired_indices[2*i] << " global output index: " << paired_indices[2*i+1]);
    amount_output_indices.push_back(indices[i]);
  }
  indices = nullptr;

  TXN_POSTFIX_RDONLY();
  return amount_output_indices;
}

bool BlockchainLMDB::has_key_image(const crypto::key_image& img) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  bool ret;

  TXN_PREFIX_RDONLY();
  RCURSOR(spent_keys);

  MDB_val k = {sizeof(img), (void *)&img};
  ret = (mdb_cursor_get(m_cur_spent_keys, (MDB_val *)&zerokval, &k, MDB_GET_BOTH) == 0);

  TXN_POSTFIX_RDONLY();
  return ret;
}

bool BlockchainLMDB::for_all_key_images(std::function<bool(const crypto::key_image&)> f) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  TXN_PREFIX_RDONLY();
  RCURSOR(spent_keys);

  MDB_val k, v;
  bool ret = true;

  k = zerokval;
  MDB_cursor_op op = MDB_FIRST;
  while (1)
  {
    int result = mdb_cursor_get(m_cur_spent_keys, &k, &v, op);
    op = MDB_NEXT;
    if (result == MDB_NOTFOUND)
      break;
    if (result < 0)
      throw0(DB_ERROR("Failed to enumerate key images"));
    const crypto::key_image k_image = *(const crypto::key_image*)v.mv_data;
    if (!f(k_image)) {
      ret = false;
      break;
    }
  }

  TXN_POSTFIX_RDONLY();

  return ret;
}

bool BlockchainLMDB::for_blocks_range(const uint64_t& h1, const uint64_t& h2, std::function<bool(uint64_t, const crypto::hash&, const cryptonote::block&)> f) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  TXN_PREFIX_RDONLY();
  RCURSOR(blocks);

  MDB_val k;
  MDB_val v;
  bool ret = true;

  MDB_cursor_op op;
  if (h1)
  {
    k = MDB_val{sizeof(h1), (void*)&h1};
    op = MDB_SET;
  } else
  {
    op = MDB_FIRST;
  }
  while (1)
  {
    int result = mdb_cursor_get(m_cur_blocks, &k, &v, op);
    op = MDB_NEXT;
    if (result == MDB_NOTFOUND)
      break;
    if (result)
      throw0(DB_ERROR("Failed to enumerate blocks"));
    uint64_t height = *(const uint64_t*)k.mv_data;
    blobdata bd;
    bd.assign(reinterpret_cast<char*>(v.mv_data), v.mv_size);
    block b;
    if (!parse_and_validate_block_from_blob(bd, b))
      throw0(DB_ERROR("Failed to parse block from blob retrieved from the db"));
    crypto::hash hash;
    if (!get_block_hash(b, hash))
      throw0(DB_ERROR("Failed to get block hash from blob retrieved from the db"));
    if (!f(height, hash, b)) {
      ret = false;
      break;
    }
    if (height >= h2)
      break;
  }

  TXN_POSTFIX_RDONLY();

  return ret;
}

bool BlockchainLMDB::for_all_transactions(std::function<bool(const crypto::hash&, const cryptonote::transaction&)> f) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  TXN_PREFIX_RDONLY();
  RCURSOR(txs);
  RCURSOR(tx_indices);

  MDB_val k;
  MDB_val v;
  bool ret = true;

  MDB_cursor_op op = MDB_FIRST;
  while (1)
  {
    int result = mdb_cursor_get(m_cur_tx_indices, &k, &v, op);
    op = MDB_NEXT;
    if (result == MDB_NOTFOUND)
      break;
    if (result)
      throw0(DB_ERROR(lmdb_error("Failed to enumerate transactions: ", result).c_str()));

    txindex *ti = (txindex *)v.mv_data;
    const crypto::hash hash = ti->key;
    k.mv_data = (void *)&ti->data.tx_id;
    k.mv_size = sizeof(ti->data.tx_id);
    result = mdb_cursor_get(m_cur_txs, &k, &v, MDB_SET);
    if (result == MDB_NOTFOUND)
      break;
    if (result)
      throw0(DB_ERROR(lmdb_error("Failed to enumerate transactions: ", result).c_str()));

    blobdata bd;
    bd.assign(reinterpret_cast<char*>(v.mv_data), v.mv_size);
    transaction tx;
    if (!parse_and_validate_tx_from_blob(bd, tx))
      throw0(DB_ERROR("Failed to parse tx from blob retrieved from the db"));
    if (!f(hash, tx)) {
      ret = false;
      break;
    }
  }

  TXN_POSTFIX_RDONLY();

  return ret;
}

bool BlockchainLMDB::for_all_outputs(std::function<bool(uint64_t amount, const crypto::hash &tx_hash, size_t tx_idx)> f) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  TXN_PREFIX_RDONLY();
  RCURSOR(output_amounts);

  MDB_val k;
  MDB_val v;
  bool ret = true;

  MDB_cursor_op op = MDB_FIRST;
  while (1)
  {
    int result = mdb_cursor_get(m_cur_output_amounts, &k, &v, op);
    op = MDB_NEXT;
    if (result == MDB_NOTFOUND)
      break;
    if (result)
      throw0(DB_ERROR("Failed to enumerate outputs"));
    uint64_t amount = *(const uint64_t*)k.mv_data;
    outkey *ok = (outkey *)v.mv_data;
    tx_out_index toi = get_output_tx_and_index_from_global(ok->output_id);
    if (!f(amount, toi.first, toi.second)) {
      ret = false;
      break;
    }
  }

  TXN_POSTFIX_RDONLY();

  return ret;
}

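// Batch write lifecycle (sketch; the caller shown is hypothetical): enable batch mode
// with set_batch_transactions(true), then batch_start() opens the shared batch write
// txn (returning false if one is already active), blocks are added under it, and
// batch_stop() commits while batch_abort() rolls back. These calls must all run on the
// thread that started the batch (m_writer), otherwise they throw.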
// batch_num_blocks: (optional) Used to check if resize needed before batch transaction starts.
bool BlockchainLMDB::batch_start(uint64_t batch_num_blocks)
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  if (! m_batch_transactions)
    throw0(DB_ERROR("batch transactions not enabled"));
  if (m_batch_active)
    return false;
  if (m_write_batch_txn != nullptr)
    return false;
  if (m_write_txn)
    throw0(DB_ERROR("batch transaction attempted, but m_write_txn already in use"));
  check_open();

  m_writer = boost::this_thread::get_id();
  check_and_resize_for_batch(batch_num_blocks);

  m_write_batch_txn = new mdb_txn_safe();

  // NOTE: need to make sure it's destroyed properly when done
  if (auto mdb_res = lmdb_txn_begin(m_env, NULL, 0, *m_write_batch_txn))
  {
    delete m_write_batch_txn;
    m_write_batch_txn = nullptr;
    throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", mdb_res).c_str()));
  }
  // indicates this transaction is for batch transactions, but not whether it's
  // active
  m_write_batch_txn->m_batch_txn = true;
  m_write_txn = m_write_batch_txn;

  m_batch_active = true;
  memset(&m_wcursors, 0, sizeof(m_wcursors));

  LOG_PRINT_L3("batch transaction: begin");
  return true;
}

void BlockchainLMDB::batch_commit()
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  if (! m_batch_transactions)
    throw0(DB_ERROR("batch transactions not enabled"));
  if (! m_batch_active)
    throw1(DB_ERROR("batch transaction not in progress"));
  if (m_write_batch_txn == nullptr)
    throw1(DB_ERROR("batch transaction not in progress"));
  if (m_writer != boost::this_thread::get_id())
    throw1(DB_ERROR("batch transaction owned by other thread"));

  check_open();

  LOG_PRINT_L3("batch transaction: committing...");

  TIME_MEASURE_START(time1);
  m_write_txn->commit();
  TIME_MEASURE_FINISH(time1);
  time_commit1 += time1;
  LOG_PRINT_L3("batch transaction: committed");

  m_write_txn = nullptr;
  delete m_write_batch_txn;
  m_write_batch_txn = nullptr;
  memset(&m_wcursors, 0, sizeof(m_wcursors));
}

void BlockchainLMDB::cleanup_batch()
{
  // for destruction of batch transaction
  m_write_txn = nullptr;
  delete m_write_batch_txn;
  m_write_batch_txn = nullptr;
  m_batch_active = false;
  memset(&m_wcursors, 0, sizeof(m_wcursors));
}

void BlockchainLMDB::batch_stop()
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  if (! m_batch_transactions)
    throw0(DB_ERROR("batch transactions not enabled"));
  if (! m_batch_active)
    throw1(DB_ERROR("batch transaction not in progress"));
  if (m_write_batch_txn == nullptr)
    throw1(DB_ERROR("batch transaction not in progress"));
  if (m_writer != boost::this_thread::get_id())
    throw1(DB_ERROR("batch transaction owned by other thread"));
  check_open();
  LOG_PRINT_L3("batch transaction: committing...");
  TIME_MEASURE_START(time1);
  try
  {
    m_write_txn->commit();
    TIME_MEASURE_FINISH(time1);
    time_commit1 += time1;
    cleanup_batch();
  }
  catch (const std::exception &e)
  {
    cleanup_batch();
    throw;
  }
  LOG_PRINT_L3("batch transaction: end");
}

void BlockchainLMDB::batch_abort()
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  if (! m_batch_transactions)
    throw0(DB_ERROR("batch transactions not enabled"));
  if (! m_batch_active)
    throw1(DB_ERROR("batch transaction not in progress"));
  if (m_write_batch_txn == nullptr)
    throw1(DB_ERROR("batch transaction not in progress"));
  if (m_writer != boost::this_thread::get_id())
    throw1(DB_ERROR("batch transaction owned by other thread"));
  check_open();
  // for destruction of batch transaction
  m_write_txn = nullptr;
  // explicitly call in case mdb_env_close() (BlockchainLMDB::close()) called before BlockchainLMDB destructor called.
  m_write_batch_txn->abort();
  delete m_write_batch_txn;
  m_write_batch_txn = nullptr;
  m_batch_active = false;
  memset(&m_wcursors, 0, sizeof(m_wcursors));
  LOG_PRINT_L3("batch transaction: aborted");
}

void BlockchainLMDB::set_batch_transactions(bool batch_transactions)
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  if ((batch_transactions) && (m_batch_transactions))
  {
    LOG_PRINT_L0("WARNING: batch transaction mode already enabled, but asked to enable batch mode");
  }
  m_batch_transactions = batch_transactions;
  LOG_PRINT_L3("batch transactions " << (m_batch_transactions ? "enabled" : "disabled"));
}

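// Read transactions are kept per thread in m_tinfo and reused: block_rtxn_start() hands
// back either the active write txn (when called on the writer thread) or this thread's
// cached read txn, renewing it with lmdb_txn_renew() if it had been reset. The return
// value is true only when this call actually started or renewed the txn, so the caller
// can tell whether a matching block_rtxn_stop() is needed.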
// return true if we started the txn, false if already started
bool BlockchainLMDB::block_rtxn_start(MDB_txn **mtxn, mdb_txn_cursors **mcur) const
{
  bool ret = false;
  if (m_write_txn && m_writer == boost::this_thread::get_id()) {
    *mtxn = m_write_txn->m_txn;
    *mcur = (mdb_txn_cursors *)&m_wcursors;
    return ret;
  }
  if (!m_tinfo.get())
  {
    m_tinfo.reset(new mdb_threadinfo);
    memset(&m_tinfo->m_ti_rcursors, 0, sizeof(m_tinfo->m_ti_rcursors));
    memset(&m_tinfo->m_ti_rflags, 0, sizeof(m_tinfo->m_ti_rflags));
    if (auto mdb_res = lmdb_txn_begin(m_env, NULL, MDB_RDONLY, &m_tinfo->m_ti_rtxn))
      throw0(DB_ERROR_TXN_START(lmdb_error("Failed to create a read transaction for the db: ", mdb_res).c_str()));
    ret = true;
  } else if (!m_tinfo->m_ti_rflags.m_rf_txn)
  {
    if (auto mdb_res = lmdb_txn_renew(m_tinfo->m_ti_rtxn))
      throw0(DB_ERROR_TXN_START(lmdb_error("Failed to renew a read transaction for the db: ", mdb_res).c_str()));
    ret = true;
  }
  if (ret)
    m_tinfo->m_ti_rflags.m_rf_txn = true;
  *mtxn = m_tinfo->m_ti_rtxn;
  *mcur = &m_tinfo->m_ti_rcursors;
  if (ret)
    LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  return ret;
}

void BlockchainLMDB::block_rtxn_stop() const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  mdb_txn_reset(m_tinfo->m_ti_rtxn);
  memset(&m_tinfo->m_ti_rflags, 0, sizeof(m_tinfo->m_ti_rflags));
}

void BlockchainLMDB::block_txn_start(bool readonly)
{
  if (readonly)
  {
    bool didit = false;
    if (m_write_txn && m_writer == boost::this_thread::get_id())
      return;
    if (!m_tinfo.get())
    {
      m_tinfo.reset(new mdb_threadinfo);
      memset(&m_tinfo->m_ti_rcursors, 0, sizeof(m_tinfo->m_ti_rcursors));
      memset(&m_tinfo->m_ti_rflags, 0, sizeof(m_tinfo->m_ti_rflags));
      if (auto mdb_res = lmdb_txn_begin(m_env, NULL, MDB_RDONLY, &m_tinfo->m_ti_rtxn))
        throw0(DB_ERROR_TXN_START(lmdb_error("Failed to create a read transaction for the db: ", mdb_res).c_str()));
      didit = true;
    } else if (!m_tinfo->m_ti_rflags.m_rf_txn)
    {
      if (auto mdb_res = lmdb_txn_renew(m_tinfo->m_ti_rtxn))
        throw0(DB_ERROR_TXN_START(lmdb_error("Failed to renew a read transaction for the db: ", mdb_res).c_str()));
      didit = true;
    }
    if (didit)
    {
      m_tinfo->m_ti_rflags.m_rf_txn = true;
      LOG_PRINT_L3("BlockchainLMDB::" << __func__ << " RO");
    }
    return;
  }

  LOG_PRINT_L3("BlockchainLMDB::" << __func__);

  // Distinguish the exceptions here from exceptions that would be thrown while
  // using the txn and committing it.
  //
  // If an exception is thrown in this setup, we don't want the caller to catch
  // it and proceed as if there were an existing write txn, such as trying to
  // call block_txn_abort(). It also indicates a serious issue which will
  // probably be thrown up another layer.
  if (! m_batch_active && m_write_txn)
    throw0(DB_ERROR_TXN_START((std::string("Attempted to start new write txn when write txn already exists in ") + __FUNCTION__).c_str()));
  if (! m_batch_active)
  {
    m_writer = boost::this_thread::get_id();
    m_write_txn = new mdb_txn_safe();
    if (auto mdb_res = lmdb_txn_begin(m_env, NULL, 0, *m_write_txn))
    {
      delete m_write_txn;
      m_write_txn = nullptr;
      throw0(DB_ERROR_TXN_START(lmdb_error("Failed to create a transaction for the db: ", mdb_res).c_str()));
    }
    memset(&m_wcursors, 0, sizeof(m_wcursors));
  } else if (m_writer != boost::this_thread::get_id())
    throw0(DB_ERROR_TXN_START((std::string("Attempted to start new write txn when batch txn already exists in ") + __FUNCTION__).c_str()));
}

void BlockchainLMDB::block_txn_stop()
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  if (m_write_txn && m_writer == boost::this_thread::get_id())
  {
    if (! m_batch_active)
    {
      TIME_MEASURE_START(time1);
      m_write_txn->commit();
      TIME_MEASURE_FINISH(time1);
      time_commit1 += time1;

      delete m_write_txn;
      m_write_txn = nullptr;
      memset(&m_wcursors, 0, sizeof(m_wcursors));
    }
  }
  else if (m_tinfo->m_ti_rtxn)
  {
    mdb_txn_reset(m_tinfo->m_ti_rtxn);
    memset(&m_tinfo->m_ti_rflags, 0, sizeof(m_tinfo->m_ti_rflags));
  }
}

void BlockchainLMDB : : block_txn_abort ( )
{
LOG_PRINT_L3 ( " BlockchainLMDB:: " < < __func__ ) ;
2016-03-15 09:46:30 +00:00
if ( m_write_txn & & m_writer = = boost : : this_thread : : get_id ( ) )
2016-02-08 16:32:19 +00:00
{
2016-03-15 09:46:30 +00:00
if ( ! m_batch_active )
2016-02-13 11:50:42 +00:00
{
delete m_write_txn ;
m_write_txn = nullptr ;
2016-02-18 12:09:57 +00:00
memset ( & m_wcursors , 0 , sizeof ( m_wcursors ) ) ;
2016-02-13 11:50:42 +00:00
}
2016-03-15 09:46:30 +00:00
}
else if ( m_tinfo - > m_ti_rtxn )
{
mdb_txn_reset ( m_tinfo - > m_ti_rtxn ) ;
memset ( & m_tinfo - > m_ti_rflags , 0 , sizeof ( m_tinfo - > m_ti_rflags ) ) ;
}
else
{
// This would probably mean an earlier exception was caught, but then we
// proceeded further than we should have.
throw0(DB_ERROR((std::string("BlockchainLMDB::") + __func__ +
std::string(": block-level DB transaction abort called when write txn doesn't exist")
).c_str()));
2016-02-08 16:32:19 +00:00
}
}
** CHANGES ARE EXPERIMENTAL (FOR TESTING ONLY)
Blockchain:
1. Optim: Multi-thread long-hash computation when encountering groups of blocks.
2. Optim: Cache verified txs and return result from cache instead of re-checking whenever possible.
3. Optim: Preload output-keys when encountering groups of blocks. Sort by amount and global-index before bulk-querying the database, and multi-thread when possible.
4. Optim: Disable double spend check on block verification; double spends are already detected when trying to add blocks.
5. Optim: Disable locking (recursive mutex) on functions called from check_tx_inputs, which causes slowdowns (only seems to happen on ubuntu/VMs??? Reason: TBD)
6. Optim: Multi-thread signature computation whenever possible.
7. Optim: Removed looped full-tx hash computation when retrieving transactions from pool (???).
8. Optim: Cache difficulty/timestamps (735 blocks) for next-difficulty calculations so that only 2 db reads are needed when a new block arrives (instead of 1470 reads).
Berkeley-DB:
1. Fix: 32-bit data errors causing wrong output global indices and failure to send blocks to peers (etc).
2. Fix: Unable to pop blocks on reorganize due to transaction errors.
3. Patch: Large number of transaction aborts when running multi-threaded bulk queries.
4. Patch: Insufficient locks error when running full sync.
5. Patch: Incorrect db stats when returning from an immediate exit from "pop block" operation.
6. Optim: Add bulk queries to get output global indices.
7. Optim: Modified output_keys table to store public_key+unlock_time+height for single transaction lookup (vs 3)
8. Optim: Used output_keys table to retrieve public_keys instead of going through output_amounts->output_txs+output_indices->txs->output:public_key
9. Optim: Added thread-safe buffers used when multi-threading bulk queries.
10. Optim: Added support for nosync/write_nosync options for improved performance (*see --db-sync-mode option for details)
11. Mod: Added checkpoint thread and auto-remove-logs option.
12. *Now usable on 32-bit systems like RPI2.
LMDB:
1. Optim: Added custom comparison for 256-bit key tables (minor speed-up, TBD: get actual effect)
2. Optim: Modified output_keys table to store public_key+unlock_time+height for single transaction lookup (vs 3)
3. Optim: Used output_keys table to retrieve public_keys instead of going through output_amounts->output_txs+output_indices->txs->output:public_key
4. Optim: Added support for sync/writemap options for improved performance (*see --db-sync-mode option for details)
5. Mod: Auto resize to +1GB instead of multiplier x1.5
ETC:
1. Minor optimizations for slow-hash for ARM (RPI2). Incomplete.
2. Fix: 32-bit saturation bug when computing next difficulty on large blocks.
[PENDING ISSUES]
1. Berkeley db has a very slow "pop-block" operation. This is very noticeable on the RPI2 as it sometimes takes > 10 MINUTES to pop a block during reorganization.
This does not happen very often, however; most reorgs seem to take a few seconds, but it possibly depends on the number of outputs present. TBD.
2. Berkeley db, possible bug "unable to allocate memory". TBD.
[NEW OPTIONS] (*Currently all enabled for testing purposes)
1. --fast-block-sync arg=[0:1] (default: 1)
a. 0 = Compute long hash per block (may take a while depending on CPU)
b. 1 = Skip long-hash and verify blocks based on embedded known good block hashes (faster, minimal CPU dependence)
2. --db-sync-mode arg=[[safe|fast|fastest]:[sync|async]:[nblocks_per_sync]] (default: fastest:async:1000) (see the parsing sketch after this options list)
a. safe = fdatasync/fsync (or equivalent) per stored block. Very slow, but the safest option for protecting against power-out/crash conditions.
b. fast/fastest = Enables asynchronous fdatasync/fsync (or equivalent). Useful for battery-operated devices or STABLE systems with a UPS and/or systems with a battery-backed write cache/solid-state cache.
Fast - Write meta-data but defer data flush.
Fastest - Defer meta-data and data flush.
Sync - Flush data after nblocks_per_sync and wait.
Async - Flush data after nblocks_per_sync but do not wait for the operation to finish.
3. --prep-blocks-threads arg=[n] (default: 4 or system max threads, whichever is lower)
Max number of threads to use when computing long-hash in groups.
4. --show-time-stats arg=[0:1] (default: 1)
Show benchmark related time stats.
5. --db-auto-remove-logs arg=[0:1] (default: 1)
For berkeley-db only. Auto remove logs if enabled.
**Note: lmdb and berkeley-db have changes to the tables and are not compatible with official git head version.
At the moment, you need a full resync to use this optimized version.
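As a reading aid, the sketch below shows how a --db-sync-mode value such as "fastest:async:1000" splits into the three fields described in option 2 above; parse_db_sync_mode and db_sync_options are hypothetical names used for illustration only, not the daemon's actual option parser:
#include <cstdint>
#include <sstream>
#include <string>

struct db_sync_options
{
  std::string level = "fastest";    // safe | fast | fastest
  std::string mode = "async";       // sync | async
  uint64_t nblocks_per_sync = 1000; // flush interval, in blocks
};

static db_sync_options parse_db_sync_mode(const std::string& arg)
{
  db_sync_options opt;
  std::stringstream ss(arg);
  std::string part;
  if (std::getline(ss, part, ':') && !part.empty()) opt.level = part;
  if (std::getline(ss, part, ':') && !part.empty()) opt.mode = part;
  if (std::getline(ss, part, ':') && !part.empty()) opt.nblocks_per_sync = std::stoull(part);
  return opt;
}
With the documented default, parse_db_sync_mode("fastest:async:1000") yields level=fastest, mode=async, nblocks_per_sync=1000; missing fields simply keep those defaults.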
[PERFORMANCE COMPARISON]
**Some figures are approximations only.
Using a baseline machine of an i7-2600K+SSD (with full PoW computation):
1. The optimized lmdb/blockchain core can process blocks up to 585K in ~1.25 hours + download time, so it usually takes 2.5 hours to sync the full chain.
2. The current head with memory can process blocks up to 585K in ~4.2 hours + download time, so it usually takes 5.5 hours to sync the full chain.
3. The current head with lmdb can process blocks up to 585K in ~32 hours + download time and usually takes 36 hours to sync the full chain.
Average processing times (with full PoW computation):
lmdb-optimized:
1. tx_ave = 2.5 ms / tx
2. block_ave = 5.87 ms / block
memory-official-repo:
1. tx_ave = 8.85 ms / tx
2. block_ave = 19.68 ms / block
lmdb-official-repo (0f4a036437fd41a5498ee5e74e2422ea6177aa3e)
1. tx_ave = 47.8 ms / tx
2. block_ave = 64.2 ms / block
**Note: The following data denotes processing times only (does not include p2p download time)
lmdb-optimized processing times (with full pow computation):
1. Desktop, Quad-core / 8-threads 2600k (8Mb) - 1.25 hours processing time (--db-sync-mode=fastest:async:1000).
2. Laptop, Dual-core / 4-threads U4200 (3Mb) - 4.90 hours processing time (--db-sync-mode=fastest:async:1000).
3. Embedded, Quad-core / 4-threads Z3735F (2x1Mb) - 12.0 hours processing time (--db-sync-mode=fastest:async:1000).
lmdb-optimized processing times (with per-block-checkpoint)
1. Desktop, Quad-core / 8-threads 2600k (8Mb) - 10 minutes processing time (--db-sync-mode=fastest:async:1000).
berkeley-db optimized processing times (with full pow computation)
1. Desktop, Quad-core / 8-threads 2600k (8Mb) - 1.8 hours processing time (--db-sync-mode=fastest:async:1000).
2. RPI2. Improved from an estimated 3 months(???) to 2.5 days (*Need 2A supply + Clock:1GHz + [usb+ssd] to achieve this speed) (--db-sync-mode=fastest:async:1000).
berkeley-db optimized processing times (with per-block-checkpoint)
1. RPI2. 12-15 hours (*Need 2A supply + Clock:1GHz + [usb+ssd] to achieve this speed) (--db-sync-mode=fastest:async:1000).
2015-07-10 20:09:32 +00:00
uint64_t BlockchainLMDB::add_block(const block& blk, const size_t& block_size, const difficulty_type& cumulative_difficulty, const uint64_t& coins_generated,
2015-12-14 04:54:39 +00:00
const std::vector<transaction>& txs)
2014-10-23 19:37:10 +00:00
{
2014-10-30 04:58:14 +00:00
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
2014-10-23 19:37:10 +00:00
check_open();
2017-01-14 19:27:52 +00:00
uint64_t m_height = height();
2015-02-11 23:55:53 +00:00
2015-05-18 10:18:31 +00:00
if (m_height % 1000 == 0)
2015-05-17 02:05:54 +00:00
{
2015-07-12 05:46:16 +00:00
// for batch mode, DB resize check is done at start of batch transaction
// (a sketch of the +1GB resize policy follows this function)
if (!m_batch_active && need_resize())
2015-05-17 02:05:54 +00:00
{
2016-09-21 19:40:07 +00:00
LOG_PRINT_L0("LMDB memory map needs to be resized, doing that now.");
2015-05-17 02:05:54 +00:00
do_resize();
}
}
2014-10-23 23:47:36 +00:00
try
{
BlockchainDB::add_block(blk, block_size, cumulative_difficulty, coins_generated, txs);
}
2016-02-13 12:10:27 +00:00
catch (DB_ERROR_TXN_START& e)
{
throw;
}
2014-10-23 23:47:36 +00:00
catch (...)
{
2016-02-08 16:32:36 +00:00
block_txn_abort();
2014-10-23 23:47:36 +00:00
throw;
}
2014-10-23 19:37:10 +00:00
2014-10-23 23:47:36 +00:00
return ++m_height;
2014-10-23 19:37:10 +00:00
}
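The resize check at the top of add_block() calls need_resize()/do_resize() every 1000 blocks (batch mode does its check when the batch transaction starts). The changelog above notes that the map is now grown by a fixed +1GB step rather than a x1.5 multiplier; below is a minimal sketch of that policy, assuming the environment is open and no transactions are active (grow_map_by_1gb is an illustrative name, not the actual do_resize implementation):
#include <lmdb.h>
#include <cstddef>

// Illustrative only: grow the LMDB memory map by a fixed 1 GiB step.
static int grow_map_by_1gb(MDB_env* env)
{
  MDB_envinfo info;
  int rc = mdb_env_info(env, &info);
  if (rc)
    return rc;
  const size_t new_size = info.me_mapsize + (size_t(1) << 30); // +1 GiB instead of x1.5
  return mdb_env_set_mapsize(env, new_size);                   // requires no active transactions
}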
2014-10-30 22:33:35 +00:00
void BlockchainLMDB::pop_block(block& blk, std::vector<transaction>& txs)
{
2015-02-11 23:55:53 +00:00
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
2015-02-19 04:52:44 +00:00
check_open();
2016-02-18 12:09:57 +00:00
block_txn_start(false);
2014-10-30 22:33:35 +00:00
try
{
BlockchainDB::pop_block(blk, txs);
2016-02-18 12:09:57 +00:00
block_txn_stop();
2014-10-30 22:33:35 +00:00
}
catch (...)
{
2016-02-18 12:09:57 +00:00
block_txn_abort();
2014-10-30 22:33:35 +00:00
throw;
}
}
2015-07-10 20:09:32 +00:00
void BlockchainLMDB::get_output_tx_and_index_from_global(const std::vector<uint64_t>& global_indices,
2015-12-14 04:54:39 +00:00
std::vector<tx_out_index>& tx_out_indices) const
2015-07-10 20:09:32 +00:00
{
2015-12-14 04:54:39 +00:00
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();
tx_out_indices.clear();
2015-07-10 20:09:32 +00:00
2015-12-14 04:54:39 +00:00
TXN_PREFIX_RDONLY();
2016-02-18 12:09:57 +00:00
RCURSOR(output_txs);
2015-07-10 20:09:32 +00:00
2016-04-04 01:10:58 +00:00
for (const uint64_t& output_id : global_indices)
2015-12-14 04:54:39 +00:00
{
2016-04-04 01:10:58 +00:00
MDB_val_set(v, output_id);
2015-07-10 20:09:32 +00:00
2016-03-28 17:26:37 +00:00
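// output_txs is a dup-sorted table keyed by the single dummy key zerokval, with
// one fixed-size duplicate record per output sorted by output_id, so MDB_GET_BOTH
// positions the cursor on the duplicate whose leading output_id matches v, i.e.
// an exact lookup by global output id.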
auto get_result = mdb_cursor_get(m_cur_output_txs, (MDB_val *)&zerokval, &v, MDB_GET_BOTH);
2015-12-14 04:54:39 +00:00
if (get_result == MDB_NOTFOUND)
throw1(OUTPUT_DNE("output with given index not in db"));
else if (get_result)
throw0(DB_ERROR("DB error attempting to fetch output tx hash"));
2015-07-10 20:09:32 +00:00
2016-03-27 21:43:16 +00:00
outtx *ot = (outtx *)v.mv_data;
auto result = tx_out_index(ot->tx_hash, ot->local_index);
2015-12-14 04:54:39 +00:00
tx_out_indices.push_back(result);
}
  TXN_POSTFIX_RDONLY();
}
void BlockchainLMDB::get_output_key(const uint64_t &amount, const std::vector<uint64_t> &offsets, std::vector<output_data_t> &outputs, bool allow_partial)
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  TIME_MEASURE_START(db3);
  check_open();
  outputs.clear();

  TXN_PREFIX_RDONLY();

  RCURSOR(output_amounts);

  MDB_val_set(k, amount);
  for (const uint64_t &index : offsets)
  {
    MDB_val_set(v, index);

    auto get_result = mdb_cursor_get(m_cur_output_amounts, &k, &v, MDB_GET_BOTH);
    if (get_result == MDB_NOTFOUND)
    {
      if (allow_partial)
      {
        MDEBUG("Partial result: " << outputs.size() << "/" << offsets.size());
        break;
      }
      throw1(OUTPUT_DNE((std::string("Attempting to get output pubkey by global index (amount ") + boost::lexical_cast<std::string>(amount) + ", index " + boost::lexical_cast<std::string>(index) + ", count " + boost::lexical_cast<std::string>(get_num_outputs(amount)) + "), but key does not exist").c_str()));
    }
    else if (get_result)
      throw0(DB_ERROR(lmdb_error("Error attempting to retrieve an output pubkey from the db", get_result).c_str()));

    output_data_t data;
    if (amount == 0)
    {
      // RingCT output: the stored record already carries its commitment.
      const outkey *okp = (const outkey *)v.mv_data;
      data = okp->data;
    }
    else
    {
      // Pre-RingCT output: copy the smaller record and derive the commitment
      // deterministically from the cleartext amount.
      const pre_rct_outkey *okp = (const pre_rct_outkey *)v.mv_data;
      memcpy(&data, &okp->data, sizeof(pre_rct_output_data_t));
      data.commitment = rct::zeroCommit(amount);
    }
    outputs.push_back(data);
  }

  TXN_POSTFIX_RDONLY();

  TIME_MEASURE_FINISH(db3);
  LOG_PRINT_L3("db3: " << db3);
}
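
// Hedged usage sketch (hypothetical helper, not referenced elsewhere in this
// file): given an open BlockchainLMDB, fetch the output keys backing a set of
// (amount, offset) pairs, tolerating missing entries by passing
// allow_partial = true. With allow_partial, outs may come back shorter than
// offsets, so a caller must compare the sizes before indexing.
static inline void fetch_output_keys_sketch(BlockchainLMDB &db, uint64_t amount,
    const std::vector<uint64_t> &offsets, std::vector<output_data_t> &outs)
{
  db.get_output_key(amount, offsets, outs, true /* allow_partial */);
}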
void BlockchainLMDB::get_output_tx_and_index(const uint64_t &amount, const std::vector<uint64_t> &offsets, std::vector<tx_out_index> &indices) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();
  indices.clear();
  std::vector<uint64_t> tx_indices;

  TXN_PREFIX_RDONLY();

  RCURSOR(output_amounts);
  MDB_val_set(k, amount);
  for (const uint64_t &index : offsets)
  {
    MDB_val_set(v, index);
    auto get_result = mdb_cursor_get(m_cur_output_amounts, &k, &v, MDB_GET_BOTH);
    if (get_result == MDB_NOTFOUND)
      throw1(OUTPUT_DNE("Attempting to get output by index, but key does not exist"));
    else if (get_result)
      throw0(DB_ERROR(lmdb_error("Error attempting to retrieve an output from the db", get_result).c_str()));
    const outkey *okp = (const outkey *)v.mv_data;
    tx_indices.push_back(okp->output_id);
  }
** CHANGES ARE EXPERIMENTAL (FOR TESTING ONLY)
Bockchain:
1. Optim: Multi-thread long-hash computation when encountering groups of blocks.
2. Optim: Cache verified txs and return result from cache instead of re-checking whenever possible.
3. Optim: Preload output-keys when encoutering groups of blocks. Sort by amount and global-index before bulk querying database and multi-thread when possible.
4. Optim: Disable double spend check on block verification, double spend is already detected when trying to add blocks.
5. Optim: Multi-thread signature computation whenever possible.
6. Patch: Disable locking (recursive mutex) on called functions from check_tx_inputs which causes slowdowns (only seems to happen on ubuntu/VMs??? Reason: TBD)
7. Optim: Removed looped full-tx hash computation when retrieving transactions from pool (???).
8. Optim: Cache difficulty/timestamps (735 blocks) for next-difficulty calculations so that only 2 db reads per new block is needed when a new block arrives (instead of 1470 reads).
Berkeley-DB:
1. Fix: 32-bit data errors causing wrong output global indices and failure to send blocks to peers (etc).
2. Fix: Unable to pop blocks on reorganize due to transaction errors.
3. Patch: Large number of transaction aborts when running multi-threaded bulk queries.
4. Patch: Insufficient locks error when running full sync.
5. Patch: Incorrect db stats when returning from an immediate exit from "pop block" operation.
6. Optim: Add bulk queries to get output global indices.
7. Optim: Modified output_keys table to store public_key+unlock_time+height for single transaction lookup (vs 3)
8. Optim: Used output_keys table retrieve public_keys instead of going through output_amounts->output_txs+output_indices->txs->output:public_key
9. Optim: Added thread-safe buffers used when multi-threading bulk queries.
10. Optim: Added support for nosync/write_nosync options for improved performance (*see --db-sync-mode option for details)
11. Mod: Added checkpoint thread and auto-remove-logs option.
12. *Now usable on 32-bit systems like RPI2.
LMDB:
1. Optim: Added custom comparison for 256-bit key tables (minor speed-up, TBD: get actual effect)
2. Optim: Modified output_keys table to store public_key+unlock_time+height for single transaction lookup (vs 3)
3. Optim: Used output_keys table retrieve public_keys instead of going through output_amounts->output_txs+output_indices->txs->output:public_key
4. Optim: Added support for sync/writemap options for improved performance (*see --db-sync-mode option for details)
5. Mod: Auto resize to +1GB instead of multiplier x1.5
ETC:
1. Minor optimizations for slow-hash for ARM (RPI2). Incomplete.
2. Fix: 32-bit saturation bug when computing next difficulty on large blocks.
[PENDING ISSUES]
1. Berkely db has a very slow "pop-block" operation. This is very noticeable on the RPI2 as it sometimes takes > 10 MINUTES to pop a block during reorganization.
This does not happen very often however, most reorgs seem to take a few seconds but it possibly depends on the number of outputs present. TBD.
2. Berkeley db, possible bug "unable to allocate memory". TBD.
[NEW OPTIONS] (*Currently all enabled for testing purposes)
1. --fast-block-sync arg=[0:1] (default: 1)
a. 0 = Compute long hash per block (may take a while depending on CPU)
b. 1 = Skip long-hash and verify blocks based on embedded known good block hashes (faster, minimal CPU dependence)
2. --db-sync-mode arg=[[safe|fast|fastest]:[sync|async]:[nblocks_per_sync]] (default: fastest:async:1000)
a. safe = fdatasync/fsync (or equivalent) per stored block. Very slow, but safest option to protect against power-out/crash conditions.
b. fast/fastest = Enables asynchronous fdatasync/fsync (or equivalent). Useful for battery operated devices or STABLE systems with UPS and/or systems with battery backed write cache/solid state cache.
Fast - Write meta-data but defer data flush.
Fastest - Defer meta-data and data flush.
Sync - Flush data after nblocks_per_sync and wait.
Async - Flush data after nblocks_per_sync but do not wait for the operation to finish.
3. --prep-blocks-threads arg=[n] (default: 4 or system max threads, whichever is lower)
Max number of threads to use when computing long-hash in groups.
4. --show-time-stats arg=[0:1] (default: 1)
Show benchmark related time stats.
5. --db-auto-remove-logs arg=[0:1] (default: 1)
For berkeley-db only. Auto remove logs if enabled.
**Note: lmdb and berkeley-db have changes to the tables and are not compatible with official git head version.
At the moment, you need a full resync to use this optimized version.
[PERFORMANCE COMPARISON]
**Some figures are approximations only.
Using a baseline machine of an i7-2600K+SSD+(with full pow computation):
1. The optimized lmdb/blockhain core can process blocks up to 585K for ~1.25 hours + download time, so it usually takes 2.5 hours to sync the full chain.
2. The current head with memory can process blocks up to 585K for ~4.2 hours + download time, so it usually takes 5.5 hours to sync the full chain.
3. The current head with lmdb can process blocks up to 585K for ~32 hours + download time and usually takes 36 hours to sync the full chain.
Averate procesing times (with full pow computation):
lmdb-optimized:
1. tx_ave = 2.5 ms / tx
2. block_ave = 5.87 ms / block
memory-official-repo:
1. tx_ave = 8.85 ms / tx
2. block_ave = 19.68 ms / block
lmdb-official-repo (0f4a036437fd41a5498ee5e74e2422ea6177aa3e)
1. tx_ave = 47.8 ms / tx
2. block_ave = 64.2 ms / block
**Note: The following data denotes processing times only (does not include p2p download time)
lmdb-optimized processing times (with full pow computation):
1. Desktop, Quad-core / 8-threads 2600k (8Mb) - 1.25 hours processing time (--db-sync-mode=fastest:async:1000).
2. Laptop, Dual-core / 4-threads U4200 (3Mb) - 4.90 hours processing time (--db-sync-mode=fastest:async:1000).
3. Embedded, Quad-core / 4-threads Z3735F (2x1Mb) - 12.0 hours processing time (--db-sync-mode=fastest:async:1000).
lmdb-optimized processing times (with per-block-checkpoint)
1. Desktop, Quad-core / 8-threads 2600k (8Mb) - 10 minutes processing time (--db-sync-mode=fastest:async:1000).
berkeley-db optimized processing times (with full pow computation)
1. Desktop, Quad-core / 8-threads 2600k (8Mb) - 1.8 hours processing time (--db-sync-mode=fastest:async:1000).
2. RPI2. Improved from an estimated 3 months (???) to 2.5 days (*Need 2AMP supply + Clock:1Ghz + [usb+ssd] to achieve this speed) (--db-sync-mode=fastest:async:1000).
berkeley-db optimized processing times (with per-block-checkpoint)
1. RPI2. 12-15 hours (*Need 2AMP supply + Clock:1Ghz + [usb+ssd] to achieve this speed) (--db-sync-mode=fastest:async:1000).

  TIME_MEASURE_START(db3);
  if (tx_indices.size() > 0)
  {
    get_output_tx_and_index_from_global(tx_indices, indices);
  }
  TIME_MEASURE_FINISH(db3);
  LOG_PRINT_L3("db3: " << db3);
}
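
// Builds a per-amount output histogram. Each map entry is a tuple of
// (number of outputs with that amount, outputs old enough to be spendable,
// outputs whose block timestamp is at or after recent_cutoff); the last two
// fields are only filled in when unlocked or recent_cutoff is requested.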
std::map<uint64_t, std::tuple<uint64_t, uint64_t, uint64_t>> BlockchainLMDB::get_output_histogram(const std::vector<uint64_t> &amounts, bool unlocked, uint64_t recent_cutoff) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  TXN_PREFIX_RDONLY();
  RCURSOR(output_amounts);

  std::map<uint64_t, std::tuple<uint64_t, uint64_t, uint64_t>> histogram;
  MDB_val k;
  MDB_val v;

  if (amounts.empty())
  {
    MDB_cursor_op op = MDB_FIRST;
    while (1)
    {
      int ret = mdb_cursor_get(m_cur_output_amounts, &k, &v, op);
      op = MDB_NEXT_NODUP;
      if (ret == MDB_NOTFOUND)
        break;
      if (ret)
        throw0(DB_ERROR(lmdb_error("Failed to enumerate outputs: ", ret).c_str()));
      mdb_size_t num_elems = 0;
      mdb_cursor_count(m_cur_output_amounts, &num_elems);
      uint64_t amount = *(const uint64_t*)k.mv_data;
      histogram[amount] = std::make_tuple(num_elems, 0, 0);
    }
  }
  else
  {
    for (const auto &amount: amounts)
    {
      MDB_val_copy<uint64_t> k(amount);
      int ret = mdb_cursor_get(m_cur_output_amounts, &k, &v, MDB_SET);
      if (ret == MDB_NOTFOUND)
      {
        histogram[amount] = std::make_tuple(0, 0, 0);
      }
      else if (ret == MDB_SUCCESS)
      {
        mdb_size_t num_elems = 0;
        mdb_cursor_count(m_cur_output_amounts, &num_elems);
        histogram[amount] = std::make_tuple(num_elems, 0, 0);
      }
      else
      {
        throw0(DB_ERROR(lmdb_error("Failed to enumerate outputs: ", ret).c_str()));
      }
    }
  }

  if (unlocked || recent_cutoff > 0) {
    const uint64_t blockchain_height = height();
    for (std::map<uint64_t, std::tuple<uint64_t, uint64_t, uint64_t>>::iterator i = histogram.begin(); i != histogram.end(); ++i) {
      uint64_t amount = i->first;
      uint64_t num_elems = std::get<0>(i->second);
      while (num_elems > 0) {
        const tx_out_index toi = get_output_tx_and_index(amount, num_elems - 1);
        const uint64_t height = get_tx_block_height(toi.first);
        if (height + CRYPTONOTE_DEFAULT_TX_SPENDABLE_AGE <= blockchain_height)
          break;
        --num_elems;
      }
      // modifying second does not invalidate the iterator
      std::get<1>(i->second) = num_elems;

      if (recent_cutoff > 0)
      {
        uint64_t recent = 0;
        while (num_elems > 0) {
          const tx_out_index toi = get_output_tx_and_index(amount, num_elems - 1);
          const uint64_t height = get_tx_block_height(toi.first);
          const uint64_t ts = get_block_timestamp(height);
          if (ts < recent_cutoff)
            break;
          --num_elems;
          ++recent;
        }
        // modifying second does not invalidate the iterator
        std::get<2>(i->second) = recent;
      }
    }
  }

  TXN_POSTFIX_RDONLY();

  return histogram;
}

void BlockchainLMDB::check_hard_fork_info()
{
}

void BlockchainLMDB::drop_hard_fork_info()
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  TXN_PREFIX(0);
  mdb_drop(*txn_ptr, m_hf_starting_heights, 1);
  mdb_drop(*txn_ptr, m_hf_versions, 1);
  TXN_POSTFIX_SUCCESS();
}
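
// Records the hard fork version for a given height. MDB_APPEND is tried first
// for the common in-order case; if the key already exists the record is
// overwritten instead.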

void BlockchainLMDB::set_hard_fork_version(uint64_t height, uint8_t version)
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  TXN_BLOCK_PREFIX(0);

  MDB_val_copy<uint64_t> val_key(height);
  MDB_val_copy<uint8_t> val_value(version);
  int result;
  result = mdb_put(*txn_ptr, m_hf_versions, &val_key, &val_value, MDB_APPEND);
  if (result == MDB_KEYEXIST)
    result = mdb_put(*txn_ptr, m_hf_versions, &val_key, &val_value, 0);
  if (result)
    throw1(DB_ERROR(lmdb_error("Error adding hard fork version to db transaction: ", result).c_str()));

  TXN_BLOCK_POSTFIX_SUCCESS();
}

uint8_t BlockchainLMDB::get_hard_fork_version(uint64_t height) const
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  check_open();

  TXN_PREFIX_RDONLY();
  RCURSOR(hf_versions);

  MDB_val_copy<uint64_t> val_key(height);
  MDB_val val_ret;
  auto result = mdb_cursor_get(m_cur_hf_versions, &val_key, &val_ret, MDB_SET);
  if (result == MDB_NOTFOUND || result)
    throw0(DB_ERROR(lmdb_error("Error attempting to retrieve a hard fork version at height " + boost::lexical_cast<std::string>(height) + " from the db: ", result).c_str()));

  uint8_t ret = *(const uint8_t*)val_ret.mv_data;
  TXN_POSTFIX_RDONLY();
  return ret;
}

bool BlockchainLMDB::is_read_only() const
{
  unsigned int flags;
  auto result = mdb_env_get_flags(m_env, &flags);
  if (result)
    throw0(DB_ERROR(lmdb_error("Error getting database environment info: ", result).c_str()));

  if (flags & MDB_RDONLY)
    return true;

  return false;
}

void BlockchainLMDB::fixup()
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  // Always call parent as well
  BlockchainDB::fixup();
}
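
// Used by the migration code below: renames a freshly built table (e.g.
// "block_heightr") by patching the last character of its name record to 's'
// in LMDB's main DB (dbi 1), so the new table takes over the old table's name.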

#define RENAME_DB(name) \
    k.mv_data = (void *)name; \
    k.mv_size = sizeof(name)-1; \
    result = mdb_cursor_open(txn, 1, &c_cur); \
    if (result) \
      throw0(DB_ERROR(lmdb_error("Failed to open a cursor for " name ": ", result).c_str())); \
    result = mdb_cursor_get(c_cur, &k, NULL, MDB_SET_KEY); \
    if (result) \
      throw0(DB_ERROR(lmdb_error("Failed to get DB record for " name ": ", result).c_str())); \
    ptr = (char *)k.mv_data; \
    ptr[sizeof(name)-2] = 's'

#define LOGIF(y) if (ELPP->vRegistry()->allowed(y, MONERO_DEFAULT_LOG_CATEGORY))
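
// Migrates the database layout from version 0 to version 1. Each stage below is
// a self-contained do { ... } while(0) block that first checks whether its table
// was already converted, so an interrupted migration can be restarted:
// block_heights and hf_versions get new key layouts, the separate block_* tables
// are merged into block_info, the old output/spent-key index tables are dropped
// and recreated, and txs/outputs are rebuilt by replaying every block.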

void BlockchainLMDB::migrate_0_1()
{
  LOG_PRINT_L3("BlockchainLMDB::" << __func__);
  uint64_t i, z, m_height;
  int result;
  mdb_txn_safe txn(false);
  MDB_val k, v;
  char *ptr;

  MLOG_YELLOW(el::Level::Info, "Migrating blockchain from DB version 0 to 1 - this may take a while:");
  MINFO("updating blocks, hf_versions, outputs, txs, and spent_keys tables...");

  do {
    result = mdb_txn_begin(m_env, NULL, 0, txn);
    if (result)
      throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str()));
    MDB_stat db_stats;
    if ((result = mdb_stat(txn, m_blocks, &db_stats)))
      throw0(DB_ERROR(lmdb_error("Failed to query m_blocks: ", result).c_str()));
    m_height = db_stats.ms_entries;
    MINFO("Total number of blocks: " << m_height);
    MINFO("block migration will update block_heights, block_info, and hf_versions...");

    MINFO("migrating block_heights:");
    MDB_dbi o_heights;

    unsigned int flags;
    result = mdb_dbi_flags(txn, m_block_heights, &flags);
    if (result)
      throw0(DB_ERROR(lmdb_error("Failed to retrieve block_heights flags: ", result).c_str()));
    /* if the flags are what we expect, this table has already been migrated */
    if ((flags & (MDB_INTEGERKEY|MDB_DUPSORT|MDB_DUPFIXED)) == (MDB_INTEGERKEY|MDB_DUPSORT|MDB_DUPFIXED)) {
      txn.abort();
      LOG_PRINT_L1("block_heights already migrated");
      break;
    }

    /* the block_heights table name is the same but the old version and new version
     * have incompatible DB flags. Create a new table with the right flags. We want
     * the name to be similar to the old name so that it will occupy the same location
     * in the DB.
     */
    o_heights = m_block_heights;
    lmdb_db_open(txn, "block_heightr", MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_block_heights, "Failed to open db handle for block_heightr");
    mdb_set_dupsort(txn, m_block_heights, compare_hash32);

    MDB_cursor *c_old, *c_cur;
    blk_height bh;
    MDB_val_set(nv, bh);

    /* old table was k(hash), v(height).
     * new table is DUPFIXED, k(zeroval), v{hash, height}.
     */
    i = 0;
    z = m_height;
    while(1) {
      if (!(i % 2000)) {
        if (i) {
          LOGIF(el::Level::Info) {
            std::cout << i << " / " << z << "  \r" << std::flush;
          }
          txn.commit();
          result = mdb_txn_begin(m_env, NULL, 0, txn);
          if (result)
            throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str()));
        }
        result = mdb_cursor_open(txn, m_block_heights, &c_cur);
        if (result)
          throw0(DB_ERROR(lmdb_error("Failed to open a cursor for block_heightr: ", result).c_str()));
        result = mdb_cursor_open(txn, o_heights, &c_old);
        if (result)
          throw0(DB_ERROR(lmdb_error("Failed to open a cursor for block_heights: ", result).c_str()));
        if (!i) {
          MDB_stat ms;
          mdb_stat(txn, m_block_heights, &ms);
          i = ms.ms_entries;
        }
      }
      result = mdb_cursor_get(c_old, &k, &v, MDB_NEXT);
      if (result == MDB_NOTFOUND) {
        txn.commit();
        break;
      }
      else if (result)
        throw0(DB_ERROR(lmdb_error("Failed to get a record from block_heights: ", result).c_str()));
      bh.bh_hash = *(crypto::hash *)k.mv_data;
      bh.bh_height = *(uint64_t *)v.mv_data;
      result = mdb_cursor_put(c_cur, (MDB_val *)&zerokval, &nv, MDB_APPENDDUP);
      if (result)
        throw0(DB_ERROR(lmdb_error("Failed to put a record into block_heightr: ", result).c_str()));
      /* we delete the old records immediately, so the overall DB and mapsize should not grow.
       * This is a little slower than just letting mdb_drop() delete it all at the end, but
       * it saves a significant amount of disk space.
       */
      result = mdb_cursor_del(c_old, 0);
      if (result)
        throw0(DB_ERROR(lmdb_error("Failed to delete a record from block_heights: ", result).c_str()));
      i++;
    }

    result = mdb_txn_begin(m_env, NULL, 0, txn);
    if (result)
      throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str()));
    /* Delete the old table */
    result = mdb_drop(txn, o_heights, 1);
    if (result)
      throw0(DB_ERROR(lmdb_error("Failed to delete old block_heights table: ", result).c_str()));
    RENAME_DB("block_heightr");

    /* close and reopen to get old dbi slot back */
    mdb_dbi_close(m_env, m_block_heights);
    lmdb_db_open(txn, "block_heights", MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED, m_block_heights, "Failed to open db handle for block_heights");
    mdb_set_dupsort(txn, m_block_heights, compare_hash32);
    txn.commit();
  } while(0);

  /* old tables are k(height), v(value).
   * new table is DUPFIXED, k(zeroval), v{height, values...}.
   */
  do {
    LOG_PRINT_L1("migrating block info:");

    MDB_dbi coins;
    result = mdb_txn_begin(m_env, NULL, 0, txn);
    if (result)
      throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str()));
    result = mdb_dbi_open(txn, "block_coins", 0, &coins);
    if (result == MDB_NOTFOUND) {
      txn.abort();
      LOG_PRINT_L1("block_info already migrated");
      break;
    }
    MDB_dbi diffs, hashes, sizes, timestamps;
    mdb_block_info bi;
    MDB_val_set(nv, bi);

    lmdb_db_open(txn, "block_diffs", 0, diffs, "Failed to open db handle for block_diffs");
    lmdb_db_open(txn, "block_hashes", 0, hashes, "Failed to open db handle for block_hashes");
    lmdb_db_open(txn, "block_sizes", 0, sizes, "Failed to open db handle for block_sizes");
    lmdb_db_open(txn, "block_timestamps", 0, timestamps, "Failed to open db handle for block_timestamps");
    MDB_cursor *c_cur, *c_coins, *c_diffs, *c_hashes, *c_sizes, *c_timestamps;
    i = 0;
    z = m_height;
    while(1) {
      MDB_val k, v;
      if (!(i % 2000)) {
        if (i) {
          LOGIF(el::Level::Info) {
            std::cout << i << " / " << z << "  \r" << std::flush;
          }
          txn.commit();
          result = mdb_txn_begin(m_env, NULL, 0, txn);
          if (result)
            throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str()));
        }
        result = mdb_cursor_open(txn, m_block_info, &c_cur);
        if (result)
          throw0(DB_ERROR(lmdb_error("Failed to open a cursor for block_info: ", result).c_str()));
        result = mdb_cursor_open(txn, coins, &c_coins);
        if (result)
          throw0(DB_ERROR(lmdb_error("Failed to open a cursor for block_coins: ", result).c_str()));
        result = mdb_cursor_open(txn, diffs, &c_diffs);
        if (result)
          throw0(DB_ERROR(lmdb_error("Failed to open a cursor for block_diffs: ", result).c_str()));
        result = mdb_cursor_open(txn, hashes, &c_hashes);
        if (result)
          throw0(DB_ERROR(lmdb_error("Failed to open a cursor for block_hashes: ", result).c_str()));
        result = mdb_cursor_open(txn, sizes, &c_sizes);
        if (result)
          throw0(DB_ERROR(lmdb_error("Failed to open a cursor for block_sizes: ", result).c_str()));
        result = mdb_cursor_open(txn, timestamps, &c_timestamps);
        if (result)
          throw0(DB_ERROR(lmdb_error("Failed to open a cursor for block_timestamps: ", result).c_str()));
        if (!i) {
          MDB_stat ms;
          mdb_stat(txn, m_block_info, &ms);
          i = ms.ms_entries;
        }
      }
      result = mdb_cursor_get(c_coins, &k, &v, MDB_NEXT);
      if (result == MDB_NOTFOUND) {
        break;
      } else if (result)
        throw0(DB_ERROR(lmdb_error("Failed to get a record from block_coins: ", result).c_str()));
      bi.bi_height = *(uint64_t *)k.mv_data;
      bi.bi_coins = *(uint64_t *)v.mv_data;
      result = mdb_cursor_get(c_diffs, &k, &v, MDB_NEXT);
      if (result)
        throw0(DB_ERROR(lmdb_error("Failed to get a record from block_diffs: ", result).c_str()));
      bi.bi_diff = *(uint64_t *)v.mv_data;
      result = mdb_cursor_get(c_hashes, &k, &v, MDB_NEXT);
      if (result)
        throw0(DB_ERROR(lmdb_error("Failed to get a record from block_hashes: ", result).c_str()));
      bi.bi_hash = *(crypto::hash *)v.mv_data;
      result = mdb_cursor_get(c_sizes, &k, &v, MDB_NEXT);
      if (result)
        throw0(DB_ERROR(lmdb_error("Failed to get a record from block_sizes: ", result).c_str()));
      if (v.mv_size == sizeof(uint32_t))
        bi.bi_size = *(uint32_t *)v.mv_data;
      else
        bi.bi_size = *(uint64_t *)v.mv_data;  // this is a 32/64 compat bug in version 0
      result = mdb_cursor_get(c_timestamps, &k, &v, MDB_NEXT);
      if (result)
        throw0(DB_ERROR(lmdb_error("Failed to get a record from block_timestamps: ", result).c_str()));
      bi.bi_timestamp = *(uint64_t *)v.mv_data;
      result = mdb_cursor_put(c_cur, (MDB_val *)&zerokval, &nv, MDB_APPENDDUP);
      if (result)
        throw0(DB_ERROR(lmdb_error("Failed to put a record into block_info: ", result).c_str()));
      result = mdb_cursor_del(c_coins, 0);
      if (result)
        throw0(DB_ERROR(lmdb_error("Failed to delete a record from block_coins: ", result).c_str()));
      result = mdb_cursor_del(c_diffs, 0);
      if (result)
        throw0(DB_ERROR(lmdb_error("Failed to delete a record from block_diffs: ", result).c_str()));
      result = mdb_cursor_del(c_hashes, 0);
      if (result)
        throw0(DB_ERROR(lmdb_error("Failed to delete a record from block_hashes: ", result).c_str()));
      result = mdb_cursor_del(c_sizes, 0);
      if (result)
        throw0(DB_ERROR(lmdb_error("Failed to delete a record from block_sizes: ", result).c_str()));
      result = mdb_cursor_del(c_timestamps, 0);
      if (result)
        throw0(DB_ERROR(lmdb_error("Failed to delete a record from block_timestamps: ", result).c_str()));
      i++;
    }
    mdb_cursor_close(c_timestamps);
    mdb_cursor_close(c_sizes);
    mdb_cursor_close(c_hashes);
    mdb_cursor_close(c_diffs);
    mdb_cursor_close(c_coins);
    result = mdb_drop(txn, timestamps, 1);
    if (result)
      throw0(DB_ERROR(lmdb_error("Failed to delete block_timestamps from the db: ", result).c_str()));
    result = mdb_drop(txn, sizes, 1);
    if (result)
      throw0(DB_ERROR(lmdb_error("Failed to delete block_sizes from the db: ", result).c_str()));
    result = mdb_drop(txn, hashes, 1);
    if (result)
      throw0(DB_ERROR(lmdb_error("Failed to delete block_hashes from the db: ", result).c_str()));
    result = mdb_drop(txn, diffs, 1);
    if (result)
      throw0(DB_ERROR(lmdb_error("Failed to delete block_diffs from the db: ", result).c_str()));
    result = mdb_drop(txn, coins, 1);
    if (result)
      throw0(DB_ERROR(lmdb_error("Failed to delete block_coins from the db: ", result).c_str()));
    txn.commit();
  } while(0);

  do {
    LOG_PRINT_L1("migrating hf_versions:");

    MDB_dbi o_hfv;
    unsigned int flags;
    result = mdb_txn_begin(m_env, NULL, 0, txn);
    if (result)
      throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str()));
    result = mdb_dbi_flags(txn, m_hf_versions, &flags);
    if (result)
      throw0(DB_ERROR(lmdb_error("Failed to retrieve hf_versions flags: ", result).c_str()));
    /* if the flags are what we expect, this table has already been migrated */
    if (flags & MDB_INTEGERKEY) {
      txn.abort();
      LOG_PRINT_L1("hf_versions already migrated");
      break;
    }

    /* the hf_versions table name is the same but the old version and new version
     * have incompatible DB flags. Create a new table with the right flags.
     */
    o_hfv = m_hf_versions;
    lmdb_db_open(txn, "hf_versionr", MDB_INTEGERKEY | MDB_CREATE, m_hf_versions, "Failed to open db handle for hf_versionr");

    MDB_cursor *c_old, *c_cur;
    i = 0;
    z = m_height;
    while(1) {
      if (!(i % 2000)) {
        if (i) {
          LOGIF(el::Level::Info) {
            std::cout << i << " / " << z << "  \r" << std::flush;
          }
          txn.commit();
          result = mdb_txn_begin(m_env, NULL, 0, txn);
          if (result)
            throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str()));
        }
        result = mdb_cursor_open(txn, m_hf_versions, &c_cur);
        if (result)
          throw0(DB_ERROR(lmdb_error("Failed to open a cursor for hf_versionr: ", result).c_str()));
        result = mdb_cursor_open(txn, o_hfv, &c_old);
        if (result)
          throw0(DB_ERROR(lmdb_error("Failed to open a cursor for hf_versions: ", result).c_str()));
        if (!i) {
          MDB_stat ms;
          mdb_stat(txn, m_hf_versions, &ms);
          i = ms.ms_entries;
        }
      }
      result = mdb_cursor_get(c_old, &k, &v, MDB_NEXT);
      if (result == MDB_NOTFOUND) {
        txn.commit();
        break;
      }
      else if (result)
        throw0(DB_ERROR(lmdb_error("Failed to get a record from hf_versions: ", result).c_str()));
      result = mdb_cursor_put(c_cur, &k, &v, MDB_APPEND);
      if (result)
        throw0(DB_ERROR(lmdb_error("Failed to put a record into hf_versionr: ", result).c_str()));
      result = mdb_cursor_del(c_old, 0);
      if (result)
        throw0(DB_ERROR(lmdb_error("Failed to delete a record from hf_versions: ", result).c_str()));
      i++;
    }
    result = mdb_txn_begin(m_env, NULL, 0, txn);
    if (result)
      throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str()));
    /* Delete the old table */
    result = mdb_drop(txn, o_hfv, 1);
    if (result)
      throw0(DB_ERROR(lmdb_error("Failed to delete old hf_versions table: ", result).c_str()));
    RENAME_DB("hf_versionr");
    mdb_dbi_close(m_env, m_hf_versions);
    lmdb_db_open(txn, "hf_versions", MDB_INTEGERKEY, m_hf_versions, "Failed to open db handle for hf_versions");
    txn.commit();
  } while(0);

  do {
    LOG_PRINT_L1("deleting old indices:");

    /* Delete all other tables, we're just going to recreate them */
    MDB_dbi dbi;
    result = mdb_txn_begin(m_env, NULL, 0, txn);
    if (result)
      throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str()));

    result = mdb_dbi_open(txn, "tx_unlocks", 0, &dbi);
    if (result == MDB_NOTFOUND) {
      txn.abort();
      LOG_PRINT_L1("old indices already deleted");
      break;
    }
    txn.abort();

#define DELETE_DB(x) do { \
    LOG_PRINT_L1("  " x ":"); \
    result = mdb_txn_begin(m_env, NULL, 0, txn); \
    if (result) \
      throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str())); \
    result = mdb_dbi_open(txn, x, 0, &dbi); \
    if (!result) { \
      result = mdb_drop(txn, dbi, 1); \
      if (result) \
        throw0(DB_ERROR(lmdb_error("Failed to delete " x ": ", result).c_str())); \
      txn.commit(); \
    } } while(0)

    DELETE_DB("tx_heights");
    DELETE_DB("output_txs");
    DELETE_DB("output_indices");
    DELETE_DB("output_keys");
    DELETE_DB("spent_keys");
    DELETE_DB("output_amounts");
    DELETE_DB("tx_outputs");
    DELETE_DB("tx_unlocks");

    /* reopen new DBs with correct flags */
    result = mdb_txn_begin(m_env, NULL, 0, txn);
    if (result)
      throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str()));
    lmdb_db_open(txn, LMDB_OUTPUT_TXS, MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_output_txs, "Failed to open db handle for m_output_txs");
    mdb_set_dupsort(txn, m_output_txs, compare_uint64);
    lmdb_db_open(txn, LMDB_TX_OUTPUTS, MDB_INTEGERKEY | MDB_CREATE, m_tx_outputs, "Failed to open db handle for m_tx_outputs");
    lmdb_db_open(txn, LMDB_SPENT_KEYS, MDB_INTEGERKEY | MDB_CREATE | MDB_DUPSORT | MDB_DUPFIXED, m_spent_keys, "Failed to open db handle for m_spent_keys");
    mdb_set_dupsort(txn, m_spent_keys, compare_hash32);
    lmdb_db_open(txn, LMDB_OUTPUT_AMOUNTS, MDB_INTEGERKEY | MDB_DUPSORT | MDB_DUPFIXED | MDB_CREATE, m_output_amounts, "Failed to open db handle for m_output_amounts");
    mdb_set_dupsort(txn, m_output_amounts, compare_uint64);
    txn.commit();
  } while(0);
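
  // Final stage: rebuild the output-related tables by replaying every block
  // through add_transaction(), deleting the old tx records as it goes. Progress
  // is checkpointed in the "txblk" property so this stage can resume if it is
  // interrupted.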
  do {
    LOG_PRINT_L1("migrating txs and outputs:");

    unsigned int flags;
    result = mdb_txn_begin(m_env, NULL, 0, txn);
    if (result)
      throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str()));
    result = mdb_dbi_flags(txn, m_txs, &flags);
    if (result)
      throw0(DB_ERROR(lmdb_error("Failed to retrieve txs flags: ", result).c_str()));
    /* if the flags are what we expect, this table has already been migrated */
    if (flags & MDB_INTEGERKEY) {
      txn.abort();
      LOG_PRINT_L1("txs already migrated");
      break;
    }

    MDB_dbi o_txs;
    blobdata bd;
    block b;
    MDB_val hk;

    o_txs = m_txs;
    mdb_set_compare(txn, o_txs, compare_hash32);
    lmdb_db_open(txn, "txr", MDB_INTEGERKEY | MDB_CREATE, m_txs, "Failed to open db handle for txr");

    txn.commit();

    MDB_cursor *c_blocks, *c_txs, *c_props, *c_cur;
    i = 0;
    z = m_height;

    hk.mv_size = sizeof(crypto::hash);
    set_batch_transactions(true);
    batch_start(1000);
    txn.m_txn = m_write_txn->m_txn;
    m_height = 0;

    while(1) {
      if (!(i % 1000)) {
        if (i) {
          LOGIF(el::Level::Info) {
            std::cout << i << " / " << z << "  \r" << std::flush;
          }
          MDB_val_set(pk, "txblk");
          MDB_val_set(pv, m_height);
          result = mdb_cursor_put(c_props, &pk, &pv, 0);
          if (result)
            throw0(DB_ERROR(lmdb_error("Failed to update txblk property: ", result).c_str()));
          txn.commit();
          result = mdb_txn_begin(m_env, NULL, 0, txn);
          if (result)
            throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str()));
          m_write_txn->m_txn = txn.m_txn;
          m_write_batch_txn->m_txn = txn.m_txn;
          memset(&m_wcursors, 0, sizeof(m_wcursors));
        }
        result = mdb_cursor_open(txn, m_blocks, &c_blocks);
        if (result)
          throw0(DB_ERROR(lmdb_error("Failed to open a cursor for blocks: ", result).c_str()));
        result = mdb_cursor_open(txn, m_properties, &c_props);
        if (result)
          throw0(DB_ERROR(lmdb_error("Failed to open a cursor for properties: ", result).c_str()));
        result = mdb_cursor_open(txn, o_txs, &c_txs);
        if (result)
          throw0(DB_ERROR(lmdb_error("Failed to open a cursor for txs: ", result).c_str()));
        if (!i) {
          MDB_stat ms;
          mdb_stat(txn, m_txs, &ms);
          i = ms.ms_entries;
          if (i) {
            MDB_val_set(pk, "txblk");
            result = mdb_cursor_get(c_props, &pk, &k, MDB_SET);
            if (result)
              throw0(DB_ERROR(lmdb_error("Failed to get a record from properties: ", result).c_str()));
            m_height = *(uint64_t *)k.mv_data;
          }
        }
        if (i) {
          result = mdb_cursor_get(c_blocks, &k, &v, MDB_SET);
          if (result)
            throw0(DB_ERROR(lmdb_error("Failed to get a record from blocks: ", result).c_str()));
        }
      }
      result = mdb_cursor_get(c_blocks, &k, &v, MDB_NEXT);
      if (result == MDB_NOTFOUND) {
        MDB_val_set(pk, "txblk");
        result = mdb_cursor_get(c_props, &pk, &v, MDB_SET);
        if (result)
          throw0(DB_ERROR(lmdb_error("Failed to get a record from props: ", result).c_str()));
        result = mdb_cursor_del(c_props, 0);
        if (result)
          throw0(DB_ERROR(lmdb_error("Failed to delete a record from props: ", result).c_str()));
        batch_stop();
        break;
      } else if (result)
        throw0(DB_ERROR(lmdb_error("Failed to get a record from blocks: ", result).c_str()));

      bd.assign(reinterpret_cast<char*>(v.mv_data), v.mv_size);
      if (!parse_and_validate_block_from_blob(bd, b))
        throw0(DB_ERROR("Failed to parse block from blob retrieved from the db"));

      add_transaction(null_hash, b.miner_tx);
      for (unsigned int j = 0; j < b.tx_hashes.size(); j++) {
        transaction tx;
        hk.mv_data = &b.tx_hashes[j];
        result = mdb_cursor_get(c_txs, &hk, &v, MDB_SET);
        if (result)
          throw0(DB_ERROR(lmdb_error("Failed to get record from txs: ", result).c_str()));
        bd.assign(reinterpret_cast<char*>(v.mv_data), v.mv_size);
        if (!parse_and_validate_tx_from_blob(bd, tx))
          throw0(DB_ERROR("Failed to parse tx from blob retrieved from the db"));
        add_transaction(null_hash, tx, &b.tx_hashes[j]);
        result = mdb_cursor_del(c_txs, 0);
        if (result)
          throw0(DB_ERROR(lmdb_error("Failed to delete record from txs: ", result).c_str()));
      }
      i++;
      m_height = i;
    }
    result = mdb_txn_begin(m_env, NULL, 0, txn);
    if (result)
      throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str()));
    result = mdb_drop(txn, o_txs, 1);
    if (result)
      throw0(DB_ERROR(lmdb_error("Failed to delete txs from the db: ", result).c_str()));
    RENAME_DB("txr");
    mdb_dbi_close(m_env, m_txs);
    lmdb_db_open(txn, "txs", MDB_INTEGERKEY, m_txs, "Failed to open db handle for txs");
    txn.commit();
  } while(0);

  uint32_t version = 1;
  v.mv_data = (void *)&version;
  v.mv_size = sizeof(version);
  MDB_val_copy<const char *> vk("version");
  result = mdb_txn_begin(m_env, NULL, 0, txn);
  if (result)
    throw0(DB_ERROR(lmdb_error("Failed to create a transaction for the db: ", result).c_str()));
  result = mdb_put(txn, m_properties, &vk, &v, 0);
  if (result)
    throw0(DB_ERROR(lmdb_error("Failed to update version for the db: ", result).c_str()));
  txn.commit();
}
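
// Runs the migrations needed to bring a database at oldversion up to the
// current schema; the cases fall through so an older database runs every
// subsequent migration step in order.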
void BlockchainLMDB::migrate(const uint32_t oldversion)
{
  switch(oldversion) {
  case 0:
    migrate_0_1(); /* FALLTHRU */
  default:
    ;
  }
}

}  // namespace cryptonote