hardfork: most state now saved to the DB
There will be a delay on first load of an existing blockchain as it gets reparsed for this state data.
parent 0a54c3a553
commit 5b11a89a76
10 changed files with 537 additions and 258 deletions
@@ -118,6 +118,9 @@ const char* const BDB_OUTPUT_KEYS = "output_keys";
const char* const BDB_SPENT_KEYS = "spent_keys";

const char* const BDB_HF_STARTING_HEIGHTS = "hf_starting_heights";
const char* const BDB_HF_VERSIONS = "hf_versions";

const unsigned int MB = 1024 * 1024;
// ND: FIXME: db keeps running out of locks when doing full syncs. Possible bug??? Set it to 5K for now.
const unsigned int DB_MAX_LOCKS = 5000;

@@ -667,6 +670,9 @@ void BlockchainBDB::open(const std::string& filename, const int db_flags)
m_spent_keys = new Db(m_env, 0);

m_hf_starting_heights = new Db(m_env, 0);
m_hf_versions = new Db(m_env, 0);

// Tell DB about Dbs that need duplicate support
// Note: no need to tell about sorting,
// as the default is insertion order, which we want

@@ -684,6 +690,9 @@ void BlockchainBDB::open(const std::string& filename, const int db_flags)
m_output_indices->set_re_len(sizeof(uint64_t));
m_output_keys->set_re_len(sizeof(output_data_t));

m_hf_starting_heights->set_re_len(sizeof(uint64_t));
m_hf_versions->set_re_len(sizeof(uint8_t));

//TODO: Find out if we need to do Db::set_flags(DB_RENUMBER)
// for the RECNO databases. We shouldn't as we're only
// inserting/removing from the end, but we'll see.

@@ -713,6 +722,9 @@ void BlockchainBDB::open(const std::string& filename, const int db_flags)
m_spent_keys->open(txn, BDB_SPENT_KEYS, NULL, DB_HASH, DB_CREATE, 0);

m_hf_starting_heights->open(txn, BDB_HF_STARTING_HEIGHTS, NULL, DB_RECNO, DB_CREATE, 0);
m_hf_versions->open(txn, BDB_HF_VERSIONS, NULL, DB_RECNO, DB_CREATE, 0);

txn.commit();

DB_BTREE_STAT* stats;

@@ -785,6 +797,9 @@ void BlockchainBDB::sync()
m_output_keys->sync(0);

m_spent_keys->sync(0);

m_hf_starting_heights->sync(0);
m_hf_versions->sync(0);
}
catch (const std::exception& e)
{

@@ -859,6 +874,12 @@ std::vector<std::string> BlockchainBDB::get_filenames() const
m_spent_keys->get_dbname(pfname, pdbname);
filenames.push_back(fname);

m_hf_starting_heights->get_dbname(pfname, pdbname);
filenames.push_back(fname);

m_hf_versions->get_dbname(pfname, pdbname);
filenames.push_back(fname);

std::vector<std::string> full_paths;

for (auto& filename : filenames)

@@ -1839,6 +1860,60 @@ void BlockchainBDB::get_output_tx_and_index(const uint64_t& amount, const std::v
LOG_PRINT_L3("db3: " << db3);
}

void BlockchainBDB::set_hard_fork_starting_height(uint8_t version, uint64_t height)
{
LOG_PRINT_L3("BlockchainBDB::" << __func__);
check_open();

Dbt_copy<uint8_t> val_key(version);
Dbt_copy<uint64_t> val(height);
if (m_hf_starting_heights->put(DB_DEFAULT_TX, &val_key, &val, 0))
throw1(DB_ERROR("Error adding hard fork starting height to db transaction."));
}

uint64_t BlockchainBDB::get_hard_fork_starting_height(uint8_t version) const
{
LOG_PRINT_L3("BlockchainBDB::" << __func__);
check_open();

Dbt_copy<uint8_t> key(version);
Dbt_copy<uint64_t> result;

auto get_result = m_hf_starting_heights->get(DB_DEFAULT_TX, &key, &result, 0);
if (get_result == DB_NOTFOUND)
return std::numeric_limits<uint64_t>::max();
else if (get_result)
throw0(DB_ERROR("Error attempting to retrieve hard fork starting height from the db"));

return result;
}

void BlockchainBDB::set_hard_fork_version(uint64_t height, uint8_t version)
{
LOG_PRINT_L3("BlockchainBDB::" << __func__);
check_open();

Dbt_copy<uint64_t> val_key(height);
Dbt_copy<uint8_t> val(version);
if (m_hf_versions->put(DB_DEFAULT_TX, &val_key, &val, 0))
throw1(DB_ERROR("Error adding hard fork version to db transaction."));
}

uint8_t BlockchainBDB::get_hard_fork_version(uint64_t height) const
{
LOG_PRINT_L3("BlockchainBDB::" << __func__);
check_open();

Dbt_copy<uint64_t> key(height);
Dbt_copy<uint8_t> result;

auto get_result = m_hf_versions->get(DB_DEFAULT_TX, &key, &result, 0);
if (get_result == DB_NOTFOUND || get_result)
throw0(DB_ERROR("Error attempting to retrieve hard fork version from the db"));

return result;
}

void BlockchainBDB::checkpoint_worker() const
{
LOG_PRINT_L0("Entering BDB checkpoint thread.")
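Both new BDB tables are fixed-length DB_RECNO databases, and a starting height that was never stored is reported as the largest uint64_t value rather than an error. A minimal caller-side sketch of that convention (illustrative only, not part of the commit; it assumes the BlockchainDB interface declared further down in blockchain_db.h):

#include <cstdint>
#include <limits>

// Returns true and fills `height` only if a starting height was recorded for `version`.
static bool fork_height_recorded(const cryptonote::BlockchainDB &db, uint8_t version, uint64_t &height)
{
  height = db.get_hard_fork_starting_height(version);
  return height != std::numeric_limits<uint64_t>::max();  // max() is the "unset" sentinel
}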
@@ -374,6 +374,13 @@ private:
virtual void remove_spent_key(const crypto::key_image& k_image);

void get_output_global_indices(const uint64_t& amount, const std::vector<uint64_t> &offsets, std::vector<uint64_t> &global_indices);

// Hard fork related storage
virtual void set_hard_fork_starting_height(uint8_t version, uint64_t height);
virtual uint64_t get_hard_fork_starting_height(uint8_t version) const;
virtual void set_hard_fork_version(uint64_t height, uint8_t version);
virtual uint8_t get_hard_fork_version(uint64_t height) const;

/**
* @brief convert a tx output to a blob for storage
*

@@ -430,6 +437,9 @@ private:
Db* m_spent_keys;

Db* m_hf_starting_heights;
Db* m_hf_versions;

uint64_t m_height;
uint64_t m_num_outputs;
std::string m_folder;
@@ -493,6 +493,12 @@ public:
// returns true if key image <img> is present in spent key images storage
virtual bool has_key_image(const crypto::key_image& img) const = 0;

// Hard fork related storage
virtual void set_hard_fork_starting_height(uint8_t version, uint64_t height) = 0;
virtual uint64_t get_hard_fork_starting_height(uint8_t version) const = 0;
virtual void set_hard_fork_version(uint64_t height, uint8_t version) = 0;
virtual uint8_t get_hard_fork_version(uint64_t height) const = 0;

void set_auto_remove_logs(bool auto_remove) { m_auto_remove_logs = auto_remove; }

bool m_open;
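These four pure-virtual accessors are all a backend has to provide; the in-memory TestDB added to the unit tests in this same commit satisfies them with a plain array and a vector. A condensed sketch of that pattern (hypothetical type name, for illustration only):

#include <cstddef>
#include <cstdint>
#include <limits>
#include <vector>

struct hard_fork_store_stub  // hypothetical name; mirrors the TestDB in the unit tests
{
  uint64_t starting_height[256];
  std::vector<uint8_t> versions;  // hard fork version recorded per block height

  hard_fork_store_stub()
  {
    for (size_t n = 0; n < 256; ++n)
      starting_height[n] = std::numeric_limits<uint64_t>::max();  // "not set yet"
  }
  void set_hard_fork_starting_height(uint8_t version, uint64_t height) { starting_height[version] = height; }
  uint64_t get_hard_fork_starting_height(uint8_t version) const { return starting_height[version]; }
  void set_hard_fork_version(uint64_t height, uint8_t version)
  {
    if (versions.size() <= height) versions.resize(height + 1);
    versions[height] = version;
  }
  uint8_t get_hard_fork_version(uint64_t height) const { return versions[height]; }
};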
@@ -131,6 +131,15 @@ auto compare_uint64 = [](const MDB_val *a, const MDB_val *b)
else return 1;
};

auto compare_uint8 = [](const MDB_val *a, const MDB_val *b)
{
const uint8_t va = *(const uint8_t*)a->mv_data;
const uint8_t vb = *(const uint8_t*)b->mv_data;
if (va < vb) return -1;
else if (va == vb) return 0;
else return 1;
};

int compare_hash32(const MDB_val *a, const MDB_val *b)
{
uint32_t *va = (uint32_t*) a->mv_data;

@@ -166,6 +175,9 @@ const char* const LMDB_OUTPUTS = "outputs";
const char* const LMDB_OUTPUT_GINDICES = "output_gindices";
const char* const LMDB_SPENT_KEYS = "spent_keys";

const char* const LMDB_HF_STARTING_HEIGHTS = "hf_starting_heights";
const char* const LMDB_HF_VERSIONS = "hf_versions";

inline void lmdb_db_open(MDB_txn* txn, const char* name, int flags, MDB_dbi& dbi, const std::string& error_string)
{
if (mdb_dbi_open(txn, name, flags, &dbi))

@@ -1022,6 +1034,9 @@ void BlockchainLMDB::open(const std::string& filename, const int mdb_flags)
lmdb_db_open(txn, LMDB_SPENT_KEYS, MDB_CREATE, m_spent_keys, "Failed to open db handle for m_spent_keys");

lmdb_db_open(txn, LMDB_HF_STARTING_HEIGHTS, MDB_CREATE, m_hf_starting_heights, "Failed to open db handle for m_hf_starting_heights");
lmdb_db_open(txn, LMDB_HF_VERSIONS, MDB_CREATE, m_hf_versions, "Failed to open db handle for m_hf_versions");

mdb_set_dupsort(txn, m_output_amounts, compare_uint64);
mdb_set_dupsort(txn, m_tx_outputs, compare_uint64);
mdb_set_compare(txn, m_spent_keys, compare_hash32);

@@ -1029,6 +1044,8 @@ void BlockchainLMDB::open(const std::string& filename, const int mdb_flags)
mdb_set_compare(txn, m_txs, compare_hash32);
mdb_set_compare(txn, m_tx_unlocks, compare_hash32);
mdb_set_compare(txn, m_tx_heights, compare_hash32);
mdb_set_compare(txn, m_hf_starting_heights, compare_uint8);
mdb_set_compare(txn, m_hf_versions, compare_uint64);

// get and keep current height
MDB_stat db_stats;

@@ -2347,4 +2364,111 @@ void BlockchainLMDB::get_output_tx_and_index(const uint64_t& amount, const std::
LOG_PRINT_L3("db3: " << db3);
}

void BlockchainLMDB::set_hard_fork_starting_height(uint8_t version, uint64_t height)
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();

mdb_txn_safe txn;
mdb_txn_safe* txn_ptr = &txn;
if (m_batch_active)
txn_ptr = m_write_txn;
else
{
if (mdb_txn_begin(m_env, NULL, 0, txn))
throw0(DB_ERROR("Failed to create a transaction for the db"));
txn_ptr = &txn;
}

MDB_val_copy<uint8_t> val_key(version);
MDB_val_copy<uint64_t> val_value(height);
if (auto result = mdb_put(*txn_ptr, m_hf_starting_heights, &val_key, &val_value, 0))
throw1(DB_ERROR(std::string("Error adding hard fork starting height to db transaction: ").append(mdb_strerror(result)).c_str()));

if (!m_batch_active)
txn.commit();
}

uint64_t BlockchainLMDB::get_hard_fork_starting_height(uint8_t version) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();

mdb_txn_safe txn;
mdb_txn_safe* txn_ptr = &txn;
if (m_batch_active)
txn_ptr = m_write_txn;
else
{
if (mdb_txn_begin(m_env, NULL, MDB_RDONLY, txn))
throw0(DB_ERROR("Failed to create a transaction for the db"));
txn_ptr = &txn;
}

MDB_val_copy<uint8_t> val_key(version);
MDB_val val_ret;
auto result = mdb_get(*txn_ptr, m_hf_starting_heights, &val_key, &val_ret);
if (result == MDB_NOTFOUND)
return std::numeric_limits<uint64_t>::max();
if (result)
throw0(DB_ERROR("Error attempting to retrieve a hard fork starting height from the db"));

if (!m_batch_active)
txn.commit();
return *(const uint64_t*)val_ret.mv_data;
}

void BlockchainLMDB::set_hard_fork_version(uint64_t height, uint8_t version)
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();

//LOG_PRINT_L1("BlockchainLMDB::set_hard_fork_version: batch " << m_batch_active << ", height " << height << ", version " << (int)version);
mdb_txn_safe txn;
mdb_txn_safe* txn_ptr = &txn;
if (m_batch_active)
txn_ptr = m_write_txn;
else
{
if (mdb_txn_begin(m_env, NULL, 0, txn))
throw0(DB_ERROR("Failed to create a transaction for the db"));
txn_ptr = &txn;
}

MDB_val_copy<uint64_t> val_key(height);
MDB_val_copy<uint8_t> val_value(version);
if (auto result = mdb_put(*txn_ptr, m_hf_versions, &val_key, &val_value, 0))
throw1(DB_ERROR(std::string("Error adding hard fork version to db transaction: ").append(mdb_strerror(result)).c_str()));

if (!m_batch_active)
txn.commit();
}

uint8_t BlockchainLMDB::get_hard_fork_version(uint64_t height) const
{
LOG_PRINT_L3("BlockchainLMDB::" << __func__);
check_open();

mdb_txn_safe txn;
mdb_txn_safe* txn_ptr = &txn;
if (m_batch_active)
txn_ptr = m_write_txn;
else
{
if (mdb_txn_begin(m_env, NULL, MDB_RDONLY, txn))
throw0(DB_ERROR("Failed to create a transaction for the db"));
txn_ptr = &txn;
}

MDB_val_copy<uint64_t> val_key(height);
MDB_val val_ret;
auto result = mdb_get(*txn_ptr, m_hf_versions, &val_key, &val_ret);
if (result == MDB_NOTFOUND || result)
throw0(DB_ERROR("Error attempting to retrieve a hard fork version from the db"));

if (!m_batch_active)
txn.commit();
return *(const uint8_t*)val_ret.mv_data;
}

} // namespace cryptonote
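All four LMDB accessors above share the same shape: reuse the long-lived m_write_txn while a batch is active, otherwise open and commit a local mdb_txn_safe, and map MDB_NOTFOUND onto the "unset" sentinel. A small illustrative helper using only the plain LMDB C API shows the read side of that idea (not part of the commit; `txn` and `dbi` are assumed to be already set up by the caller):

#include <cstdint>
#include <cstring>
#include <limits>
#include <lmdb.h>

// Read a uint64_t value for a one-byte key; MDB_NOTFOUND becomes the sentinel.
static uint64_t read_u64_or_sentinel(MDB_txn *txn, MDB_dbi dbi, uint8_t key_byte)
{
  MDB_val key{ sizeof(key_byte), &key_byte };
  MDB_val val;
  const int rc = mdb_get(txn, dbi, &key, &val);
  if (rc == MDB_NOTFOUND)
    return std::numeric_limits<uint64_t>::max();     // "never stored"
  if (rc)
    return std::numeric_limits<uint64_t>::max();     // the real code throws DB_ERROR here
  uint64_t out;
  std::memcpy(&out, val.mv_data, sizeof(out));       // copy, since mv_data points into the DB
  return out;
}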
@@ -238,6 +238,12 @@ private:
virtual void remove_spent_key(const crypto::key_image& k_image);

// Hard fork
virtual void set_hard_fork_starting_height(uint8_t version, uint64_t height);
virtual uint64_t get_hard_fork_starting_height(uint8_t version) const;
virtual void set_hard_fork_version(uint64_t height, uint8_t version);
virtual uint8_t get_hard_fork_version(uint64_t height) const;

/**
* @brief convert a tx output to a blob for storage
*

@@ -292,6 +298,9 @@ private:
MDB_dbi m_spent_keys;

MDB_dbi m_hf_starting_heights;
MDB_dbi m_hf_versions;

uint64_t m_height;
uint64_t m_num_outputs;
std::string m_folder;
@@ -69,10 +69,22 @@ extern "C" void slow_hash_free_state();
DISABLE_VS_WARNINGS(4267)

static const struct {
uint8_t version;
uint64_t height;
time_t time;
} hard_forks[] = {
// version 1 from the start of the blockchain
{ 1, 1, 1341378000 },

// version 2 can start from block 1009827, setup on the 20th of september
{ 2, 1009827, 1442763710 },
};

//------------------------------------------------------------------
Blockchain::Blockchain(tx_memory_pool& tx_pool) :
m_db(), m_tx_pool(tx_pool), m_timestamps_and_difficulties_height(0), m_current_block_cumul_sz_limit(0), m_is_in_checkpoint_zone(false),
m_is_blockchain_storing(false), m_enforce_dns_checkpoints(false), m_hardfork(), m_max_prepare_blocks_threads(4), m_db_blocks_per_sync(1), m_db_sync_mode(db_async), m_fast_sync(true)
m_is_blockchain_storing(false), m_enforce_dns_checkpoints(false), m_max_prepare_blocks_threads(4), m_db_blocks_per_sync(1), m_db_sync_mode(db_async), m_fast_sync(true)
{
LOG_PRINT_L3("Blockchain::" << __func__);
}

@@ -120,7 +132,7 @@ void Blockchain::serialize(archive_t & ar, const unsigned int version)
if (version > 12)
{
ar & m_hardfork;
ar & *m_hardfork;
}

LOG_PRINT_L3("Blockchain storage:" << std::endl << "m_blocks: " << m_db->height() << std::endl << "m_blocks_index: " << m_blocks_index.size() << std::endl << "m_transactions: " << m_transactions.size() << std::endl << "dummy_key_images_container: " << dummy_key_images_container.size() << std::endl << "m_alternative_chains: " << m_alternative_chains.size() << std::endl << "m_outputs: " << m_outputs.size() << std::endl << "m_invalid_blocks: " << m_invalid_blocks.size() << std::endl << "m_current_block_cumul_sz_limit: " << m_current_block_cumul_sz_limit);

@@ -275,6 +287,11 @@ bool Blockchain::init(BlockchainDB* db, const bool testnet)
m_db = db;

m_hardfork = new HardFork(*db);
for (size_t n = 0; n < sizeof(hard_forks) / sizeof(hard_forks[0]); ++n)
m_hardfork->add(hard_forks[n].version, hard_forks[n].height, hard_forks[n].time);
m_hardfork->init();

// if the blockchain is new, add the genesis block
// this feels kinda kludgy to do it this way, but can be looked at later.
// TODO: add function to create and store genesis block,

@@ -355,9 +372,6 @@ bool Blockchain::init(BlockchainDB* db, const bool testnet)
}
#endif

// reinitialize hard fork versions, since they're not saved in the DB
m_hardfork.reorganize_from_chain_height (m_db, 1);

LOG_PRINT_GREEN("Blockchain initialized. last block: " << m_db->height() - 1 << ", " << epee::misc_utils::get_time_interval_string(timestamp_diff) << " time ago, current difficulty: " << get_difficulty_for_next_block(), LOG_LEVEL_0);

return true;

@@ -424,6 +438,7 @@ bool Blockchain::deinit()
LOG_PRINT_L0("There was an issue closing/storing the blockchain, shutting down now to prevent issues!");
}

delete m_hardfork;
delete m_db;
return true;
}

@@ -714,7 +729,7 @@ bool Blockchain::rollback_blockchain_switching(std::list<block>& original_chain,
CHECK_AND_ASSERT_MES(r && bvc.m_added_to_main_chain, false, "PANIC! failed to add (again) block while chain switching during the rollback!");
}

m_hardfork.reorganize_from_chain_height(m_db, rollback_height);
m_hardfork->reorganize_from_chain_height(rollback_height);

LOG_PRINT_L1("Rollback to height " << rollback_height << " was successful.");
if (original_chain.size())

@@ -813,7 +828,7 @@ bool Blockchain::switch_to_alternative_blockchain(std::list<blocks_ext_by_hash::
m_alternative_chains.erase(ch_ent);
}

m_hardfork.reorganize_from_chain_height(m_db, split_height);
m_hardfork->reorganize_from_chain_height(split_height);

LOG_PRINT_GREEN("REORGANIZE SUCCESS! on height: " << split_height << ", new blockchain size: " << m_db->height(), LOG_LEVEL_0);
return true;

@@ -986,7 +1001,7 @@ bool Blockchain::create_block_template(block& b, const account_public_address& m
CRITICAL_REGION_BEGIN(m_blockchain_lock);
height = m_db->height();

b.major_version = m_hardfork.get_ideal_version();
b.major_version = m_hardfork->get_ideal_version();
b.minor_version = 0;
b.prev_id = get_tail_id();
b.timestamp = time(NULL);

@@ -2259,9 +2274,9 @@ bool Blockchain::handle_block_to_main_chain(const block& bl, const crypto::hash&
TIME_MEASURE_START(t1);

// this is a cheap test
if (!m_hardfork.check(bl))
if (!m_hardfork->check(bl))
{
LOG_PRINT_L1("Block with id: " << id << std::endl << "has old version: " << bl.major_version << std::endl << "current: " << m_hardfork.get_current_version());
LOG_PRINT_L1("Block with id: " << id << std::endl << "has old version: " << bl.major_version << std::endl << "current: " << m_hardfork->get_current_version());
return false;
}

@@ -2542,7 +2557,7 @@ bool Blockchain::handle_block_to_main_chain(const block& bl, const crypto::hash&
update_next_cumulative_size_limit();

// this will not fail since check succeeded above
m_hardfork.add(bl, new_height - 1);
m_hardfork->add(bl, new_height - 1);

LOG_PRINT_L1("+++++ BLOCK SUCCESSFULLY ADDED" << std::endl << "id:\t" << id << std::endl << "PoW:\t" << proof_of_work << std::endl << "HEIGHT " << new_height << ", difficulty:\t" << current_diffic << std::endl << "block reward: " << print_money(fee_summary + base_reward) << "(" << print_money(base_reward) << " + " << print_money(fee_summary) << "), coinbase_blob_size: " << coinbase_blob_size << ", cumulative size: " << cumulative_block_size << ", " << block_processing_time << "(" << target_calculating_time << "/" << longhash_calculating_time << ")ms");
if(m_show_time_stats)

@@ -3080,10 +3095,10 @@ void Blockchain::set_user_options(uint64_t maxthreads, uint64_t blocks_per_sync,
HardFork::State Blockchain::get_hard_fork_state() const
{
return m_hardfork.get_state();
return m_hardfork->get_state();
}

bool Blockchain::get_hard_fork_voting_info(uint8_t version, uint32_t &window, uint32_t &votes, uint32_t &threshold, uint8_t &voting) const
{
return m_hardfork.get_voting_info(version, window, votes, threshold, voting);
return m_hardfork->get_voting_info(version, window, votes, threshold, voting);
}
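The hard_forks[] table at the top of blockchain.cpp is the single source of truth for scheduled forks, and Blockchain::init() now feeds it into a heap-allocated, DB-backed HardFork object instead of reorganizing a value member by hand. A hypothetical free-standing helper showing the same wiring (assumes the hard_forks[] table above is in scope; the function name is an illustration, not commit code):

#include <cstddef>

// `db` can be any opened BlockchainDB backend (the LMDB or BDB implementations above).
static cryptonote::HardFork *make_hardfork(cryptonote::BlockchainDB &db)
{
  cryptonote::HardFork *hf = new cryptonote::HardFork(db);
  for (size_t n = 0; n < sizeof(hard_forks) / sizeof(hard_forks[0]); ++n)
    hf->add(hard_forks[n].version, hard_forks[n].height, hard_forks[n].time);
  hf->init();  // loads the saved per-height versions, or reparses the chain on first run
  return hf;   // caller owns it, mirroring `delete m_hardfork` in Blockchain::deinit()
}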
@@ -160,8 +160,8 @@ namespace cryptonote
void set_show_time_stats(bool stats) { m_show_time_stats = stats; }

HardFork::State get_hard_fork_state() const;
uint8_t get_current_hard_fork_version() const { return m_hardfork.get_current_version(); }
uint8_t get_ideal_hard_fork_version() const { return m_hardfork.get_ideal_version(); }
uint8_t get_current_hard_fork_version() const { return m_hardfork->get_current_version(); }
uint8_t get_ideal_hard_fork_version() const { return m_hardfork->get_ideal_version(); }
bool get_hard_fork_voting_info(uint8_t version, uint32_t &window, uint32_t &votes, uint32_t &threshold, uint8_t &voting) const;

BlockchainDB& get_db()

@@ -233,7 +233,7 @@ namespace cryptonote
std::atomic<bool> m_is_blockchain_storing;
bool m_enforce_dns_checkpoints;

HardFork m_hardfork;
HardFork *m_hardfork;

template<class visitor_t>
inline bool scan_outputkeys_for_indexes(const txin_to_key& tx_in_to_key, visitor_t &vis, const crypto::hash &tx_prefix_hash, uint64_t* pmax_related_block_height = NULL) const;
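The accessors above are what a front end would poll for fork progress. An illustrative caller, using only the signatures declared in this header (the function itself is an assumption, not commit code):

#include <cstdint>

static bool fork_vote_summary(const cryptonote::Blockchain &bc, uint32_t &window, uint32_t &votes, uint32_t &threshold)
{
  uint8_t voting = 0;
  const uint8_t ideal = bc.get_ideal_hard_fork_version();
  // window: size of the rolling vote window; votes: blocks in it signalling `ideal`;
  // threshold: votes needed before the fork counts as reached.
  return bc.get_hard_fork_voting_info(ideal, window, votes, threshold, voting);
}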
@@ -35,15 +35,14 @@
using namespace cryptonote;

HardFork::HardFork(uint8_t original_version, time_t forked_time, time_t update_time, uint64_t max_history, int threshold_percent, uint64_t checkpoint_period):
HardFork::HardFork(cryptonote::BlockchainDB &db, uint8_t original_version, time_t forked_time, time_t update_time, uint64_t window_size, int threshold_percent):
db(db),
original_version(original_version),
forked_time(forked_time),
update_time(update_time),
max_history(max_history),
threshold_percent(threshold_percent),
checkpoint_period(checkpoint_period)
window_size(window_size),
threshold_percent(threshold_percent)
{
init();
}

bool HardFork::add(uint8_t version, uint64_t height, time_t time)

@@ -96,7 +95,7 @@ bool HardFork::add(const cryptonote::block &block, uint64_t height)
const uint8_t version = get_effective_version(block);

while (versions.size() >= max_history) {
while (versions.size() >= window_size) {
const uint8_t old_version = versions.front();
last_versions[old_version]--;
assert(last_versions[old_version] >= 0);

@@ -109,13 +108,12 @@ bool HardFork::add(const cryptonote::block &block, uint64_t height)
uint8_t voted = get_voted_fork_index(height);
if (voted > current_fork_index) {
for (int v = heights[current_fork_index].version + 1; v <= heights[voted].version; ++v) {
starting[v] = height;
db.set_hard_fork_starting_height(v, height);
}
current_fork_index = voted;
}

if (height % checkpoint_period == 0)
checkpoints.push_back(std::make_pair(height, current_fork_index));
db.set_hard_fork_version(height, heights[current_fork_index].version);

return true;
}

@@ -126,59 +124,66 @@ void HardFork::init()
versions.clear();
for (size_t n = 0; n < 256; ++n)
last_versions[n] = 0;
for (size_t n = 0; n < 256; ++n)
starting[n] = std::numeric_limits<uint64_t>::max();
add(original_version, 0, 0);
for (size_t n = 0; n <= original_version; ++n)
starting[n] = 0;
checkpoints.clear();
current_fork_index = 0;
vote_threshold = (uint32_t)ceilf(max_history * threshold_percent / 100.0f);
vote_threshold = (uint32_t)ceilf(window_size * threshold_percent / 100.0f);

// restore state from DB
uint64_t height = db.height();
if (height > window_size)
height -= window_size;
else
height = 1;

bool populate = db.get_hard_fork_starting_height(original_version) == std::numeric_limits<uint64_t>::max();
if (populate) {
LOG_PRINT_L0("The DB has no hard fork info, reparsing from start");
height = 1;
}
LOG_PRINT_L1("reorganizing from " << height);
reorganize_from_chain_height(height);
if (populate) {
// reorg will not touch the genesis block, use this as a flag for populating done
db.set_hard_fork_version(0, original_version);
db.set_hard_fork_starting_height(original_version, 0);
}
LOG_PRINT_L1("reorganization done");
}

bool HardFork::reorganize_from_block_height(const cryptonote::BlockchainDB *db, uint64_t height)
bool HardFork::reorganize_from_block_height(uint64_t height)
{
CRITICAL_REGION_LOCAL(lock);
if (!db || height >= db->height())
if (height >= db.height())
return false;
while (!checkpoints.empty() && checkpoints.back().first > height)
checkpoints.pop_back();

versions.clear();

int v;
for (v = 255; v >= 0; --v) {
if (starting[v] <= height)
break;
if (starting[v] != std::numeric_limits<uint64_t>::max()) {
starting[v] = std::numeric_limits<uint64_t>::max();
}
}
for (current_fork_index = 0; current_fork_index < heights.size(); ++current_fork_index) {
if (heights[current_fork_index].version == v)
break;
}
for (size_t n = 0; n < 256; ++n)
last_versions[n] = 0;
const uint64_t rescan_height = height >= (max_history - 1) ? height - (max_history - 1) : 0;
const uint64_t rescan_height = height >= (window_size - 1) ? height - (window_size - 1) : 0;
const uint8_t start_version = height == 0 ? original_version : db.get_hard_fork_version(height);
while (heights[current_fork_index].version > start_version) {
db.set_hard_fork_starting_height(heights[current_fork_index].version, std::numeric_limits<uint64_t>::max());
--current_fork_index;
}
for (uint64_t h = rescan_height; h <= height; ++h) {
cryptonote::block b = db->get_block_from_height(h);
cryptonote::block b = db.get_block_from_height(h);
const uint8_t v = get_effective_version(b);
last_versions[v]++;
versions.push_back(v);
}
const uint64_t bc_height = db->height();
const uint64_t bc_height = db.height();
for (uint64_t h = height + 1; h < bc_height; ++h) {
add(db->get_block_from_height(h), h);
add(db.get_block_from_height(h), h);
}

return true;
}

bool HardFork::reorganize_from_chain_height(const cryptonote::BlockchainDB *db, uint64_t height)
bool HardFork::reorganize_from_chain_height(uint64_t height)
{
if (height == 0)
return false;
return reorganize_from_block_height(db, height - 1);
return reorganize_from_block_height(height - 1);
}

int HardFork::get_voted_fork_index(uint64_t height) const

@@ -219,18 +224,17 @@ HardFork::State HardFork::get_state() const
uint8_t HardFork::get(uint64_t height) const
{
CRITICAL_REGION_LOCAL(lock);
for (size_t n = 1; n < 256; ++n) {
if (starting[n] > height)
return n - 1;
}
if (height > db.height()) {
assert(false);
return 255;
}
return db.get_hard_fork_version(height);
}

uint64_t HardFork::get_start_height(uint8_t version) const
{
CRITICAL_REGION_LOCAL(lock);
return starting[version];
return db.get_hard_fork_starting_height(version);
}

uint8_t HardFork::get_current_version() const

@@ -261,20 +265,3 @@ bool HardFork::get_voting_info(uint8_t version, uint32_t &window, uint32_t &vote
return enabled;
}

template<class archive_t>
void HardFork::serialize(archive_t & ar, const unsigned int version)
{
CRITICAL_REGION_LOCAL(lock);
ar & forked_time;
ar & update_time;
ar & max_history;
ar & threshold_percent;
ar & original_version;
ar & heights;
ar & last_versions;
ar & starting;
ar & current_fork_index;
ar & vote_threshold;
ar & checkpoint_period;
ar & checkpoints;
}
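Two details in the HardFork::init() hunk above are worth spelling out. First, the voting threshold is derived from the window: with the defaults of a 50-block window and an 80% supermajority, 40 of the last 50 block versions must signal a later fork. Second, the DB is treated as authoritative: if get_hard_fork_starting_height(original_version) still returns the uint64 "unset" sentinel, the object reparses the chain from height 1 and only then writes the genesis entries, using them as the "population finished" marker. A small sketch of the threshold arithmetic (illustrative only, not commit code):

#include <cmath>
#include <cstdint>

static uint32_t vote_threshold_for(uint64_t window_size, int threshold_percent)
{
  return (uint32_t)ceilf(window_size * threshold_percent / 100.0f);
}
// vote_threshold_for(50, 80) == 40, matching the DEFAULT_* values in hardfork.h below.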
@@ -28,8 +28,6 @@
#pragma once

#include <boost/serialization/serialization.hpp>
#include <boost/serialization/version.hpp>
#include "syncobj.h"
#include "cryptonote_core/cryptonote_basic.h"

@@ -48,9 +46,8 @@ namespace cryptonote
static const time_t DEFAULT_FORKED_TIME = 31557600; // a year in seconds
static const time_t DEFAULT_UPDATE_TIME = 31557600 / 2;
static const uint64_t DEFAULT_MAX_HISTORY = 50; // supermajority window check length
static const uint64_t DEFAULT_WINDOW_SIZE = 50; // supermajority window check length
static const int DEFAULT_THRESHOLD_PERCENT = 80;
static const uint64_t DEFAULT_CHECKPOINT_PERIOD = 1024; // mark a checkpoint every that many blocks

/**
* @brief creates a new HardFork object

@@ -58,10 +55,10 @@ namespace cryptonote
* @param original_version the block version for blocks 0 through to the first fork
* @param forked_time the time in seconds before thinking we're forked
* @param update_time the time in seconds before thinking we need to update
* @param max_history the size of the window in blocks to consider for version voting
* @param window_size the size of the window in blocks to consider for version voting
* @param threshold_percent the size of the majority in percents
*/
HardFork(uint8_t original_version = 1, time_t forked_time = DEFAULT_FORKED_TIME, time_t update_time = DEFAULT_UPDATE_TIME, uint64_t max_history = DEFAULT_MAX_HISTORY, int threshold_percent = DEFAULT_THRESHOLD_PERCENT, uint64_t checkpoint_period = DEFAULT_CHECKPOINT_PERIOD);
HardFork(cryptonote::BlockchainDB &db, uint8_t original_version = 1, time_t forked_time = DEFAULT_FORKED_TIME, time_t update_time = DEFAULT_UPDATE_TIME, uint64_t window_size = DEFAULT_WINDOW_SIZE, int threshold_percent = DEFAULT_THRESHOLD_PERCENT);

/**
* @brief add a new hardfork height

@@ -74,6 +71,13 @@ namespace cryptonote
*/
bool add(uint8_t version, uint64_t height, time_t time);

/**
* @brief initialize the object
*
* Must be done after adding all the required hardforks via add above
*/
void init();

/**
* @brief check whether a new block would be accepted
*

@@ -91,9 +95,7 @@ namespace cryptonote
* call add first, then, if the hard fork requirements are met,
* add the block to the blockchain, upon which a failure (the
* block being invalid, double spending, etc) would cause the
* hardfork object to rescan the blockchain versions past the
* last checkpoint, potentially causing a large number of DB
* operations.
* hardfork object to reorganize.
*/
bool check(const cryptonote::block &block) const;

@@ -117,8 +119,8 @@ namespace cryptonote
* @param blockchain the blockchain
* @param height of the last block kept from the previous blockchain
*/
bool reorganize_from_block_height(const cryptonote::BlockchainDB *db, uint64_t height);
bool reorganize_from_chain_height(const cryptonote::BlockchainDB *db, uint64_t height);
bool reorganize_from_block_height(uint64_t height);
bool reorganize_from_chain_height(uint64_t height);

/**
* @brief returns current state at the given time

@@ -176,23 +178,21 @@ namespace cryptonote
/**
* @brief returns the size of the voting window in blocks
*/
uint64_t get_window_size() const { return max_history; }

template<class archive_t>
void serialize(archive_t & ar, const unsigned int version);
uint64_t get_window_size() const { return window_size; }

private:

void init();
bool do_check(const cryptonote::block &block) const;
int get_voted_fork_index(uint64_t height) const;
uint8_t get_effective_version(const cryptonote::block &block) const;

private:

BlockchainDB &db;

time_t forked_time;
time_t update_time;
uint64_t max_history;
uint64_t window_size;
int threshold_percent;

uint8_t original_version;

@@ -206,16 +206,11 @@ namespace cryptonote
std::deque<uint8_t> versions; /* rolling window of the last N blocks' versions */
unsigned int last_versions[256]; /* count of the block versions in the last N blocks */
uint64_t starting[256]; /* block height at which each fork starts */
unsigned int current_fork_index;
uint32_t current_fork_index;
uint32_t vote_threshold;

uint64_t checkpoint_period;
std::vector<std::pair<uint64_t, int>> checkpoints;

mutable epee::critical_section lock;
};

} // namespace cryptonote

BOOST_CLASS_VERSION(cryptonote::HardFork, 1)
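With the new constructor the object is bound to a BlockchainDB for its whole lifetime, and the checkpoint_period parameter is gone. A minimal construction sketch under the defaults above (illustrative only; `db` stands for any BlockchainDB implementation, and the fork values are the same ones used in blockchain.cpp):

static void setup_hardfork(cryptonote::BlockchainDB &db)
{
  cryptonote::HardFork hf(db);     // defaults: v1 from genesis, 50-block window, 80% threshold
  hf.add(1, 0, 0);                 // version 1 from height 0
  hf.add(2, 1009827, 1442763710);  // version 2 may start at height 1009827
  hf.init();                       // must follow the add() calls; restores or rebuilds the DB state
}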
@@ -39,101 +39,13 @@ using namespace cryptonote;
#define BLOCKS_PER_YEAR 525960
#define SECONDS_PER_YEAR 31557600

static cryptonote::block mkblock(uint8_t version)
{
cryptonote::block b;
b.major_version = version;
return b;
}

TEST(empty_hardforks, Success)
{
HardFork hf;

ASSERT_TRUE(hf.get_state(time(NULL)) == HardFork::Ready);
ASSERT_TRUE(hf.get_state(time(NULL) + 3600*24*400) == HardFork::Ready);

ASSERT_EQ(hf.get(0), 1);
ASSERT_EQ(hf.get(1), 1);
ASSERT_EQ(hf.get(100000000), 1);
}

TEST(ordering, Success)
{
HardFork hf;

ASSERT_TRUE(hf.add(2, 2, 1));
ASSERT_FALSE(hf.add(3, 3, 1));
ASSERT_FALSE(hf.add(3, 2, 2));
ASSERT_FALSE(hf.add(2, 3, 2));
ASSERT_TRUE(hf.add(3, 10, 2));
ASSERT_TRUE(hf.add(4, 20, 3));
ASSERT_FALSE(hf.add(5, 5, 4));
}

TEST(states, Success)
{
HardFork hf;

ASSERT_TRUE(hf.add(2, BLOCKS_PER_YEAR, SECONDS_PER_YEAR));

ASSERT_TRUE(hf.get_state(0) == HardFork::Ready);
ASSERT_TRUE(hf.get_state(SECONDS_PER_YEAR / 2) == HardFork::Ready);
ASSERT_TRUE(hf.get_state(SECONDS_PER_YEAR + HardFork::DEFAULT_UPDATE_TIME / 2) == HardFork::Ready);
ASSERT_TRUE(hf.get_state(SECONDS_PER_YEAR + (HardFork::DEFAULT_UPDATE_TIME + HardFork::DEFAULT_FORKED_TIME) / 2) == HardFork::UpdateNeeded);
ASSERT_TRUE(hf.get_state(SECONDS_PER_YEAR + HardFork::DEFAULT_FORKED_TIME * 2) == HardFork::LikelyForked);

ASSERT_TRUE(hf.add(3, BLOCKS_PER_YEAR * 5, SECONDS_PER_YEAR * 5));

ASSERT_TRUE(hf.get_state(0) == HardFork::Ready);
ASSERT_TRUE(hf.get_state(SECONDS_PER_YEAR / 2) == HardFork::Ready);
ASSERT_TRUE(hf.get_state(SECONDS_PER_YEAR + HardFork::DEFAULT_UPDATE_TIME / 2) == HardFork::Ready);
ASSERT_TRUE(hf.get_state(SECONDS_PER_YEAR + (HardFork::DEFAULT_UPDATE_TIME + HardFork::DEFAULT_FORKED_TIME) / 2) == HardFork::Ready);
ASSERT_TRUE(hf.get_state(SECONDS_PER_YEAR + HardFork::DEFAULT_FORKED_TIME * 2) == HardFork::Ready);
}

TEST(steps_asap, Success)
{
HardFork hf(1,1,1,1);

// v h t
ASSERT_TRUE(hf.add(4, 2, 1));
ASSERT_TRUE(hf.add(7, 4, 2));
ASSERT_TRUE(hf.add(9, 6, 3));

for (uint64_t h = 0; h < 10; ++h)
hf.add(mkblock(10), h);

ASSERT_EQ(hf.get(0), 1);
ASSERT_EQ(hf.get(1), 1);
ASSERT_EQ(hf.get(2), 4);
ASSERT_EQ(hf.get(3), 4);
ASSERT_EQ(hf.get(4), 7);
ASSERT_EQ(hf.get(5), 7);
ASSERT_EQ(hf.get(6), 9);
ASSERT_EQ(hf.get(7), 9);
ASSERT_EQ(hf.get(8), 9);
ASSERT_EQ(hf.get(9), 9);
ASSERT_EQ(hf.get(100000), 9);
}

TEST(steps_1, Success)
{
HardFork hf(1,1,1,1);

// v h t
for (int n = 1 ; n < 10; ++n)
ASSERT_TRUE(hf.add(n+1, n, n));

for (uint64_t h = 0; h < 10; ++h) {
hf.add(mkblock(h+1), h);
ASSERT_EQ(hf.get(h), h+1);
}
}

class TestDB: public BlockchainDB {
public:
virtual void open(const std::string& filename, const int db_flags = 0) {}
virtual void open(const std::string& filename, const int db_flags = 0) {
for (size_t n = 0; n < 256; ++n)
starting_height[n] = std::numeric_limits<uint64_t>::max();
}
virtual void close() {}
virtual void sync() {}
virtual void reset() {}

@@ -197,37 +109,165 @@ public:
virtual block get_block_from_height(const uint64_t& height) const {
return blocks[height];
}
virtual void set_hard_fork_starting_height(uint8_t version, uint64_t height) {
starting_height[version] = height;
}
virtual uint64_t get_hard_fork_starting_height(uint8_t version) const {
return starting_height[version];
}
virtual void set_hard_fork_version(uint64_t height, uint8_t version) {
printf("set_hard_fork_version(%lu, %u)\n", (unsigned long)height, version);
if (versions.size() <= height) versions.resize(height+1); versions[height] = version;
}
virtual uint8_t get_hard_fork_version(uint64_t height) const {
printf("get_hard_fork_version(%lu)\n", (unsigned long)height);
return versions[height];
}

private:
std::vector<block> blocks;
uint64_t starting_height[256];
std::deque<uint8_t> versions;
};

static cryptonote::block mkblock(uint8_t version)
{
cryptonote::block b;
b.major_version = version;
return b;
}

TEST(empty_hardforks, Success)
{
TestDB db;
HardFork hf(db);

ASSERT_TRUE(hf.add(1, 0, 0));
hf.init();
ASSERT_TRUE(hf.get_state(time(NULL)) == HardFork::Ready);
ASSERT_TRUE(hf.get_state(time(NULL) + 3600*24*400) == HardFork::Ready);

for (uint64_t h = 0; h <= 10; ++h) {
db.add_block(mkblock(1), 0, 0, 0, crypto::hash());
ASSERT_TRUE(hf.add(db.get_block_from_height(h), h));
}
ASSERT_EQ(hf.get(0), 1);
ASSERT_EQ(hf.get(1), 1);
ASSERT_EQ(hf.get(10), 1);
}

TEST(ordering, Success)
{
TestDB db;
HardFork hf(db);

ASSERT_TRUE(hf.add(2, 2, 1));
ASSERT_FALSE(hf.add(3, 3, 1));
ASSERT_FALSE(hf.add(3, 2, 2));
ASSERT_FALSE(hf.add(2, 3, 2));
ASSERT_TRUE(hf.add(3, 10, 2));
ASSERT_TRUE(hf.add(4, 20, 3));
ASSERT_FALSE(hf.add(5, 5, 4));
}

TEST(states, Success)
{
TestDB db;
HardFork hf(db);

ASSERT_TRUE(hf.add(1, 0, 0));
ASSERT_TRUE(hf.add(2, BLOCKS_PER_YEAR, SECONDS_PER_YEAR));

ASSERT_TRUE(hf.get_state(0) == HardFork::Ready);
ASSERT_TRUE(hf.get_state(SECONDS_PER_YEAR / 2) == HardFork::Ready);
ASSERT_TRUE(hf.get_state(SECONDS_PER_YEAR + HardFork::DEFAULT_UPDATE_TIME / 2) == HardFork::Ready);
ASSERT_TRUE(hf.get_state(SECONDS_PER_YEAR + (HardFork::DEFAULT_UPDATE_TIME + HardFork::DEFAULT_FORKED_TIME) / 2) == HardFork::UpdateNeeded);
ASSERT_TRUE(hf.get_state(SECONDS_PER_YEAR + HardFork::DEFAULT_FORKED_TIME * 2) == HardFork::LikelyForked);

ASSERT_TRUE(hf.add(3, BLOCKS_PER_YEAR * 5, SECONDS_PER_YEAR * 5));

ASSERT_TRUE(hf.get_state(0) == HardFork::Ready);
ASSERT_TRUE(hf.get_state(SECONDS_PER_YEAR / 2) == HardFork::Ready);
ASSERT_TRUE(hf.get_state(SECONDS_PER_YEAR + HardFork::DEFAULT_UPDATE_TIME / 2) == HardFork::Ready);
ASSERT_TRUE(hf.get_state(SECONDS_PER_YEAR + (HardFork::DEFAULT_UPDATE_TIME + HardFork::DEFAULT_FORKED_TIME) / 2) == HardFork::Ready);
ASSERT_TRUE(hf.get_state(SECONDS_PER_YEAR + HardFork::DEFAULT_FORKED_TIME * 2) == HardFork::Ready);
}

TEST(steps_asap, Success)
{
TestDB db;
HardFork hf(db, 1,1,1,1);

// v h t
ASSERT_TRUE(hf.add(1, 0, 0));
ASSERT_TRUE(hf.add(4, 2, 1));
ASSERT_TRUE(hf.add(7, 4, 2));
ASSERT_TRUE(hf.add(9, 6, 3));
hf.init();

for (uint64_t h = 0; h < 10; ++h) {
db.add_block(mkblock(10), 0, 0, 0, crypto::hash());
ASSERT_TRUE(hf.add(db.get_block_from_height(h), h));
}

ASSERT_EQ(hf.get(0), 1);
ASSERT_EQ(hf.get(1), 1);
ASSERT_EQ(hf.get(2), 4);
ASSERT_EQ(hf.get(3), 4);
ASSERT_EQ(hf.get(4), 7);
ASSERT_EQ(hf.get(5), 7);
ASSERT_EQ(hf.get(6), 9);
ASSERT_EQ(hf.get(7), 9);
ASSERT_EQ(hf.get(8), 9);
ASSERT_EQ(hf.get(9), 9);
}

TEST(steps_1, Success)
{
TestDB db;
HardFork hf(db, 1,1,1,1);

ASSERT_TRUE(hf.add(1, 0, 0));
for (int n = 1 ; n < 10; ++n)
ASSERT_TRUE(hf.add(n+1, n, n));
hf.init();

for (uint64_t h = 0 ; h < 10; ++h) {
db.add_block(mkblock(h+1), 0, 0, 0, crypto::hash());
ASSERT_TRUE(hf.add(db.get_block_from_height(h), h));
}

for (uint64_t h = 0; h < 10; ++h) {
ASSERT_EQ(hf.get(h), h+1);
}
}

TEST(reorganize, Same)
{
for (int history = 1; history <= 12; ++history) {
for (uint64_t checkpoint_period = 1; checkpoint_period <= 16; checkpoint_period++) {
HardFork hf(1, 1, 1, history, 100, checkpoint_period);
TestDB db;
HardFork hf(db, 1, 1, 1, history, 100);

// v h t
ASSERT_TRUE(hf.add(1, 0, 0));
ASSERT_TRUE(hf.add(4, 2, 1));
ASSERT_TRUE(hf.add(7, 4, 2));
ASSERT_TRUE(hf.add(9, 6, 3));
hf.init();

// index 0 1 2 3 4 5 6 7 8 9
static const uint8_t block_versions[] = { 1, 1, 4, 4, 7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 };
for (uint64_t h = 0; h < 20; ++h) {
db.add_block(mkblock(block_versions[h]), 0, 0, 0, crypto::hash());
hf.add(db.get_block_from_height(h), h);
ASSERT_TRUE(hf.add(db.get_block_from_height(h), h));
}

for (uint64_t rh = 0; rh < 20; ++rh) {
hf.reorganize_from_block_height(&db, rh);
hf.reorganize_from_block_height(rh);
for (int hh = 0; hh < 20; ++hh) {
uint8_t version = hh >= (history-1) ? block_versions[hh - (history-1)] : 1;
ASSERT_EQ(hf.get(hh), version);
}
ASSERT_EQ(hf.get(100000), 9);
}
}
}
}

@@ -235,74 +275,87 @@ TEST(reorganize, Same)
TEST(reorganize, Changed)
{
int history = 4;
for (uint64_t checkpoint_period = 1; checkpoint_period <= 16; checkpoint_period++) {
HardFork hf(1, 1, 1, 4, 100, checkpoint_period);
TestDB db;
HardFork hf(db, 1, 1, 1, 4, 100);

// v h t
ASSERT_TRUE(hf.add(1, 0, 0));
ASSERT_TRUE(hf.add(4, 2, 1));
ASSERT_TRUE(hf.add(7, 4, 2));
ASSERT_TRUE(hf.add(9, 6, 3));
hf.init();

// index 0 1 2 3 4 5 6 7 8 9
static const uint8_t block_versions[] = { 1, 1, 4, 4, 7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 };
for (uint64_t h = 0; h < 20; ++h) {
static const uint8_t block_versions[] = { 1, 1, 4, 4, 7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 };
for (uint64_t h = 0; h < 16; ++h) {
db.add_block(mkblock(block_versions[h]), 0, 0, 0, crypto::hash());
hf.add(db.get_block_from_height(h), h);
ASSERT_TRUE (hf.add(db.get_block_from_height(h), h));
}

for (uint64_t rh = 0; rh < 20; ++rh) {
hf.reorganize_from_block_height(&db, rh);
for (int hh = 0; hh < 20; ++hh) {
for (uint64_t rh = 0; rh < 16; ++rh) {
hf.reorganize_from_block_height(rh);
for (int hh = 0; hh < 16; ++hh) {
uint8_t version = hh >= (history-1) ? block_versions[hh - (history-1)] : 1;
ASSERT_EQ(hf.get(hh), version);
}
ASSERT_EQ(hf.get(100000), 9);
}

// delay a bit for 9, and go back to 1 to check it stays at 9
static const uint8_t block_versions_new[] = { 1, 1, 4, 4, 7, 7, 4, 7, 7, 7, 9, 9, 9, 9, 9, 1, 1, 1, 1, 1 };
static const uint8_t expected_versions_new[] = { 1, 1, 1, 1, 1, 4, 4, 4, 4, 4, 7, 7, 7, 9, 9, 9, 9, 9, 9, 9 };
for (uint64_t h = 3; h < 20; ++h) {
static const uint8_t block_versions_new[] = { 1, 1, 4, 4, 7, 7, 4, 7, 7, 7, 9, 9, 9, 9, 9, 1 };
static const uint8_t expected_versions_new[] = { 1, 1, 1, 1, 1, 4, 4, 4, 4, 4, 7, 7, 7, 9, 9, 9 };
for (uint64_t h = 3; h < 16; ++h) {
db.remove_block();
}
ASSERT_EQ(db.height(), 3);
hf.reorganize_from_block_height(&db, 2);
for (uint64_t h = 3; h < 20; ++h) {
hf.reorganize_from_block_height(2);
for (uint64_t h = 3; h < 16; ++h) {
db.add_block(mkblock(block_versions_new[h]), 0, 0, 0, crypto::hash());
hf.add(db.get_block_from_height(h), h);
bool ret = hf.add(db.get_block_from_height(h), h);
ASSERT_EQ (ret, h < 15);
}
for (int hh = 0; hh < 20; ++hh) {
db.remove_block(); // last block added to the blockchain, but not hf
ASSERT_EQ(db.height(), 15);
for (int hh = 0; hh < 15; ++hh) {
ASSERT_EQ(hf.get(hh), expected_versions_new[hh]);
}
ASSERT_EQ(hf.get(100000), 9);
}
}

TEST(voting, threshold)
{
for (int threshold = 87; threshold <= 88; ++threshold) {
HardFork hf(1, 1, 1, 8, threshold, 10);
TestDB db;
HardFork hf(db, 1, 1, 1, 8, threshold);

// v h t
ASSERT_TRUE(hf.add(1, 0, 0));
ASSERT_TRUE(hf.add(2, 2, 1));
hf.init();

for (uint64_t h = 0; h < 10; ++h) {
for (uint64_t h = 0; h <= 8; ++h) {
uint8_t v = 1 + !!(h % 8);
hf.add(mkblock(v), h);
db.add_block(mkblock(v), 0, 0, 0, crypto::hash());
bool ret = hf.add(db.get_block_from_height(h), h);
if (h >= 8 && threshold == 87) {
ASSERT_FALSE(ret);
}
else {
ASSERT_TRUE(ret);
uint8_t expected = threshold == 88 ? 1 : h < 7 ? 1 : 2;
ASSERT_EQ(hf.get(h), expected);
}
}
}
}

TEST(new_blocks, denied)
{
HardFork hf(1, 1, 1, 4, 50, 10);
TestDB db;
HardFork hf(db, 1, 1, 1, 4, 50);

// v h t
ASSERT_TRUE(hf.add(1, 0, 0));
ASSERT_TRUE(hf.add(2, 2, 1));
hf.init();

ASSERT_FALSE(hf.add(mkblock(0), 0));
ASSERT_TRUE(hf.add(mkblock(1), 0));

@@ -322,10 +375,13 @@ TEST(new_blocks, denied)
TEST(new_version, early)
{
HardFork hf(1, 1, 1, 4, 50, 10);
TestDB db;
HardFork hf(db, 1, 1, 1, 4, 50);

// v h t
ASSERT_TRUE(hf.add(1, 0, 0));
ASSERT_TRUE(hf.add(2, 4, 1));
hf.init();

ASSERT_FALSE(hf.add(mkblock(0), 0));
ASSERT_TRUE(hf.add(mkblock(2), 0));

@@ -342,12 +398,14 @@ TEST(new_version, early)
TEST(reorganize, changed)
{
HardFork hf(1, 1, 1, 4, 50, 10);
TestDB db;
HardFork hf(db, 1, 1, 1, 4, 50);

// v h t
ASSERT_TRUE(hf.add(1, 0, 0));
ASSERT_TRUE(hf.add(2, 2, 1));
ASSERT_TRUE(hf.add(3, 5, 2));
hf.init();

#define ADD(v, h, a) \
do { \

@@ -376,10 +434,10 @@ TEST(reorganize, changed)
// pop a few blocks and check current version goes back down
db.remove_block();
hf.reorganize_from_block_height(&db, 8);
hf.reorganize_from_block_height(8);
ASSERT_EQ(hf.get_current_version(), 3);
db.remove_block();
hf.reorganize_from_block_height(&db, 7);
hf.reorganize_from_block_height(7);
ASSERT_EQ(hf.get_current_version(), 2);
db.remove_block();
ASSERT_EQ(hf.get_current_version(), 2);
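A further check one could layer on top of the TestDB above, exercising the set/get round trip and the "unset" sentinel directly (hypothetical test, not part of the commit; it relies only on what TestDB defines in this file):

TEST(test_db, hard_fork_roundtrip)
{
  TestDB db;
  db.open("");  // initializes every starting height to the sentinel
  ASSERT_EQ(db.get_hard_fork_starting_height(2), std::numeric_limits<uint64_t>::max());
  db.set_hard_fork_starting_height(2, 5);
  ASSERT_EQ(db.get_hard_fork_starting_height(2), 5);
  db.set_hard_fork_version(0, 1);
  ASSERT_EQ(db.get_hard_fork_version(0), 1);
}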