blockchain sync: reduce disk writes from 2 to 1 per tx

This commit is contained in:
jeffro256
2024-01-17 17:17:16 -06:00
parent 1bd57c8e95
commit c069c04ede
25 changed files with 1185 additions and 1171 deletions

View File

@@ -598,7 +598,7 @@ public:
cryptonote::tx_verification_context tvc = AUTO_VAL_INIT(tvc);
size_t pool_size = m_c.get_pool_transactions_count();
m_c.handle_incoming_tx({t_serializable_object_to_blob(tx), crypto::null_hash}, tvc, m_tx_relay, false);
m_c.handle_incoming_tx(t_serializable_object_to_blob(tx), tvc, m_tx_relay, false);
bool tx_added = pool_size + 1 == m_c.get_pool_transactions_count();
bool r = m_validator.check_tx_verification_context(tvc, tx_added, m_ev_index, tx);
CHECK_AND_NO_ASSERT_MES(r, false, "tx verification context check failed");
@@ -609,16 +609,17 @@ public:
{
log_event("cryptonote::transaction");
std::vector<cryptonote::tx_blob_entry> tx_blobs;
std::vector<cryptonote::blobdata> tx_blobs;
std::vector<cryptonote::tx_verification_context> tvcs;
cryptonote::tx_verification_context tvc0 = AUTO_VAL_INIT(tvc0);
for (const auto &tx: txs)
{
tx_blobs.push_back({t_serializable_object_to_blob(tx)});
tx_blobs.emplace_back(t_serializable_object_to_blob(tx));
tvcs.push_back(tvc0);
}
size_t pool_size = m_c.get_pool_transactions_count();
m_c.handle_incoming_txs(tx_blobs, tvcs, m_tx_relay, false);
for (size_t i = 0; i < tx_blobs.size(); ++i)
m_c.handle_incoming_tx(tx_blobs[i], tvcs[i], m_tx_relay, false);
size_t tx_added = m_c.get_pool_transactions_count() - pool_size;
bool r = m_validator.check_tx_verification_context_array(tvcs, tx_added, m_ev_index, txs);
CHECK_AND_NO_ASSERT_MES(r, false, "tx verification context check failed");

View File

@@ -43,7 +43,10 @@ class P2PTest():
self.create()
self.mine(80)
self.test_p2p_reorg()
self.test_p2p_tx_propagation()
txid = self.test_p2p_tx_propagation()
self.test_p2p_block_propagation_shared(txid)
txid = self.test_p2p_tx_propagation()
self.test_p2p_block_propagation_new(txid)
def reset(self):
print('Resetting blockchain')
@@ -157,7 +160,6 @@ class P2PTest():
loops -= 1
assert loops >= 0
def test_p2p_tx_propagation(self):
print('Testing P2P tx propagation')
daemon2 = Daemon(idx = 2)
@@ -182,6 +184,107 @@ class P2PTest():
assert len(res.tx_hashes) == 1
assert res.tx_hashes[0] == txid
return txid
def test_p2p_block_propagation_shared(self, mempool_txid):
    """Mine a block on daemon2 containing a tx that both daemons already hold
    in their mempools, then verify the block propagates to daemon3 near
    instantly and the tx moves out of both mempools into the chain.

    mempool_txid: hash of a tx expected to be in both daemons' mempools.
    """
    print('Testing P2P block propagation with shared TX')
    daemon2 = Daemon(idx = 2)
    daemon3 = Daemon(idx = 3)
    # check precondition: txid in daemon2's and daemon3's mempool
    res = daemon2.get_transaction_pool_hashes()
    assert mempool_txid in res.get('tx_hashes', [])
    res = daemon3.get_transaction_pool_hashes()
    assert mempool_txid in res.get('tx_hashes', [])
    # mine block on daemon2
    res = daemon2.generateblocks('42ey1afDFnn4886T7196doS9GPMzexD9gXpsZJDwVjeRVdFCSoHnv7KPbBeGpzJBzHRCAs9UxqeoyFQMYbqSWYTfJJQAWDm', 1)
    block_height = res.height
    # wait until both are synced, or 5 seconds, whichever is first.
    # the timeout should be very, very low here since block propagation, unlike tx propagation, is designed
    # to be as fast as possible.
    # and since both daemons already have the tx in their mempools, a notification of a new fluffy block, which is
    # pushed out immediately upon being mined, should result in an instant addition of that block to the chain,
    # without any round trips.
    # this test should only fail if you're running it on a potato where PoW verification + check_tx_inputs() takes
    # more than 5 seconds.
    deadline = time.monotonic() + 5
    result = None
    while result is None:
        res2 = daemon2.get_info()
        res3 = daemon3.get_info()
        if res2.top_block_hash == res3.top_block_hash:
            result = True
        elif time.monotonic() > deadline:
            result = False
        else:
            time.sleep(.25)
    assert result, "Shared tx block propagation timed out"
    # check the tx moved from each daemon's mempool into its chain at the top block
    for daemon in [daemon2, daemon3]:
        res = daemon.get_transaction_pool_hashes()
        assert 'tx_hashes' not in res or len(res.tx_hashes) == 0
        res = daemon.get_transactions([mempool_txid])
        assert len(res.get('txs', [])) == 1
        tx_details = res.txs[0]
        assert ('in_pool' not in tx_details) or (not tx_details.in_pool)
        assert tx_details.block_height == block_height
def test_p2p_block_propagation_new(self, mempool_txid):
    """Flush daemon2's mempool, mine a block on daemon3 containing a tx that
    daemon2 may no longer have, and verify the block still propagates and the
    tx ends up in both daemons' chains.

    mempool_txid: hash of a tx expected to be in daemon3's mempool.
    """
    # there's a big problem with this testcase in that there's not yet a way to prevent daemons from syncing
    # mempools only, but still allow block propagation. so there's a good chance that the transaction will be synced
    # between daemons between when daemon2's mempool is flushed and when daemon3 mines a new block. in this
    # scenario, this testcase basically just degenerates into test_p2p_block_propagation_shared(). however, if this
    # one ever fails but test_p2p_block_propagation_shared() passes, then we might have actually caught a problem
    # with block propagation when one of the daemons is missing a tx(s)
    print('Testing P2P block propagation with (possibly) new TX')
    daemon2 = Daemon(idx = 2)
    daemon3 = Daemon(idx = 3)
    # check precondition: txid in daemon3's mempool
    res = daemon3.get_transaction_pool_hashes()
    assert mempool_txid in res.get('tx_hashes', [])
    # flush daemon2 mempool
    daemon2.flush_txpool()
    # mine block on daemon3
    res = daemon3.generateblocks('42ey1afDFnn4886T7196doS9GPMzexD9gXpsZJDwVjeRVdFCSoHnv7KPbBeGpzJBzHRCAs9UxqeoyFQMYbqSWYTfJJQAWDm', 1)
    block_height = res.height
    # wait until both are synced, or 5 seconds, whichever is first.
    # the timeout should be very, very low here since block propagation, unlike tx propagation, is designed
    # to be as fast as possible. however, it might have to be raised if the daemon actually does make a round trip
    # to request a missing tx in a fluffy block
    deadline = time.monotonic() + 5
    result = None
    while result is None:
        res2 = daemon2.get_info()
        res3 = daemon3.get_info()
        if res2.top_block_hash == res3.top_block_hash:
            result = True
        elif time.monotonic() > deadline:
            result = False
        else:
            time.sleep(.25)
    assert result, "New tx block propagation timed out"
    # check the tx moved from each daemon's mempool into its chain at the top block
    for daemon in [daemon2, daemon3]:
        res = daemon.get_transaction_pool_hashes()
        assert 'tx_hashes' not in res or len(res.tx_hashes) == 0
        res = daemon.get_transactions([mempool_txid])
        assert len(res.get('txs', [])) == 1
        tx_details = res.txs[0]
        assert ('in_pool' not in tx_details) or (not tx_details.in_pool)
        assert tx_details.block_height == block_height
# script entry point: run the whole P2P functional test suite
if __name__ == '__main__':
    P2PTest().run_test()

View File

@@ -61,9 +61,10 @@ public:
bool have_block(const crypto::hash& id, int *where = NULL) const {return false;}
bool have_block_unlocked(const crypto::hash& id, int *where = NULL) const {return false;}
void get_blockchain_top(uint64_t& height, crypto::hash& top_id)const{height=0;top_id=crypto::null_hash;}
bool handle_incoming_tx(const cryptonote::tx_blob_entry& tx_blob, cryptonote::tx_verification_context& tvc, cryptonote::relay_method tx_relay, bool relayed) { return true; }
bool handle_incoming_txs(const std::vector<cryptonote::tx_blob_entry>& tx_blob, std::vector<cryptonote::tx_verification_context>& tvc, cryptonote::relay_method tx_relay, bool relayed) { return true; }
bool handle_incoming_tx(const cryptonote::blobdata& tx_blob, cryptonote::tx_verification_context& tvc, cryptonote::relay_method tx_relay, bool relayed) { return true; }
bool handle_single_incoming_block(const cryptonote::blobdata& block_blob, const cryptonote::block *b, cryptonote::block_verification_context& bvc, cryptonote::pool_supplement& extra_block_txs, bool update_miner_blocktemplate = true) { return true; }
bool handle_incoming_block(const cryptonote::blobdata& block_blob, const cryptonote::block *block, cryptonote::block_verification_context& bvc, bool update_miner_blocktemplate = true) { return true; }
bool handle_incoming_block(const cryptonote::blobdata& block_blob, const cryptonote::block *block, cryptonote::block_verification_context& bvc, cryptonote::pool_supplement& extra_block_txs, bool update_miner_blocktemplate = true) { return true; }
void pause_mine(){}
void resume_mine(){}
bool on_idle(){return true;}
@@ -74,6 +75,7 @@ public:
bool get_test_drop_download_height() const {return true;}
bool prepare_handle_incoming_blocks(const std::vector<cryptonote::block_complete_entry> &blocks_entry, std::vector<cryptonote::block> &blocks) { return true; }
bool cleanup_handle_incoming_blocks(bool force_sync = false) { return true; }
bool check_incoming_block_size(const cryptonote::blobdata& block_blob) const { return true; }
bool update_checkpoints(const bool skip_dns = false) { return true; }
uint64_t get_target_blockchain_height() const { return 1; }
size_t get_block_sync_size(uint64_t height) const { return BLOCKS_SYNCHRONIZING_DEFAULT_COUNT; }
@@ -82,6 +84,7 @@ public:
bool get_pool_transaction(const crypto::hash& id, cryptonote::blobdata& tx_blob, cryptonote::relay_category tx_category) const { return false; }
bool pool_has_tx(const crypto::hash &txid) const { return false; }
bool get_blocks(uint64_t start_offset, size_t count, std::vector<std::pair<cryptonote::blobdata, cryptonote::block>>& blocks, std::vector<cryptonote::blobdata>& txs) const { return false; }
bool get_transactions(const std::vector<crypto::hash>& txs_ids, std::vector<cryptonote::blobdata>& txs, std::vector<crypto::hash>& missed_txs, bool pruned = false) const { return false; }
bool get_transactions(const std::vector<crypto::hash>& txs_ids, std::vector<cryptonote::transaction>& txs, std::vector<crypto::hash>& missed_txs) const { return false; }
bool get_block_by_hash(const crypto::hash &h, cryptonote::block &blk, bool *orphan = NULL) const { return false; }
uint8_t get_ideal_hard_fork_version() const { return 0; }
@@ -89,7 +92,6 @@ public:
uint8_t get_hard_fork_version(uint64_t height) const { return 0; }
uint64_t get_earliest_ideal_height_for_version(uint8_t version) const { return 0; }
cryptonote::difficulty_type get_block_cumulative_difficulty(uint64_t height) const { return 0; }
bool fluffy_blocks_enabled() const { return false; }
uint64_t prevalidate_block_hashes(uint64_t height, const std::vector<crypto::hash> &hashes, const std::vector<uint64_t> &weights) { return 0; }
bool pad_transactions() { return false; }
uint32_t get_blockchain_pruning_seed() const { return 0; }