From 1d266a94ba712adafacbd7276b1e19485c2ae7a5 Mon Sep 17 00:00:00 2001 From: PiRK Date: Mon, 14 Apr 2025 18:03:32 +0200 Subject: [PATCH 1/7] assumeutxo: Check deserialized coins for out of range values Summary: This is another small backport that can only be tested after we have the loadtxoutset RPC command, but that I think should be included before we allow loading UTXO snapshots because it may guard against amount malleation in snapshot files (see https://github.com/bitcoin/bitcoin/pull/28685#issuecomment-1770720541 "certain negative values seem to result in the same hash as their positive counter part") This is a partial backport of core#28685 https://github.com/bitcoin/bitcoin/pull/28685/commits/f6213929c519d0e615cacd3d6f479f1517be1662 Test Plan: `ninja all check-all` tested on my dev branch in feature_assumeutxo.py Reviewers: #bitcoin_abc Differential Revision: https://reviews.bitcoinabc.org/D17930 --- src/validation.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/validation.cpp b/src/validation.cpp index cda7ba3fb6..80e6d086bf 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -6702,6 +6702,12 @@ bool ChainstateManager::PopulateAndValidateSnapshot( coins_count - coins_left); return false; } + if (!MoneyRange(coin.GetTxOut().nValue)) { + LogPrintf("[snapshot] bad snapshot data after deserializing %d " + "coins - bad tx out value\n", + coins_count - coins_left); + return false; + } coins_cache.EmplaceCoinInternalDANGER(std::move(outpoint), std::move(coin)); From 93b63f8cf602158493eea0050f90e014ed78a1a6 Mon Sep 17 00:00:00 2001 From: James O'Beirne Date: Fri, 29 Mar 2019 15:31:54 -0400 Subject: [PATCH 2/7] rpc: add loadtxoutset Co-authored-by: Sebastian Falbesoner https://github.com/bitcoin/bitcoin/pull/27596/commits/ce585a9a158476b0ad3296477b922e79f308e795 core#28652 rpc: Do not wait for headers inside loadtxoutset core#29345 --- doc/assumeutxo.md | 2 +- doc/release-notes-27596.md | 26 ++++++++++ src/rpc/blockchain.cpp | 103 ++++++++++++++++++++++++++++++++++++- 3 files changed, 128 insertions(+), 3 deletions(-) create mode 100644 doc/release-notes-27596.md diff --git a/doc/assumeutxo.md b/doc/assumeutxo.md index 7f6784c52a..816ca8c78a 100644 --- a/doc/assumeutxo.md +++ b/doc/assumeutxo.md @@ -3,7 +3,7 @@ Assumeutxo is a feature that allows fast bootstrapping of a validating bitcoind instance with a very similar security model to assumevalid. -The RPC commands `dumptxoutset` and `loadtxoutset` (yet to be merged) are used to +The RPC commands `dumptxoutset` and `loadtxoutset` are used to respectively generate and load UTXO snapshots. The utility script `./contrib/devtools/utxo_snapshot.sh` may be of use. diff --git a/doc/release-notes-27596.md b/doc/release-notes-27596.md new file mode 100644 index 0000000000..da96b36189 --- /dev/null +++ b/doc/release-notes-27596.md @@ -0,0 +1,26 @@ +Pruning +------- + +When using assumeutxo with `-prune`, the prune budget may be exceeded if it is set +lower than 1100MB (i.e. `MIN_DISK_SPACE_FOR_BLOCK_FILES * 2`). Prune budget is normally +split evenly across each chainstate, unless the resulting prune budget per chainstate +is beneath `MIN_DISK_SPACE_FOR_BLOCK_FILES` in which case that value will be used. + +RPC +--- + +`loadtxoutset` has been added, which allows loading a UTXO snapshot of the format +generated by `dumptxoutset`.
Once this snapshot is loaded, its contents will be +deserialized into a second chainstate data structure, which is then used to sync to +the network's tip under a security model very much like `assumevalid`. + +Meanwhile, the original chainstate will complete the initial block download process in +the background, eventually validating up to the block that the snapshot is based upon. + +The result is a usable bitcoind instance that is current with the network tip in a +matter of minutes rather than hours. UTXO snapshots are typically obtained via +third-party sources (HTTP, torrent, etc.) which is reasonable since their contents +are always checked by hash. + +You can find more information on this process in the `assumeutxo` design +document (). diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index e0c7c54115..86a01e0a18 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -2623,7 +2624,7 @@ static RPCHelpMan getblockfilter() { static RPCHelpMan dumptxoutset() { return RPCHelpMan{ "dumptxoutset", - "Write the serialized UTXO set to disk.\n", + "Write the serialized UTXO set to a file.\n", { {"path", RPCArg::Type::STR, RPCArg::Optional::NO, "path to the output file. If relative, will be prefixed by " "datadir."}, @@ -2755,6 +2756,103 @@ UniValue CreateUTXOSnapshot(NodeContext &node, Chainstate &chainstate, return result; } +static RPCHelpMan loadtxoutset() { + return RPCHelpMan{ + "loadtxoutset", + "Load the serialized UTXO set from a file.\n" + "Once this snapshot is loaded, its contents will be deserialized into " + "a second chainstate data structure, which is then used to sync to the " + "network's tip. " + "Meanwhile, the original chainstate will complete the initial block " + "download process in the background, eventually validating up to the " + "block that the snapshot is based upon.\n\n" + "The result is a usable bitcoind instance that is current with the " + "network tip in a matter of minutes rather than hours. UTXO snapshots " + "are typically obtained from third-party sources (HTTP, torrent, etc.) " + "which is reasonable since their contents are always checked by " + "hash.\n\n" + "You can find more information on this process in the `assumeutxo` " + "design document " + "().", + { + {"path", RPCArg::Type::STR, RPCArg::Optional::NO, + "path to the snapshot file. 
If relative, will be prefixed by " + "datadir."}, + }, + RPCResult{RPCResult::Type::OBJ, + "", + "", + { + {RPCResult::Type::NUM, "coins_loaded", + "the number of coins loaded from the snapshot"}, + {RPCResult::Type::STR_HEX, "tip_hash", + "the hash of the base of the snapshot"}, + {RPCResult::Type::NUM, "base_height", + "the height of the base of the snapshot"}, + {RPCResult::Type::STR, "path", + "the absolute path that the snapshot was loaded from"}, + }}, + RPCExamples{HelpExampleCli("loadtxoutset", "utxo.dat")}, + [&](const RPCHelpMan &self, const Config &config, + const JSONRPCRequest &request) -> UniValue { + NodeContext &node = EnsureAnyNodeContext(request.context); + ChainstateManager &chainman = EnsureChainman(node); + fs::path path{AbsPathForConfigVal( + EnsureArgsman(node), fs::u8path(request.params[0].get_str()))}; + + FILE *file{fsbridge::fopen(path, "rb")}; + AutoFile afile{file}; + if (afile.IsNull()) { + throw JSONRPCError(RPC_INVALID_PARAMETER, + "Couldn't open file " + path.u8string() + + " for reading."); + } + + SnapshotMetadata metadata; + afile >> metadata; + + BlockHash base_blockhash = metadata.m_base_blockhash; + if (!chainman.GetParams() + .AssumeutxoForBlockhash(base_blockhash) + .has_value()) { + throw JSONRPCError( + RPC_INTERNAL_ERROR, + strprintf("Unable to load UTXO snapshot, " + "assumeutxo block hash in snapshot metadata not " + "recognized (%s)", + base_blockhash.ToString())); + } + CBlockIndex *snapshot_start_block = WITH_LOCK( + ::cs_main, + return chainman.m_blockman.LookupBlockIndex(base_blockhash)); + + if (!snapshot_start_block) { + throw JSONRPCError( + RPC_INTERNAL_ERROR, + strprintf("The base block header (%s) must appear in the " + "headers chain. Make sure all headers are " + "syncing, and call this RPC again.", + base_blockhash.ToString())); + } + if (!chainman.ActivateSnapshot(afile, metadata, false)) { + throw JSONRPCError(RPC_INTERNAL_ERROR, + "Unable to load UTXO snapshot " + + fs::PathToString(path)); + } + CBlockIndex *new_tip{ + WITH_LOCK(::cs_main, return chainman.ActiveTip())}; + + UniValue result(UniValue::VOBJ); + result.pushKV("coins_loaded", metadata.m_coins_count); + result.pushKV("tip_hash", new_tip->GetBlockHash().ToString()); + result.pushKV("base_height", new_tip->nHeight); + result.pushKV("path", fs::PathToString(path)); + return result; + }, + }; +} + void RegisterBlockchainRPCCommands(CRPCTable &t) { // clang-format off static const CRPCCommand commands[] = { @@ -2778,13 +2876,14 @@ void RegisterBlockchainRPCCommands(CRPCTable &t) { { "blockchain", preciousblock, }, { "blockchain", scantxoutset, }, { "blockchain", getblockfilter, }, + { "blockchain", dumptxoutset, }, + { "blockchain", loadtxoutset, }, /* Not shown in help */ { "hidden", invalidateblock, }, { "hidden", parkblock, }, { "hidden", reconsiderblock, }, { "hidden", syncwithvalidationinterfacequeue, }, - { "hidden", dumptxoutset, }, { "hidden", unparkblock, }, { "hidden", waitfornewblock, }, { "hidden", waitforblock, }, From 9b092ff2beabe77a7eb9edd21b56dd94d85b6e08 Mon Sep 17 00:00:00 2001 From: James O'Beirne Date: Fri, 29 Mar 2019 17:55:08 -0400 Subject: [PATCH 3/7] rpc: add getchainstates Co-authored-by: Ryan Ofsky https://github.com/bitcoin/bitcoin/pull/27596/commits/0f64bac6030334d798ae205cd7af4bf248feddd9 https://github.com/bitcoin/bitcoin/pull/28590/commits/a9ef702a877a964bac724a56e2c0b5bee4ea7586 (partial, TODO functional test) --- doc/release-notes-27596.md | 2 + src/rpc/blockchain.cpp | 92 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 94 
insertions(+) diff --git a/doc/release-notes-27596.md b/doc/release-notes-27596.md index da96b36189..799b82643f 100644 --- a/doc/release-notes-27596.md +++ b/doc/release-notes-27596.md @@ -24,3 +24,5 @@ are always checked by hash. You can find more information on this process in the `assumeutxo` design document (). + +`getchainstates` has been added to aid in monitoring the assumeutxo sync process. diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 86a01e0a18..b9e9f446f0 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -2853,6 +2853,97 @@ static RPCHelpMan loadtxoutset() { }; } +const std::vector RPCHelpForChainstate{ + {RPCResult::Type::NUM, "blocks", "number of blocks in this chainstate"}, + {RPCResult::Type::STR_HEX, "bestblockhash", "blockhash of the tip"}, + {RPCResult::Type::NUM, "difficulty", "difficulty of the tip"}, + {RPCResult::Type::NUM, "verificationprogress", + "progress towards the network tip"}, + {RPCResult::Type::STR_HEX, "snapshot_blockhash", /*optional=*/true, + "the base block of the snapshot this chainstate is based on, if any"}, + {RPCResult::Type::NUM, "coins_db_cache_bytes", "size of the coinsdb cache"}, + {RPCResult::Type::NUM, "coins_tip_cache_bytes", + "size of the coinstip cache"}, + {RPCResult::Type::BOOL, "validated", + "whether the chainstate is fully validated. True if all blocks in the " + "chainstate were validated, false if the chain is based on a snapshot and " + "the snapshot has not yet been validated."}, + +}; + +static RPCHelpMan getchainstates() { + return RPCHelpMan{ + "getchainstates", + "\nReturn information about chainstates.\n", + {}, + RPCResult{RPCResult::Type::OBJ, + "", + "", + { + {RPCResult::Type::NUM, "headers", + "the number of headers seen so far"}, + {RPCResult::Type::ARR, + "chainstates", + "list of the chainstates ordered by work, with the " + "most-work (active) chainstate last", + { + {RPCResult::Type::OBJ, "", "", RPCHelpForChainstate}, + }}, + }}, + RPCExamples{HelpExampleCli("getchainstates", "") + + HelpExampleRpc("getchainstates", "")}, + [&](const RPCHelpMan &self, const Config &config, + const JSONRPCRequest &request) -> UniValue { + LOCK(cs_main); + UniValue obj(UniValue::VOBJ); + + ChainstateManager &chainman = EnsureAnyChainman(request.context); + + auto make_chain_data = + [&](const Chainstate &cs, + bool validated) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { + AssertLockHeld(::cs_main); + UniValue data(UniValue::VOBJ); + if (!cs.m_chain.Tip()) { + return data; + } + const CChain &chain = cs.m_chain; + const CBlockIndex *tip = chain.Tip(); + + data.pushKV("blocks", chain.Height()); + data.pushKV("bestblockhash", tip->GetBlockHash().GetHex()); + data.pushKV("difficulty", GetDifficulty(tip)); + data.pushKV( + "verificationprogress", + GuessVerificationProgress(Params().TxData(), tip)); + data.pushKV("coins_db_cache_bytes", + cs.m_coinsdb_cache_size_bytes); + data.pushKV("coins_tip_cache_bytes", + cs.m_coinstip_cache_size_bytes); + if (cs.m_from_snapshot_blockhash) { + data.pushKV("snapshot_blockhash", + cs.m_from_snapshot_blockhash->ToString()); + } + data.pushKV("validated", validated); + return data; + }; + + obj.pushKV("headers", chainman.m_best_header + ? 
chainman.m_best_header->nHeight + : -1); + + const auto &chainstates = chainman.GetAll(); + UniValue obj_chainstates{UniValue::VARR}; + for (Chainstate *cs : chainstates) { + obj_chainstates.push_back( + make_chain_data(*cs, !cs->m_from_snapshot_blockhash || + chainstates.size() == 1)); + } + obj.pushKV("chainstates", std::move(obj_chainstates)); + return obj; + }}; +} + void RegisterBlockchainRPCCommands(CRPCTable &t) { // clang-format off static const CRPCCommand commands[] = { @@ -2878,6 +2969,7 @@ void RegisterBlockchainRPCCommands(CRPCTable &t) { { "blockchain", getblockfilter, }, { "blockchain", dumptxoutset, }, { "blockchain", loadtxoutset, }, + { "blockchain", getchainstates, }, /* Not shown in help */ { "hidden", invalidateblock, }, From 9951ca5c3b90564cf0b586ae62abfcd403e60544 Mon Sep 17 00:00:00 2001 From: James O'Beirne Date: Thu, 17 Jun 2021 16:09:38 -0400 Subject: [PATCH 4/7] test: add feature_assumeutxo functional test https://github.com/bitcoin/bitcoin/pull/27596/commits/42cae39356fd20d521aaf99aff1ed85856f3c9f3 core#28589 (race fixes) core#28590 (getchainstates returns a list of chainstates) core#28625 (check that loading a snapshot not matching AssumeUTXO parameters fails) https://github.com/bitcoin/bitcoin/pull/28639/commits/fafde92f84fb7c245bc3c1cd946a32c891861e5e core#28647 (Add assumeutxo test for wrong hash) core#28652 (fail early if snapshot block hash doesn't match AssumeUTXO parameters) core#28562 (`self.no_op`, `self.wait_until`) core#28666 (assumeutxo file with unknown block hash) core#28669 (check au file with changed outpoint index) core#28685 (add tests for coin malleation) Bitcoin ABC doesn't have the same malleation issue as Bitcoin Core, it was implemented correctly in D512: - Core : `it->second.nHeight * 2 + it->second.fCoinBase ? 1u : 0u` (problem of operator priority, missing parentheses) - ABC : `it->second.GetHeight() * 2 + it->second.IsCoinBase()` Grab the tests anyway core#29215 (spend coin from snapshot chainstate after loading) https://github.com/bitcoin/bitcoin/pull/29726/commits/b7ba60f81a33db876f88b5f9af1e5025d679b5be (add coverage for -reindex and assumeutxo) core#29354 (Assumeutxo with more than just coinbase transactions) core#29394 (Add test to ensure failure when mempool not empty) https://github.com/bitcoin/bitcoin/pull/29478/commits/2bc1ecfaa9b69a20388e913ec64967de2f506cd3 (Remove unnecessary sync_blocks in assumeutxo tests) core#29478 (Add test for loadtxoutset when headers are not synced) ----- assumeutxo test: Add RPC test for unset nTx and nChainTx values https://github.com/bitcoin/bitcoin/pull/29370/commits/f252e687ec94b6ccafb5bc44b7df3daeb473fdea https://github.com/bitcoin/bitcoin/pull/29370/commits/ef29c8b662309a438121a83f27fd7bdd1779700c (functional test only) ----------- test: assumeutxo snapshot block CheckBlockIndex crash test Add a test for a CheckBlockIndex crash that would happen before the previous "assumeutxo: Get rid of faked nTx and nChainTx values" commit. The crash was an assert failure in the (pindex->nChainTx == pindex->nTx + prev_chain_tx) check that would previously happen if the snapshot block was submitted after loading the snapshot and downloading a few blocks after the snapshot. In that case ReceivedBlockTransactions() previously would overwrite the nChainTx value of the submitted snapshot block with a fake value based on the previous block, so the (pindex->nChainTx == pindex->nTx + prev_chain_tx) check would later fail on the first block after the snapshot.
This test was originally posted by Martin Zumsande in https://github.com/bitcoin/bitcoin/pull/29370#issuecomment-1974096225 Co-authored-by: Martin Zumsande i fixme: commit or pr number ------------ core#29617 (test for coin_height > base_height & amount > money_supply) https://github.com/bitcoin/bitcoin/pull/28685/commits/f6213929c519d0e615cacd3d6f479f1517be1662 (Check deserialized coins for out of range values) core#30053 (coverage for "Couldn't open file..." error) core#29973 (ensure failure when importing a snapshot twice) --- src/kernel/chainparams.cpp | 9 + test/functional/feature_assumeutxo.py | 550 ++++++++++++++++++ .../test_framework/test_framework.py | 16 +- test/functional/test_framework/txtools.py | 19 +- test/functional/test_framework/wallet.py | 2 +- 5 files changed, 587 insertions(+), 9 deletions(-) create mode 100755 test/functional/feature_assumeutxo.py diff --git a/src/kernel/chainparams.cpp b/src/kernel/chainparams.cpp index 35359d582d..8d3fa0b479 100644 --- a/src/kernel/chainparams.cpp +++ b/src/kernel/chainparams.cpp @@ -485,6 +485,15 @@ class CRegTestParams : public CChainParams { .blockhash = BlockHash{uint256S("0x47cfb2b77860d250060e78d3248bb05092876545" "3cbcbdbc121e3c48b99a376c")}}, + {// For use by test/functional/feature_assumeutxo.py + .height = 299, + .hash_serialized = + AssumeutxoHash{uint256S("0xa966794ed5a2f9debaefc7ca48dbc5d5e12" + "a89ff9fe45bd00ec5732d074580a9")}, + .nChainTx = 334, + .blockhash = + BlockHash{uint256S("0x118a7d5473bccce9b314789e14ce426fc65fb09d" + "feda0131032bb6d86ed2fd0b")}}, }; chainTxData = ChainTxData{0, 0, 0}; diff --git a/test/functional/feature_assumeutxo.py b/test/functional/feature_assumeutxo.py new file mode 100755 index 0000000000..1d5b2e1a01 --- /dev/null +++ b/test/functional/feature_assumeutxo.py @@ -0,0 +1,550 @@ +#!/usr/bin/env python3 +# Copyright (c) 2021-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test for assumeutxo, a means of quickly bootstrapping a node using +a serialized version of the UTXO set at a certain height, which corresponds +to a hash that has been compiled into bitcoind. + +The assumeutxo value generated and used here is committed to in +`CRegTestParams::m_assumeutxo_data` in `src/kernel/chainparams.cpp`. 
+ +## Possible test improvements + +Interesting test cases could be loading an assumeutxo snapshot file with: + +- TODO: Valid snapshot file, but referencing a snapshot block that turns out to be + invalid, or has an invalid parent +- TODO: Valid snapshot file and snapshot block, but the block is not on the + most-work chain + +Interesting starting states could be loading a snapshot when the current chain tip is: + +- TODO: An ancestor of snapshot block +- TODO: Not an ancestor of the snapshot block but has less work +- TODO: The snapshot block +- TODO: A descendant of the snapshot block +- TODO: Not an ancestor or a descendant of the snapshot block and has more work + +""" +import os +from dataclasses import dataclass + +from test_framework.messages import CTransaction, FromHex +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal, assert_raises_rpc_error +from test_framework.wallet import MiniWallet, getnewdestination + +START_HEIGHT = 199 +SNAPSHOT_BASE_HEIGHT = 299 +FINAL_HEIGHT = 399 +COMPLETE_IDX = {"synced": True, "best_block_height": FINAL_HEIGHT} + + +class AssumeutxoTest(BitcoinTestFramework): + def set_test_params(self): + """Use the pregenerated, deterministic chain up to height 199.""" + self.num_nodes = 3 + self.rpc_timeout = 120 + self.extra_args = [ + [], + ["-fastprune", "-prune=1", "-blockfilterindex=1", "-coinstatsindex=1"], + [ + "-persistmempool=0", + "-txindex=1", + "-blockfilterindex=1", + "-coinstatsindex=1", + ], + ] + + def setup_network(self): + """Start with the nodes disconnected so that one can generate a snapshot + including blocks the other hasn't yet seen.""" + self.add_nodes(3) + self.start_nodes(extra_args=self.extra_args) + + def test_invalid_snapshot_scenarios(self, valid_snapshot_path): + self.log.info("Test different scenarios of loading invalid snapshot files") + with open(valid_snapshot_path, "rb") as f: + valid_snapshot_contents = f.read() + bad_snapshot_path = valid_snapshot_path + ".mod" + + def expected_error(log_msg="", rpc_details=""): + with self.nodes[1].assert_debug_log([log_msg]): + assert_raises_rpc_error( + -32603, + f"Unable to load UTXO snapshot{rpc_details}", + self.nodes[1].loadtxoutset, + bad_snapshot_path, + ) + + self.log.info( + " - snapshot file refering to a block that is not in the assumeutxo parameters" + ) + prev_block_hash = self.nodes[0].getblockhash(SNAPSHOT_BASE_HEIGHT - 1) + # Represents any unknown block hash + bogus_block_hash = "0" * 64 + for bad_block_hash in [bogus_block_hash, prev_block_hash]: + with open(bad_snapshot_path, "wb") as f: + # block hash of the snapshot base is stored right at the start (first 32 bytes) + f.write( + bytes.fromhex(bad_block_hash)[::-1] + valid_snapshot_contents[32:] + ) + error_details = f", assumeutxo block hash in snapshot metadata not recognized ({bad_block_hash})" + expected_error(rpc_details=error_details) + + self.log.info(" - snapshot file with wrong number of coins") + valid_num_coins = int.from_bytes(valid_snapshot_contents[32 : 32 + 8], "little") + for off in [-1, +1]: + with open(bad_snapshot_path, "wb") as f: + f.write(valid_snapshot_contents[:32]) + f.write((valid_num_coins + off).to_bytes(8, "little")) + f.write(valid_snapshot_contents[32 + 8 :]) + expected_error( + log_msg=( + "bad snapshot - coins left over after deserializing 298 coins" + if off == -1 + else "bad snapshot format or truncated snapshot after deserializing 299 coins" + ) + ) + + self.log.info(" - snapshot file with alternated UTXO data") + cases = [ 
+ # (content, offset, wrong_hash, custom_message) + # wrong outpoint hash + [ + b"\xff" * 32, + 0, + "74fab8900700c8a0a4c6b50330252d92d651088939a41b307a8fcdddfed65f77", + None, + ], + # wrong outpoint index + [ + (1).to_bytes(4, "little"), + 32, + "3872c8e52554070ca410faff98e42f63c99d08f536be343af7c04143e0e8f2b2", + None, + ], + # wrong coin code VARINT((coinbase ? 1 : 0) | (height << 1)) + # We expect b"\x81" (as seen in a dump of valid_snapshot_path) + [ + b"\x80", + 36, + "b14c9595737179fe57e6d7a9f8e879a440833fa95ba52d210f1f7e3c02be64b2", + None, + ], + # another wrong coin code + [ + b"\x83", + 36, + "296b4acd0d41dd4c0e845a7ed0cbdc602e5a7d092d5b4948a72835d2158f1e8e", + None, + ], + # wrong coin case with height 364 and coinbase 0 + [ + b"\x84\x58", + 36, + None, + "[snapshot] bad snapshot data after deserializing 0 coins", + ], + # Amount exceeds MAX_MONEY + [ + b"\xCA\xD2\x8F\x5A", + 38, + None, + "[snapshot] bad snapshot data after deserializing 0 coins - bad tx out value", + ], + ] + + for content, offset, wrong_hash, custom_message in cases: + with open(bad_snapshot_path, "wb") as f: + f.write(valid_snapshot_contents[: (32 + 8 + offset)]) + f.write(content) + f.write(valid_snapshot_contents[(32 + 8 + offset + len(content)) :]) + + log_msg = ( + custom_message + if custom_message is not None + else f"[snapshot] bad snapshot content hash: expected a966794ed5a2f9debaefc7ca48dbc5d5e12a89ff9fe45bd00ec5732d074580a9, got {wrong_hash}" + ) + expected_error(log_msg=log_msg) + + def test_headers_not_synced(self, valid_snapshot_path): + for node in self.nodes[1:]: + assert_raises_rpc_error( + -32603, + "The base block header (118a7d5473bccce9b314789e14ce426fc65fb09dfeda0131032bb6d86ed2fd0b) must appear in the headers chain. Make sure all headers are syncing, and call this RPC again.", + node.loadtxoutset, + valid_snapshot_path, + ) + + def test_invalid_mempool_state(self, dump_output_path): + self.log.info("Test bitcoind should fail when mempool not empty.") + node = self.nodes[2] + tx = MiniWallet(node).send_self_transfer(from_node=node) + + assert tx["txid"] in node.getrawmempool() + + # Attempt to load the snapshot on Node 2 and expect it to fail + with node.assert_debug_log( + expected_msgs=[ + "[snapshot] can't activate a snapshot when mempool not empty" + ] + ): + assert_raises_rpc_error( + -32603, + "Unable to load UTXO snapshot", + node.loadtxoutset, + dump_output_path, + ) + + self.restart_node(2, extra_args=self.extra_args[2]) + + def test_invalid_file_path(self): + self.log.info("Test bitcoind should fail when file path is invalid.") + node = self.nodes[0] + path = os.path.join(node.datadir, self.chain, "invalid", "path") + assert_raises_rpc_error( + -8, + f"Couldn't open file {path} for reading.", + node.loadtxoutset, + path, + ) + + def run_test(self): + """ + Bring up two (disconnected) nodes, mine some new blocks on the first, + and generate a UTXO snapshot. + + Load the snapshot into the second, ensure it syncs to tip and completes + background validation when connected to the first. + """ + n0 = self.nodes[0] + n1 = self.nodes[1] + n2 = self.nodes[2] + + self.mini_wallet = MiniWallet(n0) + + # Mock time for a deterministic chain + for n in self.nodes: + n.setmocktime(n.getblockheader(n.getbestblockhash())["time"]) + + # Generate a series of blocks that `n0` will have in the snapshot, + # but that n1 and n2 don't yet see. 
+ assert n0.getblockcount() == START_HEIGHT + blocks = {START_HEIGHT: Block(n0.getbestblockhash(), 1, START_HEIGHT + 1)} + for i in range(100): + block_tx = 1 + if i % 3 == 0: + self.mini_wallet.send_self_transfer(from_node=n0) + block_tx += 1 + self.generate(n0, nblocks=1, sync_fun=self.no_op) + height = n0.getblockcount() + hash_ = n0.getbestblockhash() + blocks[height] = Block( + hash_, block_tx, blocks[height - 1].chain_tx + block_tx + ) + if i == 4: + # Create a stale block that forks off the main chain before the snapshot. + temp_invalid = n0.getbestblockhash() + n0.invalidateblock(temp_invalid) + stale_hash = self.generateblock( + n0, output="raw(aaaa)", transactions=[], sync_fun=self.no_op + )["hash"] + n0.invalidateblock(stale_hash) + n0.reconsiderblock(temp_invalid) + stale_block = n0.getblock(stale_hash, 0) + + self.log.info("-- Testing assumeutxo + some indexes + pruning") + + assert_equal(n0.getblockcount(), SNAPSHOT_BASE_HEIGHT) + assert_equal(n1.getblockcount(), START_HEIGHT) + + self.log.info(f"Creating a UTXO snapshot at height {SNAPSHOT_BASE_HEIGHT}") + dump_output = n0.dumptxoutset("utxos.dat") + + self.log.info("Test loading snapshot when headers are not synced") + self.test_headers_not_synced(dump_output["path"]) + + # In order for the snapshot to activate, we have to ferry over the new + # headers to n1 and n2 so that they see the header of the snapshot's + # base block while disconnected from n0. + for i in range(1, 300): + block = n0.getblock(n0.getblockhash(i), 0) + # make n1 and n2 aware of the new header, but don't give them the + # block. + n1.submitheader(block) + n2.submitheader(block) + + # Ensure everyone is seeing the same headers. + for n in self.nodes: + assert_equal(n.getblockchaininfo()["headers"], SNAPSHOT_BASE_HEIGHT) + + assert_equal( + dump_output["txoutset_hash"], + "a966794ed5a2f9debaefc7ca48dbc5d5e12a89ff9fe45bd00ec5732d074580a9", + ) + assert_equal(dump_output["nchaintx"], blocks[SNAPSHOT_BASE_HEIGHT].chain_tx) + assert_equal(n0.getblockchaininfo()["blocks"], SNAPSHOT_BASE_HEIGHT) + + # Mine more blocks on top of the snapshot that n1 hasn't yet seen. This + # will allow us to test n1's sync-to-tip on top of a snapshot. + self.generate(n0, nblocks=100, sync_fun=self.no_op) + + assert_equal(n0.getblockcount(), FINAL_HEIGHT) + assert_equal(n1.getblockcount(), START_HEIGHT) + + assert_equal(n0.getblockchaininfo()["blocks"], FINAL_HEIGHT) + + self.test_invalid_mempool_state(dump_output["path"]) + self.test_invalid_snapshot_scenarios(dump_output["path"]) + self.test_invalid_file_path() + + self.log.info(f"Loading snapshot into second node from {dump_output['path']}") + loaded = n1.loadtxoutset(dump_output["path"]) + assert_equal(loaded["coins_loaded"], SNAPSHOT_BASE_HEIGHT) + assert_equal(loaded["base_height"], SNAPSHOT_BASE_HEIGHT) + + def check_tx_counts(final: bool) -> None: + """Check nTx and nChainTx intermediate values right after loading + the snapshot, and final values after the snapshot is validated.""" + for height, block in blocks.items(): + tx = n1.getblockheader(block.hash)["nTx"] + chain_tx = n1.getchaintxstats(nblocks=1, blockhash=block.hash)[ + "txcount" + ] + + # Intermediate nTx of the starting block should be set, but nTx of + # later blocks should be 0 before they are downloaded + if final or height == START_HEIGHT: + assert_equal(tx, block.tx) + else: + assert_equal(tx, 0) + + # Intermediate nChainTx of the starting block and snapshot block + # should be set, but others should be 0 until they are downloaded. 
+ if final or height in (START_HEIGHT, SNAPSHOT_BASE_HEIGHT): + assert_equal(chain_tx, block.chain_tx) + else: + assert_equal(chain_tx, 0) + + check_tx_counts(final=False) + + normal, snapshot = n1.getchainstates()["chainstates"] + assert_equal(normal["blocks"], START_HEIGHT) + assert_equal(normal.get("snapshot_blockhash"), None) + assert_equal(normal["validated"], True) + assert_equal(snapshot["blocks"], SNAPSHOT_BASE_HEIGHT) + assert_equal(snapshot["snapshot_blockhash"], dump_output["base_hash"]) + assert_equal(snapshot["validated"], False) + + assert_equal(n1.getblockchaininfo()["blocks"], SNAPSHOT_BASE_HEIGHT) + + self.log.info( + "Submit a stale block that forked off the chain before the snapshot" + ) + # Normally a block like this would not be downloaded, but if it is + # submitted early before the background chain catches up to the fork + # point, it winds up in m_blocks_unlinked and triggers a corner case + # that previously crashed CheckBlockIndex. + n1.submitblock(stale_block) + n1.getchaintips() + n1.getblock(stale_hash) + + self.log.info( + "Submit a spending transaction for a snapshot chainstate coin to the mempool" + ) + # spend the coinbase output of the first block that is not available on node1 + spend_coin_blockhash = n1.getblockhash(START_HEIGHT + 1) + assert_raises_rpc_error( + -1, "Block not found on disk", n1.getblock, spend_coin_blockhash + ) + prev_tx = n0.getblock(spend_coin_blockhash, 3)["tx"][0] + prevout = { + "txid": prev_tx["txid"], + "vout": 0, + "scriptPubKey": prev_tx["vout"][0]["scriptPubKey"]["hex"], + "amount": prev_tx["vout"][0]["value"], + } + privkey = n0.get_deterministic_priv_key().key + raw_tx = n1.createrawtransaction( + [prevout], {getnewdestination()[2]: 24_990_000} + ) + signed_tx = n1.signrawtransactionwithkey(raw_tx, [privkey], [prevout])["hex"] + signed_txid = FromHex(CTransaction(), signed_tx).rehash() + + assert n1.gettxout(prev_tx["txid"], 0) is not None + n1.sendrawtransaction(signed_tx) + assert signed_txid in n1.getrawmempool() + assert not n1.gettxout(prev_tx["txid"], 0) + + PAUSE_HEIGHT = FINAL_HEIGHT - 40 + + self.log.info("Restarting node to stop at height %d", PAUSE_HEIGHT) + self.restart_node( + 1, extra_args=[f"-stopatheight={PAUSE_HEIGHT}", *self.extra_args[1]] + ) + + # Finally connect the nodes and let them sync. + # + # Set `wait_for_connect=False` to avoid a race between performing connection + # assertions and the -stopatheight tripping. + self.connect_nodes(0, 1, wait_for_connect=False) + + n1.wait_until_stopped(timeout=5) + + self.log.info("Checking that blocks are segmented on disk") + assert self.has_blockfile(n1, "00000"), "normal blockfile missing" + assert self.has_blockfile(n1, "00001"), "assumed blockfile missing" + assert not self.has_blockfile(n1, "00002"), "too many blockfiles" + + self.log.info( + "Restarted node before snapshot validation completed, reloading..." + ) + self.restart_node(1, extra_args=self.extra_args[1]) + + # Send snapshot block to n1 out of order. This makes the test less + # realistic because normally the snapshot block is one of the last + # blocks downloaded, but its useful to test because it triggers more + # corner cases in ReceivedBlockTransactions() and CheckBlockIndex() + # setting and testing nChainTx values, and it exposed previous bugs. + snapshot_hash = n0.getblockhash(SNAPSHOT_BASE_HEIGHT) + snapshot_block = n0.getblock(snapshot_hash, 0) + n1.submitblock(snapshot_block) + + self.connect_nodes(0, 1) + + self.log.info(f"Ensuring snapshot chain syncs to tip. 
({FINAL_HEIGHT})") + self.wait_until( + lambda: n1.getchainstates()["chainstates"][-1]["blocks"] == FINAL_HEIGHT + ) + self.sync_blocks(nodes=(n0, n1)) + + self.log.info("Ensuring background validation completes") + self.wait_until(lambda: len(n1.getchainstates()["chainstates"]) == 1) + + # Ensure indexes have synced. + completed_idx_state = { + "basic block filter index": COMPLETE_IDX, + "coinstatsindex": COMPLETE_IDX, + } + self.wait_until(lambda: n1.getindexinfo() == completed_idx_state) + + self.log.info("Re-check nTx and nChainTx values") + check_tx_counts(final=True) + + for i in (0, 1): + n = self.nodes[i] + self.log.info( + f"Restarting node {i} to ensure (Check|Load)BlockIndex passes" + ) + self.restart_node(i, extra_args=self.extra_args[i]) + + assert_equal(n.getblockchaininfo()["blocks"], FINAL_HEIGHT) + + (chainstate,) = n.getchainstates()["chainstates"] + assert_equal(chainstate["blocks"], FINAL_HEIGHT) + + if i != 0: + # Ensure indexes have synced for the assumeutxo node + self.wait_until(lambda: n.getindexinfo() == completed_idx_state) + + # Node 2: all indexes + reindex + # ----------------------------- + + self.log.info("-- Testing all indexes + reindex") + assert_equal(n2.getblockcount(), START_HEIGHT) + + self.log.info(f"Loading snapshot into third node from {dump_output['path']}") + loaded = n2.loadtxoutset(dump_output["path"]) + assert_equal(loaded["coins_loaded"], SNAPSHOT_BASE_HEIGHT) + assert_equal(loaded["base_height"], SNAPSHOT_BASE_HEIGHT) + + for reindex_arg in ["-reindex=1", "-reindex-chainstate=1"]: + self.log.info( + f"Check that restarting with {reindex_arg} will delete the snapshot chainstate" + ) + self.restart_node(2, extra_args=[reindex_arg, *self.extra_args[2]]) + assert_equal(1, len(n2.getchainstates()["chainstates"])) + for i in range(1, 300): + block = n0.getblock(n0.getblockhash(i), 0) + n2.submitheader(block) + loaded = n2.loadtxoutset(dump_output["path"]) + assert_equal(loaded["coins_loaded"], SNAPSHOT_BASE_HEIGHT) + assert_equal(loaded["base_height"], SNAPSHOT_BASE_HEIGHT) + + normal, snapshot = n2.getchainstates()["chainstates"] + assert_equal(normal["blocks"], START_HEIGHT) + assert_equal(normal.get("snapshot_blockhash"), None) + assert_equal(normal["validated"], True) + assert_equal(snapshot["blocks"], SNAPSHOT_BASE_HEIGHT) + assert_equal(snapshot["snapshot_blockhash"], dump_output["base_hash"]) + assert_equal(snapshot["validated"], False) + + self.log.info( + "Check that loading the snapshot again will fail because there is already an active snapshot." 
+ ) + with n2.assert_debug_log( + expected_msgs=[ + "[snapshot] can't activate a snapshot-based chainstate more than once" + ] + ): + assert_raises_rpc_error( + -32603, + "Unable to load UTXO snapshot", + n2.loadtxoutset, + dump_output["path"], + ) + + self.connect_nodes(0, 2) + self.wait_until( + lambda: n2.getchainstates()["chainstates"][-1]["blocks"] == FINAL_HEIGHT + ) + self.sync_blocks() + + self.log.info("Ensuring background validation completes") + self.wait_until(lambda: len(n2.getchainstates()["chainstates"]) == 1) + + completed_idx_state = { + "basic block filter index": COMPLETE_IDX, + "coinstatsindex": COMPLETE_IDX, + "txindex": COMPLETE_IDX, + } + self.wait_until(lambda: n2.getindexinfo() == completed_idx_state) + + for i in (0, 2): + n = self.nodes[i] + self.log.info( + f"Restarting node {i} to ensure (Check|Load)BlockIndex passes" + ) + self.restart_node(i, extra_args=self.extra_args[i]) + + assert_equal(n.getblockchaininfo()["blocks"], FINAL_HEIGHT) + + (chainstate,) = n.getchainstates()["chainstates"] + assert_equal(chainstate["blocks"], FINAL_HEIGHT) + + if i != 0: + # Ensure indexes have synced for the assumeutxo node + self.wait_until(lambda: n.getindexinfo() == completed_idx_state) + + self.log.info("Test -reindex-chainstate of an assumeutxo-synced node") + self.restart_node(2, extra_args=["-reindex-chainstate=1", *self.extra_args[2]]) + assert_equal(n2.getblockchaininfo()["blocks"], FINAL_HEIGHT) + self.wait_until(lambda: n2.getblockcount() == FINAL_HEIGHT) + + self.log.info("Test -reindex of an assumeutxo-synced node") + self.restart_node(2, extra_args=["-reindex=1", *self.extra_args[2]]) + self.connect_nodes(0, 2) + self.wait_until(lambda: n2.getblockcount() == FINAL_HEIGHT) + + +@dataclass +class Block: + hash: str + tx: int + chain_tx: int + + +if __name__ == "__main__": + AssumeutxoTest().main() diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py index a3dcf9e69d..fb0e99b8c5 100644 --- a/test/functional/test_framework/test_framework.py +++ b/test/functional/test_framework/test_framework.py @@ -670,7 +670,14 @@ def restart_node(self, i, extra_args=None): def wait_for_node_exit(self, i, timeout): self.nodes[i].process.wait(timeout) - def connect_nodes(self, a, b): + def connect_nodes(self, a, b, *, wait_for_connect: bool = True): + """ + Kwargs: + wait_for_connect: if True, block until the nodes are verified as connected. You might + want to disable this when using -stopatheight with one of the connected nodes, + since there will be a race between the actual connection and performing + the assertions before one node shuts down. + """ from_connection = self.nodes[a] to_connection = self.nodes[b] @@ -680,6 +687,9 @@ def connect_nodes(self, a, b): ip_port = f"{host}:{str(to_connection.p2p_port)}" from_connection.addnode(ip_port, "onetry") + if not wait_for_connect: + return + # Use subversion as peer id. 
Test nodes have their node number appended to the user agent string from_connection_subver = from_connection.getnetworkinfo()["subversion"] to_connection_subver = to_connection.getnetworkinfo()["subversion"] @@ -1133,3 +1143,7 @@ def is_zmq_compiled(self): def is_usdt_compiled(self): """Checks whether the USDT tracepoints were compiled.""" return self.config["components"].getboolean("ENABLE_USDT_TRACEPOINTS") + + def has_blockfile(self, node, filenum: str): + blocksdir = os.path.join(node.datadir, self.chain, "blocks", "") + return os.path.isfile(os.path.join(blocksdir, f"blk{filenum}.dat")) diff --git a/test/functional/test_framework/txtools.py b/test/functional/test_framework/txtools.py index c1645140ff..032c88ef3e 100644 --- a/test/functional/test_framework/txtools.py +++ b/test/functional/test_framework/txtools.py @@ -9,7 +9,7 @@ VOUT_VALUE_SIZE = 8 -def pad_tx(tx: CTransaction, pad_to_size: int = MIN_TX_SIZE): +def pad_tx(tx: CTransaction, pad_to_size: int = MIN_TX_SIZE, deterministic=False): """ Pad a transaction with op_return junk data until it is at least pad_to_size, or leave it alone if it's already bigger than that. @@ -61,18 +61,21 @@ def pad_tx(tx: CTransaction, pad_to_size: int = MIN_TX_SIZE): required_padding -= data_size + VOUT_VALUE_SIZE + 3 - tx.vout.append(CTxOut(0, CScript([OP_RETURN, random.randbytes(data_size)]))) + # Note that this deterministic data affect the assumeutxo/dumptxoutset hash + # used in feature_assumeutxo.py and set in CRegTestParams::m_assumeutxo_data + data = b"\x00" * data_size if deterministic else random.randbytes(data_size) + tx.vout.append(CTxOut(0, CScript([OP_RETURN, data]))) tx.rehash() -def pad_raw_tx(rawtx_hex, min_size=MIN_TX_SIZE): +def pad_raw_tx(rawtx_hex, min_size=MIN_TX_SIZE, deterministic=False): """ Pad a raw transaction with OP_RETURN data until it reaches at least min_size """ tx = CTransaction() FromHex(tx, rawtx_hex) - pad_tx(tx, min_size) + pad_tx(tx, min_size, deterministic) return ToHex(tx) @@ -89,9 +92,11 @@ def rawtx_length(rawtx): return len(bytes.fromhex(rawtx)) def test_size(requested_size, expected_size): - self.assertEqual( - rawtx_length(pad_raw_tx(raw_tx, requested_size)), expected_size - ) + for deterministic in (True, False): + self.assertEqual( + rawtx_length(pad_raw_tx(raw_tx, requested_size, deterministic)), + expected_size, + ) self.assertEqual(rawtx_length(raw_tx), 85) diff --git a/test/functional/test_framework/wallet.py b/test/functional/test_framework/wallet.py index 94f52ae58d..e9de0699c1 100644 --- a/test/functional/test_framework/wallet.py +++ b/test/functional/test_framework/wallet.py @@ -350,7 +350,7 @@ def create_self_transfer_multi( ), ) - pad_tx(tx, target_size or 100) + pad_tx(tx, target_size or 100, deterministic=True) txid = tx.rehash() return { From 262f63f1de20ac9caa70a979f339caf3a017e222 Mon Sep 17 00:00:00 2001 From: Sjors Provoost Date: Mon, 13 Nov 2023 15:19:12 +0400 Subject: [PATCH 5/7] test: add assumeutxo wallet test Co-Authored-By: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> core#28838 https://github.com/bitcoin/bitcoin/pull/29354/commits/fa5cd66f0a47d1b759c93d01524ee4558432c0cc https://github.com/bitcoin/bitcoin/pull/29478/commits/2bc1ecfaa9b69a20388e913ec64967de2f506cd3 https://github.com/bitcoin/bitcoin/pull/30678/commits/7e3dbe4180cbeb65e59b53d9fa98509e9189549d (functional test only) https://github.com/bitcoin/bitcoin/pull/30678/commits/f20fe33e94c6752e5d2ed92511c0bf51a10716ee ------ test: add functional test for balance after snapshot completion Use a third node for this, which 
doesn't get restarted like the second node. This test would fail without the previous commit. fixme: find commit hash ----- test, assumeutxo: import descriptors during background sync https://github.com/bitcoin/bitcoin/pull/30909/commits/595edee169045b6735b76ff9721677f0e43f13e5 --- test/functional/test_framework/wallet.py | 11 ++ test/functional/test_runner.py | 1 + test/functional/wallet_assumeutxo.py | 241 +++++++++++++++++++++++ 3 files changed, 253 insertions(+) create mode 100755 test/functional/wallet_assumeutxo.py diff --git a/test/functional/test_framework/wallet.py b/test/functional/test_framework/wallet.py index e9de0699c1..05ad9bec8d 100644 --- a/test/functional/test_framework/wallet.py +++ b/test/functional/test_framework/wallet.py @@ -15,6 +15,7 @@ key_to_p2pkh, ) from test_framework.blocktools import COINBASE_MATURITY +from test_framework import cashaddr from test_framework.hash import hash160 from test_framework.key import ECKey from test_framework.messages import XEC, COutPoint, CTransaction, CTxIn, CTxOut @@ -473,3 +474,13 @@ def address_to_scriptpubkey(address): # TODO: also support other address formats else: assert False + + +def cashaddr_to_scriptpubkey(address: str) -> CScript: + """Converts a given CashAddress to the corresponding output script (scriptPubKey).""" + prefix, kind, addr_hash = cashaddr.decode(address) + if kind == cashaddr.PUBKEY_TYPE: + return CScript([OP_DUP, OP_HASH160, addr_hash, OP_EQUALVERIFY, OP_CHECKSIG]) + if kind == cashaddr.SCRIPT_TYPE: + return CScript([OP_HASH160, addr_hash, OP_EQUAL]) + assert False diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 414917af3c..d9709aad91 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -132,6 +132,7 @@ "wallet_createwallet.py": [["--usecli"], ["--descriptors"]], "wallet_encryption.py": [["--descriptors"]], "wallet_hd.py": [["--descriptors"]], + "wallet_assumeutxo.py": [["--descriptors"]], "wallet_importprunedfunds.py": [["--descriptors"]], # FIXME: "wallet_keypool.py": [["--descriptors"]], "wallet_keypool_topup.py": [["--descriptors"]], diff --git a/test/functional/wallet_assumeutxo.py b/test/functional/wallet_assumeutxo.py new file mode 100755 index 0000000000..764fd9e258 --- /dev/null +++ b/test/functional/wallet_assumeutxo.py @@ -0,0 +1,241 @@ +#!/usr/bin/env python3 +# Copyright (c) 2023-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test for assumeutxo wallet related behavior. +See feature_assumeutxo.py for background. 
+ +## Possible test improvements + +- TODO: test loading a wallet (backup) on a pruned node + +""" +from test_framework.cashaddr import decode +from test_framework.descriptors import descsum_create +from test_framework.test_framework import BitcoinTestFramework +from test_framework.messages import COIN +from test_framework.util import ( + assert_equal, + assert_raises_rpc_error, + ensure_for, +) +from test_framework.wallet import MiniWallet, cashaddr_to_scriptpubkey +from test_framework.wallet_util import get_generate_key + +START_HEIGHT = 199 +SNAPSHOT_BASE_HEIGHT = 299 +FINAL_HEIGHT = 399 + + +class AssumeutxoTest(BitcoinTestFramework): + def skip_test_if_missing_module(self): + self.skip_if_no_wallet() + def set_test_params(self): + """Use the pregenerated, deterministic chain up to height 199.""" + self.num_nodes = 3 + self.rpc_timeout = 120 + self.extra_args = [ + [], + [], + [], + ] + + def setup_network(self): + """Start with the nodes disconnected so that one can generate a snapshot + including blocks the other hasn't yet seen.""" + self.add_nodes(3) + self.start_nodes(extra_args=self.extra_args) + + def import_descriptor(self, node, wallet_name, key, timestamp): + import_request = [{"desc": descsum_create("pkh(" + key.pubkey + ")"), + "timestamp": timestamp, + "label": "Descriptor import test"}] + wrpc = node.get_wallet_rpc(wallet_name) + return wrpc.importdescriptors(import_request) + + def run_test(self): + """ + Bring up two (disconnected) nodes, mine some new blocks on the first, + and generate a UTXO snapshot. + + Load the snapshot into the second, ensure it syncs to tip and completes + background validation when connected to the first. + """ + n0 = self.nodes[0] + n1 = self.nodes[1] + n2 = self.nodes[2] + + self.mini_wallet = MiniWallet(n0) + + # Mock time for a deterministic chain + for n in self.nodes: + n.setmocktime(n.getblockheader(n.getbestblockhash())['time']) + + # Create a wallet that we will create a backup for later (at snapshot height) + n0.createwallet('w') + w = n0.get_wallet_rpc("w") + w_address = w.getnewaddress() + + # Create another wallet and backup now (before snapshot height) + n0.createwallet('w2') + w2 = n0.get_wallet_rpc("w2") + w2_address = w2.getnewaddress() + w2.backupwallet("backup_w2.dat") + + # Generate a series of blocks that `n0` will have in the snapshot, + # but that n1 doesn't yet see. In order for the snapshot to activate, + # though, we have to ferry over the new headers to n1 so that it + # isn't waiting forever to see the header of the snapshot's base block + # while disconnected from n0. + for i in range(100): + if i % 3 == 0: + self.mini_wallet.send_self_transfer(from_node=n0) + self.generate(n0, nblocks=1, sync_fun=self.no_op) + newblock = n0.getblock(n0.getbestblockhash(), 0) + + # make n1 aware of the new header, but don't give it the block. + n1.submitheader(newblock) + n2.submitheader(newblock) + + # Ensure everyone is seeing the same headers. 
+ for n in self.nodes: + assert_equal(n.getblockchaininfo()[ + "headers"], SNAPSHOT_BASE_HEIGHT) + + # This backup is created at the snapshot height, so it's + # not part of the background sync anymore + w.backupwallet("backup_w.dat") + + self.log.info("-- Testing assumeutxo") + + assert_equal(n0.getblockcount(), SNAPSHOT_BASE_HEIGHT) + assert_equal(n1.getblockcount(), START_HEIGHT) + + self.log.info( + f"Creating a UTXO snapshot at height {SNAPSHOT_BASE_HEIGHT}") + dump_output = n0.dumptxoutset('utxos.dat') + + assert_equal( + dump_output['txoutset_hash'], + 'a966794ed5a2f9debaefc7ca48dbc5d5e12a89ff9fe45bd00ec5732d074580a9') + assert_equal(dump_output['nchaintx'], 334) + assert_equal(n0.getblockchaininfo()["blocks"], SNAPSHOT_BASE_HEIGHT) + + # Mine more blocks on top of the snapshot that n1 hasn't yet seen. This + # will allow us to test n1's sync-to-tip on top of a snapshot. + w_skp = cashaddr_to_scriptpubkey(w_address) + w2_skp = cashaddr_to_scriptpubkey(w2_address) + for i in range(100): + if i % 3 == 0: + self.mini_wallet.send_to(from_node=n0, scriptPubKey=w_skp, amount=1 * COIN) + self.mini_wallet.send_to(from_node=n0, scriptPubKey=w2_skp, amount=10 * COIN) + self.generate(n0, nblocks=1, sync_fun=self.no_op) + + assert_equal(n0.getblockcount(), FINAL_HEIGHT) + assert_equal(n1.getblockcount(), START_HEIGHT) + assert_equal(n2.getblockcount(), START_HEIGHT) + + assert_equal(n0.getblockchaininfo()["blocks"], FINAL_HEIGHT) + + self.log.info( + f"Loading snapshot into second node from {dump_output['path']}") + loaded = n1.loadtxoutset(dump_output['path']) + assert_equal(loaded['coins_loaded'], SNAPSHOT_BASE_HEIGHT) + assert_equal(loaded['base_height'], SNAPSHOT_BASE_HEIGHT) + + normal, snapshot = n1.getchainstates()["chainstates"] + assert_equal(normal['blocks'], START_HEIGHT) + assert_equal(normal.get('snapshot_blockhash'), None) + assert_equal(normal['validated'], True) + assert_equal(snapshot['blocks'], SNAPSHOT_BASE_HEIGHT) + assert_equal(snapshot['snapshot_blockhash'], dump_output['base_hash']) + assert_equal(snapshot['validated'], False) + + assert_equal(n1.getblockchaininfo()["blocks"], SNAPSHOT_BASE_HEIGHT) + + self.log.info("Backup from the snapshot height can be loaded during background sync") + n1.restorewallet("w", "backup_w.dat") + # Balance of w wallet is still still 0 because n1 has not synced yet + assert_equal(n1.getbalance(), 0) + + self.log.info("Backup from before the snapshot height can't be loaded during background sync") + assert_raises_rpc_error(-4, "Wallet loading failed. Error loading wallet. Wallet requires blocks to be downloaded, and software does not currently support loading wallets while blocks are being downloaded out of order when using assumeutxo snapshots. Wallet should be able to load successfully after node sync reaches height 299", n1.restorewallet, "w2", "backup_w2.dat") + + if self.options.descriptors: + self.log.info("Test loading descriptors during background sync") + wallet_name = "w1" + n1.createwallet(wallet_name, disable_private_keys=True) + key = get_generate_key() + time = n1.getblockchaininfo()['time'] + timestamp = 0 + expected_error_message = ( + f"Rescan failed for descriptor with creation timestamp {timestamp}. There " + f"was an error reading a block from time {time}, which is after or within " + f"7200 seconds of key creation, and could contain transactions pertaining " + f"to the descriptor. As a result, transactions and coins using this " + "descriptor may not appear in the wallet. 
This error is likely caused by " + "an in-progress assumeutxo background sync. Check logs or getchainstates " + "RPC for assumeutxo background sync progress and try again later." + ) + result = self.import_descriptor(n1, wallet_name, key, timestamp) + assert_equal(result[0]['error']['code'], -1) + assert_equal(result[0]['error']['message'], expected_error_message) + + PAUSE_HEIGHT = FINAL_HEIGHT - 40 + + self.log.info("Restarting node to stop at height %d", PAUSE_HEIGHT) + self.restart_node(1, extra_args=[ + f"-stopatheight={PAUSE_HEIGHT}", *self.extra_args[1]]) + + # Finally connect the nodes and let them sync. + # + # Set `wait_for_connect=False` to avoid a race between performing connection + # assertions and the -stopatheight tripping. + self.connect_nodes(0, 1, wait_for_connect=False) + + n1.wait_until_stopped(timeout=5) + + self.log.info( + "Restarted node before snapshot validation completed, reloading...") + self.restart_node(1, extra_args=self.extra_args[1]) + + # TODO: inspect state of e.g. the wallet before reconnecting + self.connect_nodes(0, 1) + + self.log.info( + f"Ensuring snapshot chain syncs to tip. ({FINAL_HEIGHT})") + self.wait_until(lambda: n1.getchainstates()[ + 'chainstates'][-1]['blocks'] == FINAL_HEIGHT) + self.sync_blocks(nodes=(n0, n1)) + + self.log.info("Ensuring background validation completes") + self.wait_until(lambda: len(n1.getchainstates()['chainstates']) == 1) + + self.log.info("Ensuring wallet can be restored from a backup that was created before the snapshot height") + # fixme: figure out why we can't reuse the wallet name w2 in the + # following test (test_framework.authproxy.JSONRPCException: Wallet name already exists. (-8)) + n1.restorewallet("w2_bis", "backup_w2.dat") + # Check balance of w2 wallet + assert_equal(n1.getbalance(), 340_000_000) + + # Check balance of w wallet after node is synced + n1.loadwallet("w") + w = n1.get_wallet_rpc("w") + assert_equal(w.getbalance(), 34_000_000) + + self.log.info("Check balance of a wallet that is active during snapshot completion") + n2.restorewallet("w", "backup_w.dat") + loaded = n2.loadtxoutset(dump_output['path']) + self.connect_nodes(0, 2) + self.wait_until(lambda: len(n2.getchainstates()['chainstates']) == 1) + ensure_for(duration=1, f=lambda: (n2.getbalance() == 34_000_000)) + + if self.options.descriptors: + self.log.info("Ensuring descriptors can be loaded after background sync") + n1.loadwallet(wallet_name) + result = self.import_descriptor(n1, wallet_name, key, timestamp) + assert_equal(result[0]['success'], True) + + +if __name__ == '__main__': + AssumeutxoTest().main() From a734aeeb4a21cfbcf728c02b30e140a869643d09 Mon Sep 17 00:00:00 2001 From: Fabian Jahr Date: Mon, 30 Sep 2024 01:42:47 +0200 Subject: [PATCH 6/7] rpc: Include assumeutxo as a failure reason of rescanblockchain https://github.com/bitcoin/bitcoin/pull/30909/commits/42d5d5336319aaf0f07345037db78239d9e012fc https://github.com/bitcoin/bitcoin/pull/30909/commits/9d2d9f7ce29636f08322df70cf6abec8e0ca3727 --- src/interfaces/chain.h | 3 +++ src/node/interfaces.cpp | 5 +++++ src/wallet/rpcwallet.cpp | 23 +++++++++++++++++++---- test/functional/wallet_assumeutxo.py | 8 ++++++-- 4 files changed, 33 insertions(+), 6 deletions(-) diff --git a/src/interfaces/chain.h b/src/interfaces/chain.h index 9a88682be2..2a520b4213 100644 --- a/src/interfaces/chain.h +++ b/src/interfaces/chain.h @@ -228,6 +228,9 @@ class Chain { //! Check if any block has been pruned. virtual bool havePruned() = 0; + //! Get the current prune height. 
+ virtual std::optional getPruneHeight() = 0; + //! Check if the node is ready to broadcast transactions. virtual bool isReadyToBroadcast() = 0; diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp index 7f73f8212a..241f0b4ecc 100644 --- a/src/node/interfaces.cpp +++ b/src/node/interfaces.cpp @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -716,6 +717,10 @@ namespace { return !chainman().m_blockman.LoadingBlocks() && !isInitialBlockDownload(); } + std::optional getPruneHeight() override { + LOCK(chainman().GetMutex()); + return GetPruneHeight(chainman().m_blockman, chainman().ActiveChain()); + } bool isInitialBlockDownload() override { return chainman().IsInitialBlockDownload(); } diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp index eebf48d2b1..2e40893453 100644 --- a/src/wallet/rpcwallet.cpp +++ b/src/wallet/rpcwallet.cpp @@ -3767,14 +3767,29 @@ RPCHelpMan rescanblockchain() { } } - // We can't rescan beyond non-pruned blocks, stop and throw an - // error + // We can't rescan unavailable blocks, stop and throw an error if (!pwallet->chain().hasBlocks(pwallet->GetLastBlockHash(), start_height, stop_height)) { + if (pwallet->chain().havePruned() && + pwallet->chain().getPruneHeight() >= start_height) { + throw JSONRPCError(RPC_MISC_ERROR, + "Can't rescan beyond pruned data. " + "Use RPC call getblockchaininfo to " + "determine your pruned height."); + } + if (pwallet->chain().hasAssumedValidChain()) { + throw JSONRPCError( + RPC_MISC_ERROR, + "Failed to rescan unavailable blocks likely due to " + "an in-progress assumeutxo background sync. Check " + "logs or getchainstates RPC for assumeutxo " + "background sync progress and try again later."); + } throw JSONRPCError( RPC_MISC_ERROR, - "Can't rescan beyond pruned data. Use RPC call " - "getblockchaininfo to determine your pruned height."); + "Failed to rescan unavailable blocks, potentially " + "caused by data corruption. If the issue persists you " + "may want to reindex (see -reindex option)."); } CHECK_NONFATAL(pwallet->chain().findAncestorByHeight( diff --git a/test/functional/wallet_assumeutxo.py b/test/functional/wallet_assumeutxo.py index 764fd9e258..54b3f97931 100755 --- a/test/functional/wallet_assumeutxo.py +++ b/test/functional/wallet_assumeutxo.py @@ -161,10 +161,10 @@ def run_test(self): self.log.info("Backup from before the snapshot height can't be loaded during background sync") assert_raises_rpc_error(-4, "Wallet loading failed. Error loading wallet. Wallet requires blocks to be downloaded, and software does not currently support loading wallets while blocks are being downloaded out of order when using assumeutxo snapshots. 
Wallet should be able to load successfully after node sync reaches height 299", n1.restorewallet, "w2", "backup_w2.dat") + wallet_name = "w1" + n1.createwallet(wallet_name, disable_private_keys=True) if self.options.descriptors: self.log.info("Test loading descriptors during background sync") - wallet_name = "w1" - n1.createwallet(wallet_name, disable_private_keys=True) key = get_generate_key() time = n1.getblockchaininfo()['time'] timestamp = 0 @@ -181,6 +181,10 @@ def run_test(self): assert_equal(result[0]['error']['code'], -1) assert_equal(result[0]['error']['message'], expected_error_message) + self.log.info("Test that rescanning blocks from before the snapshot fails when blocks are not available from the background sync yet") + w1 = n1.get_wallet_rpc(wallet_name) + assert_raises_rpc_error(-1, "Failed to rescan unavailable blocks likely due to an in-progress assumeutxo background sync. Check logs or getchainstates RPC for assumeutxo background sync progress and try again later.", w1.rescanblockchain, 100) + PAUSE_HEIGHT = FINAL_HEIGHT - 40 self.log.info("Restarting node to stop at height %d", PAUSE_HEIGHT) From f1a2e6dbf3ae2114b6d8e83dc9601e2db32c391e Mon Sep 17 00:00:00 2001 From: PiRK Date: Tue, 25 Mar 2025 23:52:15 +0100 Subject: [PATCH 7/7] add a mainnet assumeutxo checkpoint --- src/kernel/chainparams.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/kernel/chainparams.cpp b/src/kernel/chainparams.cpp index 8d3fa0b479..bf8fcb1ce6 100644 --- a/src/kernel/chainparams.cpp +++ b/src/kernel/chainparams.cpp @@ -213,7 +213,12 @@ class CMainParams : public CChainParams { checkpointData = CheckpointData(CBaseChainParams::MAIN); m_assumeutxo_data = { - // TODO to be specified in a future patch. + {.height = 888'000, + .hash_serialized = + AssumeutxoHash{uint256S("0x50493f6218661a189654dbad816821a656b519454190c63daf376610e4fa0a7e")}, + .nChainTx = 299'158'458, + .blockhash = + BlockHash{uint256S("0x00000000000000002b218d995a292c34bc4c0244bb4bbdad18f3a97e88ccb567")}}, }; // Data as of block