From ed990518283c5605aa725347360e204b3adf36ab Mon Sep 17 00:00:00 2001 From: Andrew Chow Date: Mon, 22 May 2023 11:18:41 -0400 Subject: [PATCH 1/5] Merge bitcoin/bitcoin#25796: rpc: add `descriptorprocesspsbt` rpc 1bce12acd3e271a7c88d9400b4e3a5645bc8a911 test: add test for `descriptorprocesspsbt` RPC (ishaanam) fb2a3a70e860aa87fb7a21f6554ed9f3ce901e2d rpc: add descriptorprocesspsbt rpc (ishaanam) Pull request description: This PR implements an RPC called `descriptorprocesspsbt`. This RPC is based off of `walletprocesspsbt`, but instead of interacting with the wallet to update, sign and finalize a psbt, it instead accepts an array of output descriptors and uses that information along with information from the mempool, txindex, and the utxo set to do so. `utxoupdatepsbt` also updates a psbt in this manner, but doesn't sign or finalize it. Because of this overlap, a helper function that is added in this PR is called by both `utxoupdatepsbt` and `descriptorprocesspsbt`. Whether or not the helper function signs a psbt is dictated by if the HidingSigningProvider passed to it contains any private information. There is also a test added in this PR for this new RPC that uses p2wsh, p2wpkh, and legacy outputs. Edit: see https://github.com/bitcoin/bitcoin/pull/25796#issuecomment-1228830963 ACKs for top commit: achow101: re-ACK 1bce12acd3e271a7c88d9400b4e3a5645bc8a911 instagibbs: reACK https://github.com/bitcoin/bitcoin/pull/25796/commits/1bce12acd3e271a7c88d9400b4e3a5645bc8a911 Tree-SHA512: e1d0334739943e71f2ee68b4db7637ebe725da62e7aa4be071f71c7196d2a5970a31ece96d91e372d34454cde8509e95ab0eebd2c8edb94f7d5a781a84f8fc5d --- src/rpc/client.cpp | 3 ++ src/rpc/rawtransaction.cpp | 93 ++++++++++++++++++++++++++++++++++--- src/rpc/util.cpp | 5 +- src/rpc/util.h | 5 +- src/test/fuzz/rpc.cpp | 1 + test/functional/rpc_psbt.py | 47 ++++++++++++++++++- 6 files changed, 143 insertions(+), 11 deletions(-) diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp index 655860d6b700..ec6b467efd26 100644 --- a/src/rpc/client.cpp +++ b/src/rpc/client.cpp @@ -150,6 +150,9 @@ static const CRPCConvertParam vRPCConvertParams[] = { "walletprocesspsbt", 1, "sign" }, { "walletprocesspsbt", 3, "bip32derivs" }, { "walletprocesspsbt", 4, "finalize" }, + { "descriptorprocesspsbt", 1, "descriptors"}, + { "descriptorprocesspsbt", 3, "bip32derivs" }, + { "descriptorprocesspsbt", 4, "finalize" }, { "createpsbt", 0, "inputs" }, { "createpsbt", 1, "outputs" }, { "createpsbt", 2, "locktime" }, diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp index 7a0dd1f71e05..e1bb68cb9e7d 100644 --- a/src/rpc/rawtransaction.cpp +++ b/src/rpc/rawtransaction.cpp @@ -221,8 +221,9 @@ static std::vector CreateTxDoc() }; } -// Update PSBT with information from the mempool, the UTXO set, the txindex, and the provided descriptors -PartiallySignedTransaction ProcessPSBT(const std::string& psbt_string, const CoreContext& context, const HidingSigningProvider& provider) +// Update PSBT with information from the mempool, the UTXO set, the txindex, and the provided descriptors. +// Optionally, sign the inputs that we can using information from the descriptors. 
+PartiallySignedTransaction ProcessPSBT(const std::string& psbt_string, const CoreContext& context, const HidingSigningProvider& provider, int sighash_type, bool finalize) { // Unserialize the transactions PartiallySignedTransaction psbtx; @@ -271,9 +272,10 @@ PartiallySignedTransaction ProcessPSBT(const std::string& psbt_string, const Cor } // Update script/keypath information using descriptor data. - // Note that SignPSBTInput does a lot more than just constructing ECDSA signatures - // we don't actually care about those here, in fact. - SignPSBTInput(provider, psbtx, /*index=*/i, &txdata, /*sighash=*/1); + // Note that SignPSBTInput does a lot more than just constructing ECDSA signatures. + // We only actually care about those if our signing provider doesn't hide private + // information, as is the case with `descriptorprocesspsbt` + SignPSBTInput(provider, psbtx, /*index=*/i, &txdata, sighash_type, /*out_sigdata=*/nullptr, finalize); } // Update script/keypath information using descriptor data. @@ -1760,7 +1762,9 @@ static RPCHelpMan utxoupdatepsbt() const PartiallySignedTransaction& psbtx = ProcessPSBT( request.params[0].get_str(), request.context, - HidingSigningProvider(&provider, /*hide_secret=*/true, /*hide_origin=*/false)); + HidingSigningProvider(&provider, /*hide_secret=*/true, /*hide_origin=*/false), + /*sighash_type=*/SIGHASH_ALL, + /*finalize=*/false); CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION); ssTx << psbtx; @@ -1957,6 +1961,82 @@ static RPCHelpMan analyzepsbt() }; } +RPCHelpMan descriptorprocesspsbt() +{ + return RPCHelpMan{"descriptorprocesspsbt", + "\nUpdate all segwit inputs in a PSBT with information from output descriptors, the UTXO set or the mempool. \n" + "Then, sign the inputs we are able to with information from the output descriptors. ", + { + {"psbt", RPCArg::Type::STR, RPCArg::Optional::NO, "The transaction base64 string"}, + {"descriptors", RPCArg::Type::ARR, RPCArg::Optional::NO, "An array of either strings or objects", { + {"", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "An output descriptor"}, + {"", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED, "An object with an output descriptor and extra information", { + {"desc", RPCArg::Type::STR, RPCArg::Optional::NO, "An output descriptor"}, + {"range", RPCArg::Type::RANGE, RPCArg::Default{1000}, "Up to what index HD chains should be explored (either end or [begin,end])"}, + }}, + }}, + {"sighashtype", RPCArg::Type::STR, RPCArg::Default{"DEFAULT for Taproot, ALL otherwise"}, "The signature hash type to sign with if not specified by the PSBT. 
Must be one of\n" + " \"DEFAULT\"\n" + " \"ALL\"\n" + " \"NONE\"\n" + " \"SINGLE\"\n" + " \"ALL|ANYONECANPAY\"\n" + " \"NONE|ANYONECANPAY\"\n" + " \"SINGLE|ANYONECANPAY\""}, + {"bip32derivs", RPCArg::Type::BOOL, RPCArg::Default{true}, "Include BIP 32 derivation paths for public keys if we know them"}, + {"finalize", RPCArg::Type::BOOL, RPCArg::Default{true}, "Also finalize inputs if possible"}, + }, + RPCResult{ + RPCResult::Type::OBJ, "", "", + { + {RPCResult::Type::STR, "psbt", "The base64-encoded partially signed transaction"}, + {RPCResult::Type::BOOL, "complete", "If the transaction has a complete set of signatures"}, + } + }, + RPCExamples{ + HelpExampleCli("descriptorprocesspsbt", "\"psbt\" \"[\\\"descriptor1\\\", \\\"descriptor2\\\"]\"") + + HelpExampleCli("descriptorprocesspsbt", "\"psbt\" \"[{\\\"desc\\\":\\\"mydescriptor\\\", \\\"range\\\":21}]\"") + }, + [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue +{ + // Add descriptor information to a signing provider + FlatSigningProvider provider; + + auto descs = request.params[1].get_array(); + for (size_t i = 0; i < descs.size(); ++i) { + EvalDescriptorStringOrObject(descs[i], provider, /*expand_priv=*/true); + } + + int sighash_type = ParseSighashString(request.params[2]); + bool bip32derivs = request.params[3].isNull() ? true : request.params[3].get_bool(); + bool finalize = request.params[4].isNull() ? true : request.params[4].get_bool(); + + const PartiallySignedTransaction& psbtx = ProcessPSBT( + request.params[0].get_str(), + request.context, + HidingSigningProvider(&provider, /*hide_secret=*/false, !bip32derivs), + sighash_type, + finalize); + + // Check whether or not all of the inputs are now signed + bool complete = true; + for (const auto& input : psbtx.inputs) { + complete &= PSBTInputSigned(input); + } + + CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION); + ssTx << psbtx; + + UniValue result(UniValue::VOBJ); + + result.pushKV("psbt", EncodeBase64(ssTx)); + result.pushKV("complete", complete); + + return result; +}, + }; +} + void RegisterRawTransactionRPCCommands(CRPCTable& t) { static const CRPCCommand commands[]{ @@ -1976,6 +2056,7 @@ void RegisterRawTransactionRPCCommands(CRPCTable& t) {"rawtransactions", &createpsbt}, {"rawtransactions", &converttopsbt}, {"rawtransactions", &utxoupdatepsbt}, + {"rawtransactions", &descriptorprocesspsbt}, {"rawtransactions", &joinpsbts}, {"rawtransactions", &analyzepsbt}, }; diff --git a/src/rpc/util.cpp b/src/rpc/util.cpp index 738a5dea162e..b5214882d36a 100644 --- a/src/rpc/util.cpp +++ b/src/rpc/util.cpp @@ -981,7 +981,7 @@ UniValue JSONRPCTransactionError(TransactionError terr, const std::string& err_s } } -std::vector EvalDescriptorStringOrObject(const UniValue& scanobject, FlatSigningProvider& provider) +std::vector EvalDescriptorStringOrObject(const UniValue& scanobject, FlatSigningProvider& provider, const bool expand_priv) { std::string desc_str; std::pair range = {0, 1000}; @@ -1014,6 +1014,9 @@ std::vector EvalDescriptorStringOrObject(const UniValue& scanobject, Fl if (!desc->Expand(i, provider, scripts, provider)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, strprintf("Cannot derive script without private keys: '%s'", desc_str)); } + if (expand_priv) { + desc->ExpandPrivate(/*pos=*/i, provider, /*out=*/provider); + } std::move(scripts.begin(), scripts.end(), std::back_inserter(ret)); } return ret; diff --git a/src/rpc/util.h b/src/rpc/util.h index cbbd8831b283..c83000fc4dcf 100644 --- a/src/rpc/util.h +++ b/src/rpc/util.h @@ -121,6 +121,8 @@ 
unsigned int ParseConfirmTarget(const UniValue& value, unsigned int max_target); //! Parse a JSON range specified as int64, or [int64, int64] std::pair ParseDescriptorRange(const UniValue& value); +/** Evaluate a descriptor given as a string, or as a {"desc":...,"range":...} object, with default range of 1000. */ +std::vector EvalDescriptorStringOrObject(const UniValue& scanobject, FlatSigningProvider& provider, const bool expand_priv = false); /** * Serializing JSON objects depends on the outer type. Only arrays and * dictionaries can be nested in json. The top-level outer type is "NONE". @@ -130,9 +132,6 @@ enum class OuterType { OBJ, NONE, // Only set on first recursion }; -/** Evaluate a descriptor given as a string, or as a {"desc":...,"range":...} object, with default range of 1000. */ -std::vector EvalDescriptorStringOrObject(const UniValue& scanobject, FlatSigningProvider& provider); - struct RPCArg { enum class Type { OBJ, diff --git a/src/test/fuzz/rpc.cpp b/src/test/fuzz/rpc.cpp index d234b27bb1db..09ba015c15cb 100644 --- a/src/test/fuzz/rpc.cpp +++ b/src/test/fuzz/rpc.cpp @@ -97,6 +97,7 @@ const std::vector RPC_COMMANDS_SAFE_FOR_FUZZING{ "decoderawtransaction", "decodescript", "deriveaddresses", + "descriptorprocesspsbt", "disconnectnode", "echo", "echojson", diff --git a/test/functional/rpc_psbt.py b/test/functional/rpc_psbt.py index 5788ff158336..d82be3ac11dd 100755 --- a/test/functional/rpc_psbt.py +++ b/test/functional/rpc_psbt.py @@ -34,7 +34,10 @@ find_output, random_bytes, ) -from test_framework.wallet_util import bytes_to_wif +from test_framework.wallet_util import ( + bytes_to_wif, + get_generate_key +) import json import os @@ -637,6 +640,48 @@ def test_psbt_input_keys(psbt_input, keys): assert_raises_rpc_error(-8, "PSBTs not compatible (different transactions)", self.nodes[0].combinepsbt, [psbt1, psbt2]) assert_equal(self.nodes[0].combinepsbt([psbt1, psbt1]), psbt1) + self.log.info("Test descriptorprocesspsbt updates and signs a psbt with descriptors") + + self.generate(self.nodes[2], 1) + + # Disable the wallet for node 2 since `descriptorprocesspsbt` does not use the wallet + self.restart_node(2, extra_args=["-disablewallet"]) + self.connect_nodes(0, 2) + self.connect_nodes(1, 2) + + key_info = get_generate_key() + key = key_info.privkey + address = key_info.p2pkh_addr + + descriptor = descsum_create(f"pkh({key})") + + txid = self.nodes[0].sendtoaddress(address, 1) + self.sync_all() + vout = find_output(self.nodes[0], txid, 1) + + psbt = self.nodes[2].createpsbt([{"txid": txid, "vout": vout}], {self.nodes[0].getnewaddress(): 0.99999}) + decoded = self.nodes[2].decodepsbt(psbt) + test_psbt_input_keys(decoded['inputs'][0], []) + + # Test that even if the wrong descriptor is given, `non_witness_utxo` + # is still added to the psbt + alt_descriptor = descsum_create(f"pkh({get_generate_key().privkey})") + alt_psbt = self.nodes[2].descriptorprocesspsbt(psbt=psbt, descriptors=[alt_descriptor], sighashtype="ALL")["psbt"] + decoded = self.nodes[2].decodepsbt(alt_psbt) + test_psbt_input_keys(decoded['inputs'][0], ['non_witness_utxo']) + + # Test that the psbt is not finalized and does have bip32_derivs when specified + psbt = self.nodes[2].descriptorprocesspsbt(psbt=psbt, descriptors=[descriptor], sighashtype="ALL", bip32derivs=True, finalize=False)["psbt"] + decoded = self.nodes[2].decodepsbt(psbt) + test_psbt_input_keys(decoded['inputs'][0], ['non_witness_utxo', 'partial_signatures', 'bip32_derivs']) + + psbt = self.nodes[2].descriptorprocesspsbt(psbt=psbt, 
descriptors=[descriptor], sighashtype="ALL", bip32derivs=False, finalize=True)["psbt"] + decoded = self.nodes[2].decodepsbt(psbt) + test_psbt_input_keys(decoded['inputs'][0], ['non_witness_utxo', 'final_scriptSig']) + + # Broadcast transaction + rawtx = self.nodes[2].finalizepsbt(psbt)["hex"] + self.nodes[2].sendrawtransaction(rawtx) if __name__ == '__main__': PSBTTest().main() From 0eb65f9da72b64f32cf00d4d9f381e56eea7cd60 Mon Sep 17 00:00:00 2001 From: Andrew Chow Date: Tue, 23 May 2023 15:40:30 -0400 Subject: [PATCH 2/5] Merge bitcoin/bitcoin#27177: test: fix intermittent issue in `feature_bip68_sequence` 272eb5561667482f8226bcf98eea00689dccefb8 test: fix `include_immature_coinbase` logic in `get_utxos` (brunoerg) a951c34f179dad0c7059b04a7f1e6b0804462168 test: fix `interface_usdt_mempool` by mining a block after each test (brunoerg) 1557bf1196bc2bdf00fd32f3b5d525796b4d194c test: fix mature utxos addition to wallet in `mempool_package_limits` (brunoerg) 60ced9007d518d542ce489b91076f9bbaf3312e3 test: fix intermittent issue in `feature_bip68_sequence` (brunoerg) Pull request description: Fixes #27129 To avoid `bad-txns-premature-spend-of-coinbase` error, when getting a utxo (using `get_utxo`) to create a new transaction `get_utxo` shouldn't return (if possible) by default immature coinbase. ACKs for top commit: achow101: ACK 272eb5561667482f8226bcf98eea00689dccefb8 pinheadmz: re-ACK 272eb5561667482f8226bcf98eea00689dccefb8 Tree-SHA512: eae821c7833bf084d8b907c94876ed010a7925d2177c3013a0c61b69d9571df006da83397a19487d93b0d1fa415951152f0b8ad0de2a55d86c39f6917934f050 --- test/functional/mempool_package_limits.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/test/functional/mempool_package_limits.py b/test/functional/mempool_package_limits.py index 9df8899895b8..330887ec22ac 100755 --- a/test/functional/mempool_package_limits.py +++ b/test/functional/mempool_package_limits.py @@ -46,8 +46,7 @@ def set_test_params(self): def run_test(self): self.wallet = MiniWallet(self.nodes[0]) # Add enough mature utxos to the wallet so that all txs spend confirmed coins. - self.generate(self.wallet, 35) - self.generate(self.nodes[0], COINBASE_MATURITY) + self.generate(self.wallet, COINBASE_MATURITY + 35) self.test_chain_limits() self.test_desc_count_limits() From 27033bcae2d52e6c4738106fc2276082891ae59d Mon Sep 17 00:00:00 2001 From: PastaClaw Date: Tue, 21 Apr 2026 16:31:39 -0500 Subject: [PATCH 3/5] Merge bitcoin/bitcoin#27626: Parallel compact block downloads, take 3 d7f359b35e8b1e9acc4d397de262cd9ba9bbcb83 Add tests for parallel compact block downloads (Greg Sanders) 03423f8bd12b95a06a4a9d8377e781625dd38aae Support up to 3 parallel compact block txn fetchings (Greg Sanders) 13f9b20b4cb2f3f26e81184a77e9cf1f626d4f57 Only request full blocks from the peer we thought had the block in-flight (Greg Sanders) cce96182ba2457335868c65dc16b081c3dee32ee Convert mapBlocksInFlight to a multimap (Greg Sanders) a90595478dcf4e443cd15bbb822d485dc42bdb18 Remove nBlocksInFlight (Greg Sanders) 86cff8bf18f2c6344a25ad8b81cf366201a73c36 alias BlockDownloadMap for mapBlocksInFlight (Greg Sanders) Pull request description: This is an attempt at mitigating https://github.com/bitcoin/bitcoin/issues/25258 , which is a revival of https://github.com/bitcoin/bitcoin/pull/10984, which is a revival of https://github.com/bitcoin/bitcoin/pull/9447. This PR attempts to mitigate a single case, where high bandwidth peers can bail us out of a flakey peer not completing blocks for us. 
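As a rough intuition for the failure mode (a simplified sketch in Python, not code from this change — the real bookkeeping is `mapBlocksInFlight` in net_processing.cpp): with a single request slot per block hash, whichever peer wins the slot can stall reconstruction for everyone else until a timeout fires:

```python
# Simplified model of the pre-change behaviour: one in-flight slot per block.
in_flight = {}  # block_hash -> peer_id that owns the only request slot


def offer_compact_block(block_hash, peer_id):
    """Return True if we would send this peer a getblocktxn request."""
    if block_hash in in_flight:
        # Someone already owns the slot; the new (possibly faster) peer is
        # turned away, so a stalled owner delays the block until timeout.
        return False
    in_flight[block_hash] = peer_id
    return True


assert offer_compact_block("blockhash", peer_id=1) is True   # flaky peer wins the slot
assert offer_compact_block("blockhash", peer_id=2) is False  # honest HB peer is ignored
```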
We allow up to 2 additional getblocktxns requests per unique block. This would hopefully allow the chance for an honest high bandwidth peer to hand us the transactions even if the first in flight peer stalls out. In contrast to previous effort: 1) it will not help if subsequent peers send block headers only, so only high-bandwidth peers this time. See: https://github.com/bitcoin/bitcoin/pull/10984/files#diff-6875de769e90cec84d2e8a9c1b962cdbcda44d870d42e4215827e599e11e90e3R1411 2) `MAX_GETBLOCKTXN_TXN_AFTER_FIRST_IN_FLIGHT` is removed, in favor of aiding recovery during turbulent mempools 3) We require one of the 3 block fetching slots to be an outbound peer. This can be the original offering peer, or subsequent compact blocks given by high bandwidth peers. ACKs for top commit: sdaftuar: ACK d7f359b35e8b1e9acc4d397de262cd9ba9bbcb83 mzumsande: Code Review ACK d7f359b35e8b1e9acc4d397de262cd9ba9bbcb83 Tree-SHA512: 54980eac179e30f12a0bd49df147b2c3d63cd8f9401abb23c7baf02f76eeb59f2cfaaa155227990d0d39384de9fa38663f88774e891600a3837ae927f04f0db3 --- src/net.h | 2 + src/net_processing.cpp | 229 ++++++++++++++++++--------- src/net_processing.h | 2 + src/rpc/blockchain.cpp | 2 +- test/functional/p2p_compactblocks.py | 92 ++++++++++- 5 files changed, 248 insertions(+), 79 deletions(-) diff --git a/src/net.h b/src/net.h index fbea1d395be8..1f0d682cdd3c 100644 --- a/src/net.h +++ b/src/net.h @@ -223,7 +223,9 @@ class CNodeStats int nVersion; std::string cleanSubVer; bool fInbound; + // We requested high bandwidth connection to peer bool m_bip152_highbandwidth_to; + // Peer requested high bandwidth connection bool m_bip152_highbandwidth_from; int m_starting_height; uint64_t nSendBytes; diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 21a3f7102bdb..2b4a14574881 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -457,7 +457,6 @@ struct CNodeState { std::list vBlocksInFlight; //! When the first entry in vBlocksInFlight started downloading. Don't care when vBlocksInFlight is empty. std::chrono::microseconds m_downloading_since{0us}; - int nBlocksInFlight{0}; //! Whether we consider this a preferred download peer. bool fPreferredDownload{false}; //! Whether this peer wants invs or headers (when possible) for block announcements. @@ -1036,6 +1035,9 @@ class PeerManagerImpl final : public PeerManager /** Have we requested this block from a peer */ bool IsBlockRequested(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + /** Have we requested this block from an outbound peer */ + bool IsBlockRequestedFromOutbound(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + /** Remove this block from our tracked requested blocks. Called if: * - the block has been recieved from a peer * - the request for the block has timed out @@ -1058,7 +1060,9 @@ class PeerManagerImpl final : public PeerManager */ void FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector& vBlocks, NodeId& nodeStaller) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - std::map::iterator> > mapBlocksInFlight GUARDED_BY(cs_main); + /* Multimap used to preserve insertion order */ + typedef std::multimap::iterator>> BlockDownloadMap; + BlockDownloadMap mapBlocksInFlight GUARDED_BY(cs_main); /** When our tip was last updated. 
*/ std::atomic m_last_tip_update{0s}; @@ -1238,40 +1242,55 @@ std::chrono::microseconds PeerManagerImpl::NextInvToInbounds(std::chrono::micros bool PeerManagerImpl::IsBlockRequested(const uint256& hash) { - return mapBlocksInFlight.find(hash) != mapBlocksInFlight.end(); + return mapBlocksInFlight.count(hash); } -void PeerManagerImpl::RemoveBlockRequest(const uint256& hash, std::optional from_peer) +bool PeerManagerImpl::IsBlockRequestedFromOutbound(const uint256& hash) { - auto it = mapBlocksInFlight.find(hash); - if (it == mapBlocksInFlight.end()) { - // Block was not requested - return; + for (auto range = mapBlocksInFlight.equal_range(hash); range.first != range.second; range.first++) { + auto [nodeid, block_it] = range.first->second; + CNodeState& nodestate = *Assert(State(nodeid)); + if (!nodestate.m_is_inbound) return true; } - auto [node_id, list_it] = it->second; + return false; +} - if (from_peer && node_id != *from_peer) { - // Block was requested by another peer +void PeerManagerImpl::RemoveBlockRequest(const uint256& hash, std::optional from_peer) +{ + auto range = mapBlocksInFlight.equal_range(hash); + if (range.first == range.second) { + // Block was not requested from any peer return; } - CNodeState *state = State(node_id); - assert(state != nullptr); + // We should not have requested too many of this block + Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK); - if (state->vBlocksInFlight.begin() == list_it) { - // First block on the queue was received, update the start download time for the next one - state->m_downloading_since = std::max(state->m_downloading_since, GetTime()); - } - state->vBlocksInFlight.erase(list_it); + while (range.first != range.second) { + auto [node_id, list_it] = range.first->second; + + if (from_peer && *from_peer != node_id) { + range.first++; + continue; + } + + CNodeState& state = *Assert(State(node_id)); - state->nBlocksInFlight--; - if (state->nBlocksInFlight == 0) { - // Last validated block on the queue was received. - m_peers_downloading_from--; + if (state.vBlocksInFlight.begin() == list_it) { + // First block on the queue was received, update the start download time for the next one + state.m_downloading_since = std::max(state.m_downloading_since, GetTime()); + } + state.vBlocksInFlight.erase(list_it); + + if (state.vBlocksInFlight.empty()) { + // Last validated block on the queue for this peer was received. + m_peers_downloading_from--; + } + state.m_stalling_since = 0us; + + range.first = mapBlocksInFlight.erase(range.first); } - state->m_stalling_since = 0us; - mapBlocksInFlight.erase(it); } bool PeerManagerImpl::BlockRequested(NodeId nodeid, const CBlockIndex& block, std::list::iterator **pit) @@ -1281,27 +1300,29 @@ bool PeerManagerImpl::BlockRequested(NodeId nodeid, const CBlockIndex& block, st CNodeState *state = State(nodeid); assert(state != nullptr); + Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK); + // Short-circuit most stuff in case it is from the same node - std::map::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash); - if (itInFlight != mapBlocksInFlight.end() && itInFlight->second.first == nodeid) { - if (pit) { - *pit = &itInFlight->second.second; + for (auto range = mapBlocksInFlight.equal_range(hash); range.first != range.second; range.first++) { + if (range.first->second.first == nodeid) { + if (pit) { + *pit = &range.first->second.second; + } + return false; } - return false; } - // Make sure it's not listed somewhere already. 
- RemoveBlockRequest(hash, std::nullopt); + // Make sure it's not being fetched already from same peer. + RemoveBlockRequest(hash, nodeid); std::list::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(), {&block, std::unique_ptr(pit ? new PartiallyDownloadedBlock(&m_mempool) : nullptr)}); - state->nBlocksInFlight++; - if (state->nBlocksInFlight == 1) { + if (state->vBlocksInFlight.size() == 1) { // We're starting a block download (batch) from this peer. state->m_downloading_since = GetTime(); m_peers_downloading_from++; } - itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it))).first; + auto itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it))); if (pit) { *pit = &itInFlight->second.second; } @@ -1502,7 +1523,7 @@ void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int co } } else if (waitingfor == -1) { // This is the first already-in-flight block. - waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first; + waitingfor = mapBlocksInFlight.lower_bound(pindex->GetBlockHash())->second.first; } } } @@ -1789,12 +1810,20 @@ void PeerManagerImpl::FinalizeNode(const CNode& node) { nSyncStarted--; for (const QueuedBlock& entry : state->vBlocksInFlight) { - mapBlocksInFlight.erase(entry.pindex->GetBlockHash()); + auto range = mapBlocksInFlight.equal_range(entry.pindex->GetBlockHash()); + while (range.first != range.second) { + auto [node_id, list_it] = range.first->second; + if (node_id != nodeid) { + range.first++; + } else { + range.first = mapBlocksInFlight.erase(range.first); + } + } } m_orphanage.EraseForPeer(nodeid); if (m_txreconciliation) m_txreconciliation->ForgetPeer(nodeid); m_num_preferred_download_peers -= state->fPreferredDownload; - m_peers_downloading_from -= (state->nBlocksInFlight != 0); + m_peers_downloading_from -= (!state->vBlocksInFlight.empty()); assert(m_peers_downloading_from >= 0); m_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect; assert(m_outbound_peers_with_protect_from_disconnect >= 0); @@ -2032,11 +2061,10 @@ std::optional PeerManagerImpl::FetchBlock(NodeId peer_id, const CBl if (peer == nullptr) return "Peer does not exist"; LOCK(cs_main); - // Mark block as in-flight unless it already is (for this peer). - // If the peer does not send us a block, vBlocksInFlight remains non-empty, - // causing us to timeout and disconnect. - // If a block was already in-flight for a different peer, its BLOCKTXN - // response will be dropped. + // Forget about all prior requests + RemoveBlockRequest(block_index.GetBlockHash(), std::nullopt); + + // Mark block as in-flight if (!BlockRequested(peer_id, block_index)) return "Already requested from this peer"; // Construct message to request the block @@ -3194,8 +3222,8 @@ void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, c } else { std::vector vGetData; // Download as much as possible, from earliest to latest. 
- for (const CBlockIndex *pindex : vToFetch | std::views::reverse) { - if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { + for (const CBlockIndex* pindex : vToFetch | std::views::reverse) { + if (nodestate->vBlocksInFlight.size() >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { // Can't download any more from this peer break; } @@ -4893,15 +4921,27 @@ void PeerManagerImpl::ProcessMessage( nodestate->m_last_block_announcement = GetTime(); } - std::map::iterator> >::iterator blockInFlightIt = mapBlocksInFlight.find(pindex->GetBlockHash()); - bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end(); - if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here return; + auto range_flight = mapBlocksInFlight.equal_range(pindex->GetBlockHash()); + size_t already_in_flight = std::distance(range_flight.first, range_flight.second); + bool requested_block_from_this_peer{false}; + + // Multimap ensures ordering of outstanding requests. It's either empty or first in line. + bool first_in_flight = already_in_flight == 0 || (range_flight.first->second.first == pfrom.GetId()); + + while (range_flight.first != range_flight.second) { + if (range_flight.first->second.first == pfrom.GetId()) { + requested_block_from_this_peer = true; + break; + } + range_flight.first++; + } + if (pindex->nChainWork <= m_chainman.ActiveChain().Tip()->nChainWork || // We know something better pindex->nTx != 0) { // We had this block at some point, but pruned it - if (fAlreadyInFlight) { + if (requested_block_from_this_peer) { // We requested this block for some reason, but our mempool will probably be useless // so we just grab the block via normal getdata std::vector vInv(1); @@ -4912,15 +4952,16 @@ void PeerManagerImpl::ProcessMessage( } // If we're not close to tip yet, give up and let parallel block fetch work its magic - if (!fAlreadyInFlight && !CanDirectFetch()) + if (!already_in_flight && !CanDirectFetch()) { return; + } // We want to be a bit conservative just to be extra careful about DoS // possibilities in compact block processing... 
if (pindex->nHeight <= m_chainman.ActiveChain().Height() + 2) { - if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) || - (fAlreadyInFlight && blockInFlightIt->second.first == pfrom.GetId())) { - std::list::iterator *queuedBlockIt = nullptr; + if ((already_in_flight < MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK && nodestate->vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) || + requested_block_from_this_peer) { + std::list::iterator* queuedBlockIt = nullptr; if (!BlockRequested(pfrom.GetId(), *pindex, &queuedBlockIt)) { if (!(*queuedBlockIt)->partialBlock) (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&m_mempool)); @@ -4938,11 +4979,16 @@ void PeerManagerImpl::ProcessMessage( Misbehaving(pfrom.GetId(), 100, "invalid compact block"); return; } else if (status == READ_STATUS_FAILED) { - // Duplicate txindexes, the block is now in-flight, so just request it - std::vector vInv(1); - vInv[0] = CInv(MSG_BLOCK, blockhash); - m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv)); - return; + if (first_in_flight) { + // Duplicate txindexes, the block is now in-flight, so just request it + std::vector vInv(1); + vInv[0] = CInv(MSG_BLOCK, blockhash); + m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv)); + return; + } else { + // Give up for this peer and wait for other peer(s) + RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId()); + } } BlockTransactionsRequest req; @@ -4956,9 +5002,24 @@ void PeerManagerImpl::ProcessMessage( txn.blockhash = blockhash; blockTxnMsg << txn; fProcessBLOCKTXN = true; - } else { + } else if (first_in_flight) { + // We will try to round-trip any compact blocks we get on failure, + // as long as it's first... req.blockhash = pindex->GetBlockHash(); m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req)); + } else if (pfrom.m_bip152_highbandwidth_to && + (!pfrom.IsInboundConn() || + IsBlockRequestedFromOutbound(blockhash) || + already_in_flight < MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK - 1)) { + // ... or it's a hb relay peer and: + // - peer is outbound, or + // - we already have an outbound attempt in flight(so we'll take what we can get), or + // - it's not the final parallel download slot (which we may reserve for first outbound) + req.blockhash = pindex->GetBlockHash(); + m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req)); + } else { + // Give up for this peer and wait for other peer(s) + RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId()); } } else { // This block is either already in flight from a different @@ -4979,7 +5040,7 @@ void PeerManagerImpl::ProcessMessage( } } } else { - if (fAlreadyInFlight) { + if (requested_block_from_this_peer) { // We requested this block, but its far into the future, so our // mempool will probably be useless - request the block normally std::vector vInv(1); @@ -5050,24 +5111,44 @@ void PeerManagerImpl::ProcessMessage( { LOCK(cs_main); - std::map::iterator> >::iterator it = mapBlocksInFlight.find(resp.blockhash); - if (it == mapBlocksInFlight.end() || !it->second.second->partialBlock || - it->second.first != pfrom.GetId()) { + auto range_flight = mapBlocksInFlight.equal_range(resp.blockhash); + size_t already_in_flight = std::distance(range_flight.first, range_flight.second); + bool requested_block_from_this_peer{false}; + + // Multimap ensures ordering of outstanding requests. It's either empty or first in line. 
+ bool first_in_flight = already_in_flight == 0 || (range_flight.first->second.first == pfrom.GetId()); + + while (range_flight.first != range_flight.second) { + auto [node_id, block_it] = range_flight.first->second; + if (node_id == pfrom.GetId() && block_it->partialBlock) { + requested_block_from_this_peer = true; + break; + } + range_flight.first++; + } + + if (!requested_block_from_this_peer) { LogPrint(BCLog::NET, "Peer %d sent us block transactions for block we weren't expecting\n", pfrom.GetId()); return; } - PartiallyDownloadedBlock& partialBlock = *it->second.second->partialBlock; + PartiallyDownloadedBlock& partialBlock = *range_flight.first->second.second->partialBlock; ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn); if (status == READ_STATUS_INVALID) { RemoveBlockRequest(resp.blockhash, pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect Misbehaving(pfrom.GetId(), 100, "invalid compact block/non-matching block transactions"); return; } else if (status == READ_STATUS_FAILED) { - // Might have collided, fall back to getdata now :( - std::vector invs; - invs.push_back(CInv(MSG_BLOCK, resp.blockhash)); - m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, invs)); + if (first_in_flight) { + // Might have collided, fall back to getdata now :( + std::vector invs; + invs.push_back(CInv(MSG_BLOCK, resp.blockhash)); + m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, invs)); + } else { + RemoveBlockRequest(resp.blockhash, pfrom.GetId()); + LogPrint(BCLog::NET, "Peer %d sent us a compact block but it failed to reconstruct, waiting on first download to complete\n", pfrom.GetId()); + return; + } } else { // Block is either okay, or possibly we received // READ_STATUS_CHECKBLOCK_FAILED. @@ -5750,14 +5831,14 @@ void PeerManagerImpl::EvictExtraOutboundPeers(std::chrono::seconds now) // valid headers chain with at least as much work as our tip. CNodeState *node_state = State(pnode->GetId()); if (node_state == nullptr || - (now - pnode->m_connected >= MINIMUM_CONNECT_TIME && node_state->nBlocksInFlight == 0)) { + (now - pnode->m_connected >= MINIMUM_CONNECT_TIME && node_state->vBlocksInFlight.empty())) { pnode->fDisconnect = true; LogPrint(BCLog::NET, "disconnecting extra block-relay-only peer=%d (last block received at time %d)\n", pnode->GetId(), count_seconds(pnode->m_last_block_time)); return true; } else { LogPrint(BCLog::NET, "keeping block-relay-only peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n", - pnode->GetId(), count_seconds(pnode->m_connected), node_state->nBlocksInFlight); + pnode->GetId(), count_seconds(pnode->m_connected), node_state->vBlocksInFlight.size()); } return false; }); @@ -5808,13 +5889,13 @@ void PeerManagerImpl::EvictExtraOutboundPeers(std::chrono::seconds now) // Also don't disconnect any peer we're trying to download a // block from. 
CNodeState &state = *State(pnode->GetId()); - if (now - pnode->m_connected > MINIMUM_CONNECT_TIME && state.nBlocksInFlight == 0) { + if (now - pnode->m_connected > MINIMUM_CONNECT_TIME && state.vBlocksInFlight.empty()) { LogPrint(BCLog::NET, "disconnecting extra outbound peer=%d (last block announcement received at time %d)\n", pnode->GetId(), oldest_block_announcement); pnode->fDisconnect = true; return true; } else { LogPrint(BCLog::NET, "keeping outbound peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n", - pnode->GetId(), count_seconds(pnode->m_connected), state.nBlocksInFlight); + pnode->GetId(), count_seconds(pnode->m_connected), state.vBlocksInFlight.size()); return false; } }); @@ -6522,17 +6603,17 @@ bool PeerManagerImpl::SendMessages(CNode* pto) // Message: getdata (blocks) // std::vector vGetData; - if (CanServeBlocks(*peer) && pto->CanRelay() && ((sync_blocks_and_headers_from_peer && !IsLimitedPeer(*peer)) || !m_chainman.ActiveChainstate().IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) { + if (CanServeBlocks(*peer) && pto->CanRelay() && ((sync_blocks_and_headers_from_peer && !IsLimitedPeer(*peer)) || !m_chainman.ActiveChainstate().IsInitialBlockDownload()) && state.vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) { std::vector vToDownload; NodeId staller = -1; - FindNextBlocksToDownload(*peer, MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller); + FindNextBlocksToDownload(*peer, MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.vBlocksInFlight.size(), vToDownload, staller); for (const CBlockIndex *pindex : vToDownload) { vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash())); BlockRequested(pto->GetId(), *pindex); LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(), pindex->nHeight, pto->GetId()); } - if (state.nBlocksInFlight == 0 && staller != -1) { + if (state.vBlocksInFlight.empty() && staller != -1) { if (State(staller)->m_stalling_since == 0us) { State(staller)->m_stalling_since = current_time; LogPrint(BCLog::NET, "Stall started peer=%d\n", staller); diff --git a/src/net_processing.h b/src/net_processing.h index 4967444fe346..d887db529dca 100644 --- a/src/net_processing.h +++ b/src/net_processing.h @@ -47,6 +47,8 @@ static const bool DEFAULT_PEERBLOOMFILTERS = true; static const bool DEFAULT_PEERBLOCKFILTERS = false; /** Threshold for marking a node to be discouraged, e.g. disconnected and added to the discouragement filter. */ static const int DISCOURAGEMENT_THRESHOLD{100}; +/** Maximum number of outstanding CMPCTBLOCK requests for the same block. */ +static const unsigned int MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK = 3; struct CNodeStateStats { int m_misbehavior_score = 0; diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 1803294f7b3c..be1343292a47 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -485,7 +485,7 @@ static RPCHelpMan getblockfrompeer() "getblockfrompeer", "Attempt to fetch block from a given peer.\n\n" "We must have the header for this block, e.g. 
using submitheader.\n" - "Subsequent calls for the same block and a new peer will cause the response from the previous peer to be ignored.\n" + "Subsequent calls for the same block may cause the response from the previous peer to be ignored.\n" "Peers generally ignore requests for a stale block that they never fully verified, or one that is more than a month old.\n" "When a peer does not respond with a block, we will disconnect.\n\n" "Returns an empty JSON object if the request was successfully scheduled.", diff --git a/test/functional/p2p_compactblocks.py b/test/functional/p2p_compactblocks.py index b0ee1e2a09d6..9f87704f20a3 100755 --- a/test/functional/p2p_compactblocks.py +++ b/test/functional/p2p_compactblocks.py @@ -102,6 +102,10 @@ def clear_block_announcement(self): self.last_message.pop("headers", None) self.last_message.pop("cmpctblock", None) + def clear_getblocktxn(self): + with p2p_lock: + self.last_message.pop("getblocktxn", None) + def get_headers(self, locator, hashstop): msg = msg_getheaders() msg.locator.vHave = locator @@ -708,7 +712,7 @@ def request_cb_announcements(self, peer): peer.get_headers(locator=[int(tip, 16)], hashstop=0) peer.send_and_ping(msg_sendcmpct(announce=True, version=1)) - def test_compactblock_reconstruction_multiple_peers(self, stalling_peer, delivery_peer): + def test_compactblock_reconstruction_stalling_peer(self, stalling_peer, delivery_peer): node = self.nodes[0] assert len(self.utxos) @@ -745,7 +749,9 @@ def announce_cmpct_block(node, peer): delivery_peer.send_message(msg_tx(tx)) delivery_peer.sync_with_ping() - cmpct_block.prefilled_txn[0].tx = CTxIn() + # Keep the compact block structurally valid, but make it unreconstructable + # from this peer so the original in-flight request can still complete it. + cmpct_block.shortids[0] ^= 1 delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p())) assert int(node.getbestblockhash(), 16) != block.sha256 @@ -784,12 +790,85 @@ def assert_highbandwidth_states(node, hb_to, hb_from): hb_test_node.send_and_ping(msg_sendcmpct(announce=False, version=1)) assert_highbandwidth_states(self.nodes[0], hb_to=True, hb_from=False) + def test_compactblock_reconstruction_parallel_reconstruction(self, stalling_peer, delivery_peer, inbound_peer, outbound_peer): + """ All p2p connections are inbound except outbound_peer. We test that ultimate parallel slot + can only be taken by an outbound node unless prior attempts were done by an outbound + """ + node = self.nodes[0] + assert len(self.utxos) + + def announce_cmpct_block(node, peer, txn_count): + utxo = self.utxos.pop(0) + block = self.build_block_with_transactions(node, utxo, txn_count) + + cmpct_block = HeaderAndShortIDs() + cmpct_block.initialize_from_block(block) + msg = msg_cmpctblock(cmpct_block.to_p2p()) + peer.send_and_ping(msg) + with p2p_lock: + assert "getblocktxn" in peer.last_message + return block, cmpct_block + + for name, peer in [("delivery", delivery_peer), ("inbound", inbound_peer), ("outbound", outbound_peer)]: + self.log.info(f"Setting {name} as high bandwidth peer") + block, cmpct_block = announce_cmpct_block(node, peer, 1) + msg = msg_blocktxn() + msg.block_transactions.blockhash = block.sha256 + msg.block_transactions.transactions = block.vtx[1:] + peer.send_and_ping(msg) + assert_equal(int(node.getbestblockhash(), 16), block.sha256) + peer.clear_getblocktxn() + + # Test the simple parallel download case... 
+ for num_missing in [1, 5, 20]: + + # Remaining low-bandwidth peer is stalling_peer, who announces first + assert_equal([peer['bip152_hb_to'] for peer in node.getpeerinfo()], [False, True, True, True]) + + block, cmpct_block = announce_cmpct_block(node, stalling_peer, num_missing) + + delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p())) + with p2p_lock: + # The second peer to announce should still get a getblocktxn + assert "getblocktxn" in delivery_peer.last_message + assert int(node.getbestblockhash(), 16) != block.sha256 + + inbound_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p())) + with p2p_lock: + # The third inbound peer to announce should *not* get a getblocktxn + assert "getblocktxn" not in inbound_peer.last_message + assert int(node.getbestblockhash(), 16) != block.sha256 + + outbound_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p())) + with p2p_lock: + # The third peer to announce should get a getblocktxn if outbound + assert "getblocktxn" in outbound_peer.last_message + assert int(node.getbestblockhash(), 16) != block.sha256 + + # Second peer completes the compact block first + msg = msg_blocktxn() + msg.block_transactions.blockhash = block.sha256 + msg.block_transactions.transactions = block.vtx[1:] + delivery_peer.send_and_ping(msg) + assert_equal(int(node.getbestblockhash(), 16), block.sha256) + + # Nothing bad should happen if we get a late fill from the first peer... + stalling_peer.send_and_ping(msg) + self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) + + delivery_peer.clear_getblocktxn() + inbound_peer.clear_getblocktxn() + outbound_peer.clear_getblocktxn() + + def run_test(self): self.wallet = MiniWallet(self.nodes[0]) # Setup the p2p connections self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn()) self.additional_test_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_HEADERS_COMPRESSED) + self.onemore_inbound_node = self.nodes[0].add_p2p_connection(TestP2PConn()) + self.outbound_node = self.nodes[0].add_outbound_p2p_connection(TestP2PConn(), p2p_idx=3, connection_type="outbound-full-relay") # We will need UTXOs to construct transactions in later tests. self.make_utxos() @@ -797,6 +876,8 @@ def run_test(self): self.log.info("Testing SENDCMPCT p2p message... 
") self.test_sendcmpct(self.test_node) self.test_sendcmpct(self.additional_test_node) + self.test_sendcmpct(self.onemore_inbound_node) + self.test_sendcmpct(self.outbound_node) self.log.info("Testing compactblock construction...") self.test_compactblock_construction(self.test_node) @@ -813,8 +894,11 @@ def run_test(self): self.log.info("Testing handling of incorrect blocktxn responses...") self.test_incorrect_blocktxn_response(self.test_node) - self.log.info("Testing reconstructing compact blocks from all peers...") - self.test_compactblock_reconstruction_multiple_peers(self.test_node, self.additional_test_node) + self.log.info("Testing reconstructing compact blocks with a stalling peer...") + self.test_compactblock_reconstruction_stalling_peer(self.test_node, self.additional_test_node) + + self.log.info("Testing reconstructing compact blocks from multiple peers...") + self.test_compactblock_reconstruction_parallel_reconstruction(stalling_peer=self.test_node, inbound_peer=self.onemore_inbound_node, delivery_peer=self.additional_test_node, outbound_peer=self.outbound_node) # End-to-end block relay tests self.log.info("Testing end-to-end block relay...") From e9dc081cd280d3e59bc4b6d57fd9da9e41681f4b Mon Sep 17 00:00:00 2001 From: PastaClaw Date: Thu, 23 Apr 2026 09:18:26 -0500 Subject: [PATCH 4/5] Merge bitcoin/bitcoin#27302: init: Error if ignored bitcoin.conf file is found eefe56967b4eb4b5144325cde4f40fc1cbde3e65 bugfix: Fix incorrect debug.log config file path (Ryan Ofsky) 3746f00be1b732a04976fc70cbb0661f97bbbd99 init: Error if ignored bitcoin.conf file is found (Ryan Ofsky) 398c3719b02197ad92fded20f6ff83b364747297 lint: Fix lint-format-strings false positives when format specifiers have argument positions (Ryan Ofsky) --- doc/release-notes-27302.md | 4 + src/init.cpp | 1 + src/init/common.cpp | 2 +- src/qt/test/test_main.cpp | 3 + src/util/system.cpp | 34 +++++++- src/util/system.h | 1 + test/functional/feature_config_args.py | 94 ++++++++++++++++++++- test/functional/test_framework/test_node.py | 4 +- test/functional/test_framework/util.py | 20 ++++- test/lint/run-lint-format-strings.py | 30 +++++-- 10 files changed, 178 insertions(+), 15 deletions(-) create mode 100644 doc/release-notes-27302.md diff --git a/doc/release-notes-27302.md b/doc/release-notes-27302.md new file mode 100644 index 000000000000..1cd137c41271 --- /dev/null +++ b/doc/release-notes-27302.md @@ -0,0 +1,4 @@ +Configuration +--- + +- `dashd` and `dash-qt` will now raise an error on startup if a datadir that is being used contains a `dash.conf` file that will be ignored, which can happen when a `datadir=` line is used in a `dash.conf` file. The error message is just a diagnostic intended to prevent accidental misconfiguration, and it can be disabled to restore the previous behavior of using the datadir while ignoring the `dash.conf` contained in it. diff --git a/src/init.cpp b/src/init.cpp index 703c50646cc3..34b646bad13d 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -562,6 +562,7 @@ void SetupServerArgs(ArgsManager& argsman) argsman.AddArg("-dbbatchsize", strprintf("Maximum database write batch size in bytes (default: %u)", nDefaultDbBatchSize), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::OPTIONS); argsman.AddArg("-dbcache=", strprintf("Maximum database cache size MiB (%d to %d, default: %d). 
In addition, unused mempool memory is shared for this cache (see -maxmempool).", nMinDbCache, nMaxDbCache, nDefaultDbCache), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); argsman.AddArg("-includeconf=", "Specify additional configuration file, relative to the -datadir path (only useable from configuration file, not command line)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-allowignoredconf", strprintf("For backwards compatibility, treat an unused %s file in the datadir as a warning, not an error.", BITCOIN_CONF_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); argsman.AddArg("-loadblock=", "Imports blocks from external file on startup", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); argsman.AddArg("-maxmempool=", strprintf("Keep the transaction memory pool below megabytes (default: %u)", DEFAULT_MAX_MEMPOOL_SIZE), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); argsman.AddArg("-maxorphantxsize=", strprintf("Maximum total size of all orphan transactions in megabytes (default: %u)", DEFAULT_MAX_ORPHAN_TRANSACTIONS_SIZE), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); diff --git a/src/init/common.cpp b/src/init/common.cpp index a1a9f0c31a51..6b90128cb8ea 100644 --- a/src/init/common.cpp +++ b/src/init/common.cpp @@ -153,7 +153,7 @@ bool StartLogging(const ArgsManager& args) LogPrintf("Using data directory %s\n", fs::PathToString(gArgs.GetDataDirNet())); // Only log conf file usage message if conf file actually exists. - fs::path config_file_path = GetConfigFile(args.GetPathArg("-conf", BITCOIN_CONF_FILENAME)); + fs::path config_file_path = args.GetConfigFilePath(); if (fs::exists(config_file_path)) { LogPrintf("Config file: %s\n", fs::PathToString(config_file_path)); } else if (args.IsArgSet("-conf")) { diff --git a/src/qt/test/test_main.cpp b/src/qt/test/test_main.cpp index 466c347c19fb..d4a8477bf1ec 100644 --- a/src/qt/test/test_main.cpp +++ b/src/qt/test/test_main.cpp @@ -72,6 +72,9 @@ int main(int argc, char* argv[]) gArgs.ForceSetArg("-upnp", "0"); gArgs.ForceSetArg("-natpmp", "0"); + std::string error; + if (!gArgs.ReadConfigFiles(error, true)) QWARN(error.c_str()); + // Prefer the "minimal" platform for the test instead of the normal default // platform ("xcb", "windows", or "cocoa") so tests can't unintentionally // interfere with any background GUIs and don't require extra resources. 
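Before the `src/util/system.cpp` hunk below, a rough sketch of the condition that hunk detects may be useful (illustrative Python only; the real check in this patch uses `fs::equivalent` on the resolved config path, and the file names follow the Dash rename used in this backport):

```python
from pathlib import Path


def find_ignored_conf(default_datadir: Path, conf_name: str = "dash.conf"):
    """Return a conf file that would be silently ignored, or None.

    Scenario from the release note: the conf file in the default datadir
    points at another datadir via a datadir= line, and that other datadir
    contains its own conf file, which older versions skipped silently.
    """
    loaded_conf = default_datadir / conf_name
    if not loaded_conf.exists():
        return None
    # Minimal parse: only datadir= lines matter for this check.
    datadirs = [line.split("=", 1)[1].strip()
                for line in loaded_conf.read_text().splitlines()
                if line.startswith("datadir=")]
    if not datadirs:
        return None
    unused_conf = Path(datadirs[-1]) / conf_name
    # Only an error if the ignored file is genuinely a different file.
    if unused_conf.exists() and not unused_conf.samefile(loaded_conf):
        return unused_conf
    return None
```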
diff --git a/src/util/system.cpp b/src/util/system.cpp index d769dbd055be..ebfb0671d780 100644 --- a/src/util/system.cpp +++ b/src/util/system.cpp @@ -999,7 +999,8 @@ bool ArgsManager::ReadConfigStream(std::istream& stream, const std::string& file fs::path ArgsManager::GetConfigFilePath() const { - return GetConfigFile(GetPathArg("-conf", BITCOIN_CONF_FILENAME)); + LOCK(cs_args); + return *Assert(m_config_path); } bool ArgsManager::ReadConfigFiles(std::string& error, bool ignore_invalid_keys) @@ -1008,9 +1009,12 @@ bool ArgsManager::ReadConfigFiles(std::string& error, bool ignore_invalid_keys) LOCK(cs_args); m_settings.ro_config.clear(); m_config_sections.clear(); + m_config_path = AbsPathForConfigVal(GetPathArg("-conf", BITCOIN_CONF_FILENAME), /*net_specific=*/false); } + const fs::path orig_datadir_path{GetDataDirBase()}; const auto conf_path{GetConfigFilePath()}; + const fs::path& orig_config_path{conf_path}; std::ifstream stream{conf_path}; // not ok to have a config file specified that cannot be opened @@ -1058,7 +1062,7 @@ bool ArgsManager::ReadConfigFiles(std::string& error, bool ignore_invalid_keys) const size_t default_includes = add_includes({}); for (const std::string& conf_file_name : conf_file_names) { - std::ifstream conf_file_stream{GetConfigFile(fs::PathFromString(conf_file_name))}; + std::ifstream conf_file_stream{AbsPathForConfigVal(fs::PathFromString(conf_file_name), /*net_specific=*/false)}; if (conf_file_stream.good()) { if (!ReadConfigStream(conf_file_stream, conf_file_name, error, ignore_invalid_keys)) { return false; @@ -1098,6 +1102,32 @@ bool ArgsManager::ReadConfigFiles(std::string& error, bool ignore_invalid_keys) error = strprintf("specified data directory \"%s\" does not exist.", GetArg("-datadir", "")); return false; } + + const fs::path base_path = GetDataDirBase(); + const fs::path base_config_path = base_path / BITCOIN_CONF_FILENAME; + if (fs::exists(base_config_path) && !fs::equivalent(orig_config_path, base_config_path)) { + const std::string cli_config_path = GetArg("-conf", ""); + const std::string config_source = cli_config_path.empty() + ? strprintf("data directory %s", fs::quoted(fs::PathToString(orig_datadir_path))) + : strprintf("command line argument %s", fs::quoted("-conf=" + cli_config_path)); + const std::string ignored_conf_error = strprintf( + "Data directory %1$s contains a %2$s file which is ignored, because a different configuration file " + "%3$s from %4$s is being used instead. 
Possible ways to address this would be to:\n" + "- Delete or rename the %2$s file in data directory %1$s.\n" + "- Change datadir= or conf= options to specify one configuration file, not two, and use " + "includeconf= to include any other configuration files.\n" + "- Set allowignoredconf=1 option to treat this condition as a warning, not an error.", + fs::quoted(fs::PathToString(base_path)), + fs::quoted(BITCOIN_CONF_FILENAME), + fs::quoted(fs::PathToString(orig_config_path)), + config_source); + if (GetBoolArg("-allowignoredconf", false)) { + LogPrintf("Warning: %s\n", ignored_conf_error); + } else { + error = ignored_conf_error; + return false; + } + } return true; } diff --git a/src/util/system.h b/src/util/system.h index 08722d5f57bc..f7623c367727 100644 --- a/src/util/system.h +++ b/src/util/system.h @@ -212,6 +212,7 @@ class ArgsManager std::map> m_available_args GUARDED_BY(cs_args); bool m_accept_any_command GUARDED_BY(cs_args){true}; std::list m_config_sections GUARDED_BY(cs_args); + mutable std::optional m_config_path GUARDED_BY(cs_args); mutable fs::path m_cached_blocks_path GUARDED_BY(cs_args); mutable fs::path m_cached_datadir_path GUARDED_BY(cs_args); mutable fs::path m_cached_network_datadir_path GUARDED_BY(cs_args); diff --git a/test/functional/feature_config_args.py b/test/functional/feature_config_args.py index 9bdadfe794c1..6c6831ce200a 100755 --- a/test/functional/feature_config_args.py +++ b/test/functional/feature_config_args.py @@ -5,9 +5,14 @@ """Test various command line arguments and configuration file parameters.""" import os +import pathlib +import re +import sys +import tempfile import time from test_framework.test_framework import BitcoinTestFramework +from test_framework.test_node import ErrorMatch from test_framework import util @@ -74,7 +79,7 @@ def test_config_file_parser(self): util.write_config(main_conf_file_path, n=0, chain='', extra_config=f'includeconf={inc_conf_file_path}\n') with open(inc_conf_file_path, 'w', encoding='utf-8') as conf: conf.write('acceptnonstdtxn=1\n') - self.nodes[0].assert_start_raises_init_error(extra_args=[f"-conf={main_conf_file_path}"], expected_msg='Error: acceptnonstdtxn is not currently supported for main chain') + self.nodes[0].assert_start_raises_init_error(extra_args=[f"-conf={main_conf_file_path}", "-allowignoredconf"], expected_msg='Error: acceptnonstdtxn is not currently supported for main chain') with open(inc_conf_file_path, 'w', encoding='utf-8') as conf: conf.write('nono\n') @@ -108,6 +113,41 @@ def test_config_file_parser(self): with open(inc_conf_file2_path, 'w', encoding='utf-8') as conf: conf.write('') # clear + def test_config_file_log(self): + # Disable this test for windows currently because trying to override + # the default datadir through the environment does not seem to work. + if sys.platform == "win32": + return + + self.log.info('Test that correct configuration path is changed when configuration file changes the datadir') + + # Create a temporary directory that will be treated as the default data + # directory by dashd. + env, default_datadir = util.get_temp_default_datadir(pathlib.Path(self.options.tmpdir, "test_config_file_log")) + default_datadir.mkdir(parents=True) + + # Write a dash.conf file in the default data directory containing a + # datadir= line pointing at the node datadir. 
+ node = self.nodes[0] + conf_text = pathlib.Path(node.bitcoinconf).read_text() + conf_path = default_datadir / "dash.conf" + conf_path.write_text(f"datadir={node.datadir}\n{conf_text}") + + # Drop the node -datadir= argument during this test, because if it is + # specified it would take precedence over the datadir setting in the + # config file. + node_args = node.args + node.args = [arg for arg in node.args if not arg.startswith("-datadir=")] + + # Check that correct configuration file path is actually logged + # (conf_path, not node.bitcoinconf) + with self.nodes[0].assert_debug_log(expected_msgs=[f"Config file: {conf_path}"]): + self.start_node(0, ["-allowignoredconf"], env=env) + self.stop_node(0) + + # Restore node arguments after the test + node.args = node_args + def test_invalid_command_line_options(self): self.nodes[0].assert_start_raises_init_error( expected_msg='Error: Error parsing command line arguments: Can not set -proxy with no value. Please specify value with -proxy=value.', @@ -278,6 +318,55 @@ def test_connect_with_seednode(self): unexpected_msgs=seednode_ignored): self.restart_node(0, extra_args=[connect_arg, '-seednode=fakeaddress2']) + def test_ignored_conf(self): + self.log.info('Test error is triggered when the datadir in use contains a dash.conf file that would be ignored ' + 'because a conflicting -conf file argument is passed.') + node = self.nodes[0] + with tempfile.NamedTemporaryFile(dir=self.options.tmpdir, mode="wt", delete=False) as temp_conf: + temp_conf.write(f"datadir={node.datadir}\n") + node.assert_start_raises_init_error([f"-conf={temp_conf.name}"], re.escape( + f'Error: Error reading configuration file: Data directory "{node.datadir}" contains a "dash.conf" file which is ignored, because a ' + f'different configuration file "{temp_conf.name}" from command line argument "-conf={temp_conf.name}" ' + f'is being used instead.') + r"[\s\S]*", match=ErrorMatch.FULL_REGEX) + + # Test that passing a redundant -conf command line argument pointing to + # the same dash.conf that would be loaded anyway does not trigger an + # error. + self.start_node(0, [f'-conf={node.datadir}/dash.conf']) + self.stop_node(0) + + def test_ignored_default_conf(self): + # Disable this test for windows currently because trying to override + # the default datadir through the environment does not seem to work. + if sys.platform == "win32": + return + + self.log.info('Test error is triggered when dash.conf in the default data directory sets another datadir ' + 'and it contains a different dash.conf file that would be ignored') + + # Create a temporary directory that will be treated as the default data + # directory by dashd. + env, default_datadir = util.get_temp_default_datadir(pathlib.Path(self.options.tmpdir, "home")) + default_datadir.mkdir(parents=True) + + # Write a dash.conf file in the default data directory containing a + # datadir= line pointing at the node datadir. This will trigger a + # startup error because the node datadir contains a different + # dash.conf that would be ignored. + node = self.nodes[0] + (default_datadir / "dash.conf").write_text(f"datadir={node.datadir}\n") + + # Drop the node -datadir= argument during this test, because if it is + # specified it would take precedence over the datadir setting in the + # config file. 
+ node_args = node.args + node.args = [arg for arg in node.args if not arg.startswith("-datadir=")] + node.assert_start_raises_init_error([], re.escape( + f'Error: Error reading configuration file: Data directory "{node.datadir}" contains a "dash.conf" file which is ignored, because a ' + f'different configuration file "{default_datadir}/dash.conf" from data directory "{default_datadir}" ' + f'is being used instead.') + r"[\s\S]*", env=env, match=ErrorMatch.FULL_REGEX) + node.args = node_args + def run_test(self): self.test_log_buffer() self.test_args_log() @@ -287,7 +376,10 @@ def run_test(self): self.test_config_file_parser() + self.test_config_file_log() self.test_invalid_command_line_options() + self.test_ignored_conf() + self.test_ignored_default_conf() # Remove the -datadir argument so it doesn't override the config file self.nodes[0].args = [arg for arg in self.nodes[0].args if not arg.startswith("-datadir")] diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index 52f785c14ce5..47991c0e0a0c 100755 --- a/test/functional/test_framework/test_node.py +++ b/test/functional/test_framework/test_node.py @@ -222,7 +222,7 @@ def __getattr__(self, name): assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection") return getattr(RPCOverloadWrapper(self.rpc, descriptors=self.descriptors), name) - def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, **kwargs): + def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, env=None, **kwargs): """Start the node.""" if extra_args is None: extra_args = self.extra_args @@ -251,6 +251,8 @@ def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, **kwargs # add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1") + if env is not None: + subp_env.update(env) self.process = subprocess.Popen(all_args, env=subp_env, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs) diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py index 7cdcbc8983d8..a72887d87f45 100644 --- a/test/functional/test_framework/util.py +++ b/test/functional/test_framework/util.py @@ -13,15 +13,17 @@ import json import logging import os +import pathlib import random import shutil import re +import sys import time import urllib.parse from . 
import coverage
 from .authproxy import AuthServiceProxy, JSONRPCException
-from typing import Callable, Optional
+from typing import Callable, Optional, Tuple
 
 logger = logging.getLogger("TestFramework.utils")
 
@@ -432,6 +434,22 @@ def get_datadir_path(dirname, n):
     return os.path.join(dirname, "node" + str(n))
 
 
+def get_temp_default_datadir(temp_dir: pathlib.Path) -> Tuple[dict, pathlib.Path]:
+    """Return os-specific environment variables that can be set to make the
+    GetDefaultDataDir() function return a datadir path under the provided
+    temp_dir, as well as the complete path it would return."""
+    if sys.platform == "win32":
+        env = dict(APPDATA=str(temp_dir))
+        datadir = temp_dir / "DashCore"
+    else:
+        env = dict(HOME=str(temp_dir))
+        if sys.platform == "darwin":
+            datadir = temp_dir / "Library/Application Support/DashCore"
+        else:
+            datadir = temp_dir / ".dashcore"
+    return env, datadir
+
+
 def append_config(datadir, options):
     with open(os.path.join(datadir, "dash.conf"), 'a', encoding='utf8') as f:
         for option in options:
diff --git a/test/lint/run-lint-format-strings.py b/test/lint/run-lint-format-strings.py
index 4e06a93f44d3..0094e031f256 100755
--- a/test/lint/run-lint-format-strings.py
+++ b/test/lint/run-lint-format-strings.py
@@ -245,20 +245,32 @@ def count_format_specifiers(format_string):
     3
     >>> count_format_specifiers("foo %d bar %i foo %% foo %*d foo")
     4
+    >>> count_format_specifiers("foo %5$d")
+    5
+    >>> count_format_specifiers("foo %5$*7$d")
+    7
     """
     assert type(format_string) is str
     format_string = format_string.replace('%%', 'X')
-    n = 0
-    in_specifier = False
-    for i, char in enumerate(format_string):
-        if char == "%":
-            in_specifier = True
+    n = max_pos = 0
+    for m in re.finditer("%(.*?)[aAcdeEfFgGinopsuxX]", format_string, re.DOTALL):
+        # Increase the max position if the argument has a position number like
+        # "5$", otherwise increment the argument count.
+        pos_num, = re.match(r"(?:(^\d+)\$)?", m.group(1)).groups()
+        if pos_num is not None:
+            max_pos = max(max_pos, int(pos_num))
+        else:
             n += 1
-        elif char in "aAcdeEfFgGinopsuxX":
-            in_specifier = False
-        elif in_specifier and char == "*":
+
+        # Increase the max position if there is a "*" width argument with a
+        # position like "*7$", and increment the argument count if there is a
+        # "*" width argument with no position.
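# --- Aside (not part of the patch): a self-contained check of the positional-
# specifier counting introduced in this hunk. "demo_count" is a hypothetical
# name; the two regexes are copied from the lines above. "%5$d" consumes
# argument 5, and "%5$*7$d" additionally takes its field width from argument 7,
# so at least 7 arguments must be supplied.
import re

def demo_count(format_string):
    n = max_pos = 0
    for m in re.finditer("%(.*?)[aAcdeEfFgGinopsuxX]", format_string.replace("%%", "X"), re.DOTALL):
        # Positional argument like "5$" bumps the highest position seen,
        # otherwise this is an ordinary (sequential) argument.
        pos_num, = re.match(r"(?:(^\d+)\$)?", m.group(1)).groups()
        if pos_num is not None:
            max_pos = max(max_pos, int(pos_num))
        else:
            n += 1
        # A "*" width consumes an extra argument, either positional ("*7$") or sequential.
        star, star_pos_num = re.match(r"(?:.*?(\*(?:(\d+)\$)?)|)", m.group(1)).groups()
        if star_pos_num is not None:
            max_pos = max(max_pos, int(star_pos_num))
        elif star is not None:
            n += 1
    return max(n, max_pos)

assert demo_count("foo %5$d") == 5
assert demo_count("foo %5$*7$d") == 7
assert demo_count("foo %d bar %i foo %% foo %*d foo") == 4
# Taking max(n, max_pos) mirrors the patch: positional and sequential specifiers
# may be mixed, and the larger of the two bounds is the argument count required.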
+ star, star_pos_num = re.match(r"(?:.*?(\*(?:(\d+)\$)?)|)", m.group(1)).groups() + if star_pos_num is not None: + max_pos = max(max_pos, int(star_pos_num)) + elif star is not None: n += 1 - return n + return max(n, max_pos) def main(): From 9598089c9cac4e916f560027405c1be67c3cb689 Mon Sep 17 00:00:00 2001 From: PastaClaw Date: Sat, 25 Apr 2026 09:32:55 -0500 Subject: [PATCH 5/5] wallet, bench: Move commonly used functions to their own file and fix a bug --- src/Makefile.test_util.include | 5 +++ src/bench/wallet_balance.cpp | 9 ++---- src/bench/wallet_loading.cpp | 38 +++++----------------- src/wallet/test/load_util.cpp | 54 ++++++++++++++++++++++++++++++++ src/wallet/test/util.h | 6 ++++ src/wallet/test/wallet_tests.cpp | 37 +++++----------------- 6 files changed, 82 insertions(+), 67 deletions(-) create mode 100644 src/wallet/test/load_util.cpp diff --git a/src/Makefile.test_util.include b/src/Makefile.test_util.include index 749d3a5f1d62..907da0eda1d7 100644 --- a/src/Makefile.test_util.include +++ b/src/Makefile.test_util.include @@ -43,3 +43,8 @@ libtest_util_a_SOURCES = \ test/util/validation.cpp \ test/util/wallet.cpp \ $(TEST_UTIL_H) + +if ENABLE_WALLET +libtest_util_a_SOURCES += \ + wallet/test/load_util.cpp +endif diff --git a/src/bench/wallet_balance.cpp b/src/bench/wallet_balance.cpp index 36a95f5765a0..685295b6b4df 100644 --- a/src/bench/wallet_balance.cpp +++ b/src/bench/wallet_balance.cpp @@ -8,17 +8,14 @@ #include #include #include +#include #include #include #include #include -using wallet::CreateMockWalletDatabase; -using wallet::CWallet; -using wallet::DBErrors; -using wallet::WALLET_FLAG_DESCRIPTORS; - +namespace wallet { static void WalletBalance(benchmark::Bench& bench, const bool set_dirty, const bool add_mine, const uint32_t epoch_iters) { const auto test_setup = MakeNoLogFileContext(); @@ -29,7 +26,6 @@ static void WalletBalance(benchmark::Bench& bench, const bool set_dirty, const b LOCK(wallet.cs_wallet); wallet.SetWalletFlag(WALLET_FLAG_DESCRIPTORS); wallet.SetupDescriptorScriptPubKeyMans("", ""); - if (wallet.LoadWallet() != DBErrors::LOAD_OK) assert(false); } auto handler = test_setup->m_node.chain->handleNotifications({&wallet, [](CWallet*) {}}); @@ -59,3 +55,4 @@ BENCHMARK(WalletBalanceDirty, benchmark::PriorityLevel::HIGH); BENCHMARK(WalletBalanceClean, benchmark::PriorityLevel::HIGH); BENCHMARK(WalletBalanceMine, benchmark::PriorityLevel::HIGH); BENCHMARK(WalletBalanceWatch, benchmark::PriorityLevel::HIGH); +} // namespace wallet diff --git a/src/bench/wallet_loading.cpp b/src/bench/wallet_loading.cpp index a81d79936e5b..7c1556b17f9f 100644 --- a/src/bench/wallet_loading.cpp +++ b/src/bench/wallet_loading.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -16,33 +17,7 @@ #include -using wallet::CWallet; -using wallet::DatabaseFormat; -using wallet::DatabaseOptions; -using wallet::TxStateInactive; -using wallet::WALLET_FLAG_DESCRIPTORS; -using wallet::WalletContext; -using wallet::WalletDatabase; - -static std::shared_ptr BenchLoadWallet(std::unique_ptr database, WalletContext& context, DatabaseOptions& options) -{ - bilingual_str error; - std::vector warnings; - auto wallet = CWallet::Create(context, "", std::move(database), options.create_flags, error, warnings); - NotifyWalletLoaded(context, wallet); - if (context.chain) { - wallet->postInitProcess(); - } - return wallet; -} - -static void BenchUnloadWallet(std::shared_ptr&& wallet) -{ - SyncWithValidationInterfaceQueue(); - 
wallet->m_chain_notifications_handler.reset(); - UnloadWallet(std::move(wallet)); -} - +namespace wallet { static void AddTx(CWallet& wallet) { CMutableTransaction mtx; @@ -95,7 +70,7 @@ static void WalletLoading(benchmark::Bench& bench, bool legacy_wallet) options.require_format = DatabaseFormat::SQLITE; } auto database = CreateMockWalletDatabase(options); - auto wallet = BenchLoadWallet(std::move(database), context, options); + auto wallet = TestLoadWallet(std::move(database), context, options.create_flags); // Generate a bunch of transactions and addresses to put into the wallet for (int i = 0; i < 1000; ++i) { @@ -105,14 +80,14 @@ static void WalletLoading(benchmark::Bench& bench, bool legacy_wallet) database = DuplicateMockDatabase(wallet->GetDatabase(), options); // reload the wallet for the actual benchmark - BenchUnloadWallet(std::move(wallet)); + TestUnloadWallet(context, std::move(wallet)); bench.epochs(5).run([&] { - wallet = BenchLoadWallet(std::move(database), context, options); + wallet = TestLoadWallet(std::move(database), context, options.create_flags); // Cleanup database = DuplicateMockDatabase(wallet->GetDatabase(), options); - BenchUnloadWallet(std::move(wallet)); + TestUnloadWallet(context, std::move(wallet)); }); } @@ -125,3 +100,4 @@ BENCHMARK(WalletLoadingLegacy, benchmark::PriorityLevel::HIGH); static void WalletLoadingDescriptors(benchmark::Bench& bench) { WalletLoading(bench, /*legacy_wallet=*/false); } BENCHMARK(WalletLoadingDescriptors, benchmark::PriorityLevel::HIGH); #endif +} // namespace wallet diff --git a/src/wallet/test/load_util.cpp b/src/wallet/test/load_util.cpp new file mode 100644 index 000000000000..999ed7a24461 --- /dev/null +++ b/src/wallet/test/load_util.cpp @@ -0,0 +1,54 @@ +// Copyright (c) 2021 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+namespace wallet {
+std::shared_ptr<CWallet> TestLoadWallet(std::unique_ptr<WalletDatabase> database, WalletContext& context, uint64_t create_flags)
+{
+    bilingual_str error;
+    std::vector<bilingual_str> warnings;
+    auto wallet = CWallet::Create(context, "", std::move(database), create_flags, error, warnings);
+    if (context.coinjoin_loader) {
+        // TODO: see CreateWalletWithoutChain
+        AddWallet(context, wallet);
+    }
+    NotifyWalletLoaded(context, wallet);
+    if (context.chain) {
+        wallet->postInitProcess();
+    }
+    return wallet;
+}
+
+std::shared_ptr<CWallet> TestLoadWallet(WalletContext& context)
+{
+    DatabaseOptions options;
+    options.create_flags = WALLET_FLAG_DESCRIPTORS;
+    DatabaseStatus status;
+    bilingual_str error;
+    auto database = MakeWalletDatabase("", options, status, error);
+    return TestLoadWallet(std::move(database), context, options.create_flags);
+}
+
+void TestUnloadWallet(WalletContext& context, std::shared_ptr<CWallet>&& wallet)
+{
+    std::vector<bilingual_str> warnings;
+    SyncWithValidationInterfaceQueue();
+    wallet->m_chain_notifications_handler.reset();
+    if (context.coinjoin_loader) {
+        RemoveWallet(context, wallet, /*load_on_start=*/std::nullopt, warnings);
+    }
+    UnloadWallet(std::move(wallet));
+}
+} // namespace wallet
diff --git a/src/wallet/test/util.h b/src/wallet/test/util.h
index b6293072374d..43cfa7b13895 100644
--- a/src/wallet/test/util.h
+++ b/src/wallet/test/util.h
@@ -5,6 +5,7 @@
 #ifndef BITCOIN_WALLET_TEST_UTIL_H
 #define BITCOIN_WALLET_TEST_UTIL_H
 
+#include
 #include
 
 class ArgsManager;
@@ -19,8 +20,13 @@ class Loader;
 
 namespace wallet {
 class CWallet;
+class WalletDatabase;
+struct WalletContext;
 
 std::unique_ptr<CWallet> CreateSyncedWallet(interfaces::Chain& chain, interfaces::CoinJoin::Loader& coinjoin_loader, CChain& cchain, ArgsManager& args, const CKey& key);
+std::shared_ptr<CWallet> TestLoadWallet(WalletContext& context);
+std::shared_ptr<CWallet> TestLoadWallet(std::unique_ptr<WalletDatabase> database, WalletContext& context, uint64_t create_flags);
+void TestUnloadWallet(WalletContext& context, std::shared_ptr<CWallet>&& wallet);
 } // namespace wallet
 
 #endif // BITCOIN_WALLET_TEST_UTIL_H
diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp
index a46efa651414..ef2f95a27317 100644
--- a/src/wallet/test/wallet_tests.cpp
+++ b/src/wallet/test/wallet_tests.cpp
@@ -51,32 +51,6 @@ static_assert(DEFAULT_TRANSACTION_MINFEE >= DEFAULT_MIN_RELAY_TX_FEE, "wallet mi
 
 BOOST_FIXTURE_TEST_SUITE(wallet_tests, WalletTestingSetup)
 
-static std::shared_ptr<CWallet> TestLoadWallet(WalletContext& context)
-{
-    DatabaseOptions options;
-    options.create_flags = WALLET_FLAG_DESCRIPTORS;
-    DatabaseStatus status;
-    bilingual_str error;
-    std::vector<bilingual_str> warnings;
-    auto database = MakeWalletDatabase("", options, status, error);
-    auto wallet = CWallet::Create(context, "", std::move(database), options.create_flags, error, warnings);
-    if (context.coinjoin_loader) {
-        // TODO: see CreateWalletWithoutChain
-        AddWallet(context, wallet);
-    }
-    NotifyWalletLoaded(context, wallet);
-    return wallet;
-}
-
-static void TestUnloadWallet(WalletContext& context, std::shared_ptr<CWallet>&& wallet)
-{
-    std::vector<bilingual_str> warnings;
-    SyncWithValidationInterfaceQueue();
-    wallet->m_chain_notifications_handler.reset();
-    RemoveWallet(context, wallet, /*load_on_start=*/std::nullopt, warnings);
-    UnloadWallet(std::move(wallet));
-}
-
 static CMutableTransaction TestSimpleSpend(const CTransaction& from, uint32_t index, const CKey& key, const CScript& pubkey)
 {
     CMutableTransaction mtx;
@@ -820,8 +794,9 @@
BOOST_FIXTURE_TEST_CASE(CreateWallet, TestChain100Setup)
     // being blocked
     wallet = TestLoadWallet(context);
     BOOST_CHECK(rescan_completed);
-    // AddToWallet events for block_tx and mempool_tx
-    BOOST_CHECK_EQUAL(addtx_count, 2);
+    // Loading will also ask for current mempool transactions
+    // AddToWallet events for block_tx and mempool_tx (x2)
+    BOOST_CHECK_EQUAL(addtx_count, 3);
     {
         LOCK(wallet->cs_wallet);
         BOOST_CHECK_EQUAL(wallet->mapWallet.count(block_tx.GetHash()), 1U);
@@ -835,7 +810,7 @@ BOOST_FIXTURE_TEST_CASE(CreateWallet, TestChain100Setup)
     SyncWithValidationInterfaceQueue();
     // AddToWallet events for block_tx and mempool_tx events are counted a
     // second time as the notification queue is processed
-    BOOST_CHECK_EQUAL(addtx_count, 4);
+    BOOST_CHECK_EQUAL(addtx_count, 5);
 
     TestUnloadWallet(context, std::move(wallet));
 
@@ -857,7 +832,9 @@ BOOST_FIXTURE_TEST_CASE(CreateWallet, TestChain100Setup)
         SyncWithValidationInterfaceQueue();
     });
     wallet = TestLoadWallet(context);
-    BOOST_CHECK_EQUAL(addtx_count, 2);
+    // Since mempool transactions are requested at the end of loading, there will
+    // be 2 additional AddToWallet calls, one from the previous test, and a duplicate for mempool_tx
+    BOOST_CHECK_EQUAL(addtx_count, 2 + 2);
     {
         LOCK(wallet->cs_wallet);
         BOOST_CHECK_EQUAL(wallet->mapWallet.count(block_tx.GetHash()), 1U);