diff --git a/share/rpcauth/README.md b/share/rpcauth/README.md index 6f627b867bca..1b3acb1dac16 100644 --- a/share/rpcauth/README.md +++ b/share/rpcauth/README.md @@ -15,4 +15,5 @@ positional arguments: optional arguments: -h, --help show this help message and exit + -j, --json output to json instead of plain-text ``` diff --git a/share/rpcauth/rpcauth.py b/share/rpcauth/rpcauth.py index 6f94f8fe770c..70b3706f3c0c 100755 --- a/share/rpcauth/rpcauth.py +++ b/share/rpcauth/rpcauth.py @@ -7,6 +7,7 @@ from getpass import getpass from secrets import token_hex, token_urlsafe import hmac +import json def generate_salt(size): """Create size byte hex salt""" @@ -24,6 +25,7 @@ def main(): parser = ArgumentParser(description='Create login credentials for a JSON-RPC user') parser.add_argument('username', help='the username for authentication') parser.add_argument('password', help='leave empty to generate a random password or specify "-" to prompt for password', nargs='?') + parser.add_argument("-j", "--json", help="output to json instead of plain-text", action='store_true') args = parser.parse_args() if not args.password: @@ -35,9 +37,13 @@ def main(): salt = generate_salt(16) password_hmac = password_to_hmac(salt, args.password) - print('String to be appended to dash.conf:') - print(f'rpcauth={args.username}:{salt}${password_hmac}') - print(f'Your password:\n{args.password}') + if args.json: + odict={'username':args.username, 'password':args.password, 'rpcauth':f'{args.username}:{salt}${password_hmac}'} + print(json.dumps(odict)) + else: + print('String to be appended to dash.conf:') + print(f'rpcauth={args.username}:{salt}${password_hmac}') + print(f'Your password:\n{args.password}') if __name__ == '__main__': main() diff --git a/src/Makefile.bench.include b/src/Makefile.bench.include index be7bfe2d2dcc..9bc2bc3894ef 100644 --- a/src/Makefile.bench.include +++ b/src/Makefile.bench.include @@ -45,6 +45,7 @@ bench_bench_dash_SOURCES = \ bench/merkle_root.cpp \ bench/nanobench.cpp \
bench/nanobench.h \ + bench/parse_hex.cpp \ bench/peer_eviction.cpp \ bench/poly1305.cpp \ bench/pool.cpp \ diff --git a/src/addrdb.cpp b/src/addrdb.cpp index 83bc6192ff7b..158407452816 100644 --- a/src/addrdb.cpp +++ b/src/addrdb.cpp @@ -39,7 +39,8 @@ bool SerializeDB(Stream& stream, const Data& data) hashwriter << Params().MessageStart() << data; stream << hashwriter.GetHash(); } catch (const std::exception& e) { - return error("%s: Serialize or I/O error - %s", __func__, e.what()); + LogError("%s: Serialize or I/O error - %s\n", __func__, e.what()); + return false; } return true; @@ -59,7 +60,8 @@ bool SerializeFileDB(const std::string& prefix, const fs::path& path, const Data if (fileout.IsNull()) { fileout.fclose(); remove(pathTmp); - return error("%s: Failed to open file %s", __func__, fs::PathToString(pathTmp)); + LogError("%s: Failed to open file %s\n", __func__, fs::PathToString(pathTmp)); + return false; } // Serialize @@ -71,14 +73,16 @@ bool SerializeFileDB(const std::string& prefix, const fs::path& path, const Data if (!FileCommit(fileout.Get())) { fileout.fclose(); remove(pathTmp); - return error("%s: Failed to flush file %s", __func__, fs::PathToString(pathTmp)); + LogError("%s: Failed to flush file %s\n", __func__, fs::PathToString(pathTmp)); + return false; } fileout.fclose(); // replace existing file, if any, with new file if (!RenameOver(pathTmp, path)) { remove(pathTmp); - return error("%s: Rename-into-place failed", __func__); + LogError("%s: Rename-into-place failed\n", __func__); + return false; } return true; @@ -136,7 +140,7 @@ bool CBanDB::Write(const banmap_t& banSet) } for (const auto& err : errors) { - error("%s", err); + LogError("%s\n", err); } return false; } diff --git a/src/bench/parse_hex.cpp b/src/bench/parse_hex.cpp new file mode 100644 index 000000000000..db3ead043c81 --- /dev/null +++ b/src/bench/parse_hex.cpp @@ -0,0 +1,36 @@ +// Copyright (c) 2024- The Bitcoin Core developers +// Distributed under the MIT software license, 
see the accompanying + file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include <bench/bench.h> +#include <bench/nanobench.h> +#include <cassert> +#include <optional> +#include <random.h> +#include <string> +#include <util/strencodings.h> + +std::string generateHexString(size_t length) { + const auto hex_digits = "0123456789ABCDEF"; + FastRandomContext rng(/*fDeterministic=*/true); + + std::string data; + while (data.size() < length) { + auto digit = hex_digits[rng.randbits(4)]; + data.push_back(digit); + } + return data; +} + +static void HexParse(benchmark::Bench& bench) +{ + auto data = generateHexString(130); // Generates 678B0EDA0A1FD30904D5A65E3568DB82DB2D918B0AD8DEA18A63FECCB877D07CAD1495C7157584D877420EF38B8DA473A6348B4F51811AC13C786B962BEE5668F9 by default + + bench.batch(data.size()).unit("base16").run([&] { + auto result = TryParseHex(data); + assert(result != std::nullopt); // make sure we're measuring the successful case + ankerl::nanobench::doNotOptimizeAway(result); + }); +} + +BENCHMARK(HexParse, benchmark::PriorityLevel::HIGH); diff --git a/src/flatfile.cpp b/src/flatfile.cpp index 0fecf4f50499..1b4433186ebb 100644 --- a/src/flatfile.cpp +++ b/src/flatfile.cpp @@ -82,15 +82,18 @@ bool FlatFileSeq::Flush(const FlatFilePos& pos, bool finalize) { FILE* file = Open(FlatFilePos(pos.nFile, 0)); // Avoid fseek to nPos if (!file) { - return error("%s: failed to open file %d", __func__, pos.nFile); + LogError("%s: failed to open file %d\n", __func__, pos.nFile); + return false; } if (finalize && !TruncateFile(file, pos.nPos)) { fclose(file); - return error("%s: failed to truncate file %d", __func__, pos.nFile); + LogError("%s: failed to truncate file %d\n", __func__, pos.nFile); + return false; } if (!FileCommit(file)) { fclose(file); - return error("%s: failed to commit file %d", __func__, pos.nFile); + LogError("%s: failed to commit file %d\n", __func__, pos.nFile); + return false; } DirectoryCommit(m_dir); diff --git a/src/index/base.cpp b/src/index/base.cpp index d585cac6dd53..0a02e76a2a0c 100644 --- a/src/index/base.cpp +++
b/src/index/base.cpp @@ -204,7 +204,8 @@ bool BaseIndex::Commit() { CDBBatch batch(GetDB()); if (!CommitInternal(batch) || !GetDB().WriteBatch(batch)) { - return error("%s: Failed to commit latest %s state", __func__, GetName()); + LogError("%s: Failed to commit latest %s state\n", __func__, GetName()); + return false; } return true; } diff --git a/src/index/blockfilterindex.cpp b/src/index/blockfilterindex.cpp index 5449a93b0954..635064afdc9b 100644 --- a/src/index/blockfilterindex.cpp +++ b/src/index/blockfilterindex.cpp @@ -146,8 +146,9 @@ bool BlockFilterIndex::Init() // indicate database corruption or a disk failure, and starting the index would cause // further corruption. if (m_db->Exists(DB_FILTER_POS)) { - return error("%s: Cannot read current %s state; index may be corrupted", + LogError("%s: Cannot read current %s state; index may be corrupted\n", __func__, GetName()); + return false; } // If the DB_FILTER_POS is not set, then initialize to the first location. @@ -169,10 +170,12 @@ bool BlockFilterIndex::CommitInternal(CDBBatch& batch) // Flush current filter file to disk. 
AutoFile file{m_filter_fileseq->Open(pos)}; if (file.IsNull()) { - return error("%s: Failed to open filter file %d", __func__, pos.nFile); + LogError("%s: Failed to open filter file %d\n", __func__, pos.nFile); + return false; } if (!FileCommit(file.Get())) { - return error("%s: Failed to commit filter file %d", __func__, pos.nFile); + LogError("%s: Failed to commit filter file %d\n", __func__, pos.nFile); + return false; } batch.Write(DB_FILTER_POS, pos); @@ -191,11 +194,15 @@ bool BlockFilterIndex::ReadFilterFromDisk(const FlatFilePos& pos, const uint256& std::vector encoded_filter; try { filein >> block_hash >> encoded_filter; - if (Hash(encoded_filter) != hash) return error("Checksum mismatch in filter decode."); + if (Hash(encoded_filter) != hash) { + LogError("Checksum mismatch in filter decode.\n"); + return false; + } filter = BlockFilter(GetFilterType(), block_hash, std::move(encoded_filter), /*skip_decode_check=*/true); } catch (const std::exception& e) { - return error("%s: Failed to deserialize block filter from disk: %s", __func__, e.what()); + LogError("%s: Failed to deserialize block filter from disk: %s\n", __func__, e.what()); + return false; } return true; @@ -264,8 +271,9 @@ bool BlockFilterIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex uint256 expected_block_hash = pindex->pprev->GetBlockHash(); if (read_out.first != expected_block_hash) { - return error("%s: previous block header belongs to unexpected block %s; expected %s", + LogError("%s: previous block header belongs to unexpected block %s; expected %s\n", __func__, read_out.first.ToString(), expected_block_hash.ToString()); + return false; } prev_header = read_out.second.header; @@ -299,14 +307,16 @@ bool BlockFilterIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex for (int height = start_height; height <= stop_height; ++height) { if (!db_it.GetKey(key) || key.height != height) { - return error("%s: unexpected key in %s: expected (%c, %d)", + LogError("%s: 
unexpected key in %s: expected (%c, %d)\n", __func__, index_name, DB_BLOCK_HEIGHT, height); + return false; } std::pair value; if (!db_it.GetValue(value)) { - return error("%s: unable to read value in %s at key (%c, %d)", + LogError("%s: unable to read value in %s at key (%c, %d)\n", __func__, index_name, DB_BLOCK_HEIGHT, height); + return false; } batch.Write(DBHashKey(value.first), std::move(value.second)); @@ -361,11 +371,13 @@ static bool LookupRange(CDBWrapper& db, const std::string& index_name, int start const CBlockIndex* stop_index, std::vector& results) { if (start_height < 0) { - return error("%s: start height (%d) is negative", __func__, start_height); + LogError("%s: start height (%d) is negative\n", __func__, start_height); + return false; } if (start_height > stop_index->nHeight) { - return error("%s: start height (%d) is greater than stop height (%d)", + LogError("%s: start height (%d) is greater than stop height (%d)\n", __func__, start_height, stop_index->nHeight); + return false; } size_t results_size = static_cast(stop_index->nHeight - start_height + 1); @@ -381,8 +393,9 @@ static bool LookupRange(CDBWrapper& db, const std::string& index_name, int start size_t i = static_cast(height - start_height); if (!db_it->GetValue(values[i])) { - return error("%s: unable to read value in %s at key (%c, %d)", + LogError("%s: unable to read value in %s at key (%c, %d)\n", __func__, index_name, DB_BLOCK_HEIGHT, height); + return false; } db_it->Next(); @@ -404,8 +417,9 @@ static bool LookupRange(CDBWrapper& db, const std::string& index_name, int start } if (!db.Read(DBHashKey(block_hash), results[i])) { - return error("%s: unable to read value in %s at key (%c, %s)", + LogError("%s: unable to read value in %s at key (%c, %s)\n", __func__, index_name, DB_BLOCK_HASH, block_hash.ToString()); + return false; } } diff --git a/src/index/coinstatsindex.cpp b/src/index/coinstatsindex.cpp index 49c81ba1efbc..5a53920f5806 100644 --- a/src/index/coinstatsindex.cpp +++ 
b/src/index/coinstatsindex.cpp @@ -135,8 +135,9 @@ bool CoinStatsIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex) read_out.first.ToString(), expected_block_hash.ToString()); if (!m_db->Read(DBHashKey(expected_block_hash), read_out)) { - return error("%s: previous block header not found; expected %s", + LogError("%s: previous block header not found; expected %s\n", __func__, expected_block_hash.ToString()); + return false; } } @@ -241,14 +242,16 @@ bool CoinStatsIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex) for (int height = start_height; height <= stop_height; ++height) { if (!db_it.GetKey(key) || key.height != height) { - return error("%s: unexpected key in %s: expected (%c, %d)", + LogError("%s: unexpected key in %s: expected (%c, %d)\n", __func__, index_name, DB_BLOCK_HEIGHT, height); + return false; } std::pair value; if (!db_it.GetValue(value)) { - return error("%s: unable to read value in %s at key (%c, %d)", + LogError("%s: unable to read value in %s at key (%c, %d)\n", __func__, index_name, DB_BLOCK_HEIGHT, height); + return false; } batch.Write(DBHashKey(value.first), std::move(value.second)); @@ -283,8 +286,9 @@ bool CoinStatsIndex::Rewind(const CBlockIndex* current_tip, const CBlockIndex* n CBlock block; if (!ReadBlockFromDisk(block, iter_tip, consensus_params)) { - return error("%s: Failed to read block %s from disk", + LogError("%s: Failed to read block %s from disk\n", __func__, iter_tip->GetBlockHash().ToString()); + return false; } if (!ReverseBlock(block, iter_tip)) { @@ -351,8 +355,9 @@ bool CoinStatsIndex::Init() // exist. Any other errors indicate database corruption or a disk // failure, and starting the index would cause further corruption. 
if (m_db->Exists(DB_MUHASH)) { - return error("%s: Cannot read current %s state; index may be corrupted", + LogError("%s: Cannot read current %s state; index may be corrupted\n", __func__, GetName()); + return false; } } @@ -363,14 +368,16 @@ bool CoinStatsIndex::Init() if (pindex) { DBVal entry; if (!LookUpOne(*m_db, pindex, entry)) { - return error("%s: Cannot read current %s state; index may be corrupted", + LogError("%s: Cannot read current %s state; index may be corrupted\n", __func__, GetName()); + return false; } uint256 out; m_muhash.Finalize(out); if (entry.muhash != out) { - return error("%s: Cannot read current %s state; index may be corrupted", + LogError("%s: Cannot read current %s state; index may be corrupted\n", __func__, GetName()); + return false; } m_transaction_output_count = entry.transaction_output_count; m_bogo_size = entry.bogo_size; @@ -422,8 +429,9 @@ bool CoinStatsIndex::ReverseBlock(const CBlock& block, const CBlockIndex* pindex read_out.first.ToString(), expected_block_hash.ToString()); if (!m_db->Read(DBHashKey(expected_block_hash), read_out)) { - return error("%s: previous block header not found; expected %s", + LogError("%s: previous block header not found; expected %s\n", __func__, expected_block_hash.ToString()); + return false; } } } diff --git a/src/index/txindex.cpp b/src/index/txindex.cpp index 1a6a0cf7fe01..a915231c4019 100644 --- a/src/index/txindex.cpp +++ b/src/index/txindex.cpp @@ -82,20 +82,24 @@ bool TxIndex::FindTx(const uint256& tx_hash, uint256& block_hash, CTransactionRe AutoFile file{OpenBlockFile(postx, true)}; if (file.IsNull()) { - return error("%s: OpenBlockFile failed", __func__); + LogError("%s: OpenBlockFile failed\n", __func__); + return false; } CBlockHeader header; try { file >> header; if (fseek(file.Get(), postx.nTxOffset, SEEK_CUR)) { - return error("%s: fseek(...) failed", __func__); + LogError("%s: fseek(...) 
failed\n", __func__); + return false; } file >> tx; } catch (const std::exception& e) { - return error("%s: Deserialize or I/O error - %s", __func__, e.what()); + LogError("%s: Deserialize or I/O error - %s\n", __func__, e.what()); + return false; } if (tx->GetHash() != tx_hash) { - return error("%s: txid mismatch", __func__); + LogError("%s: txid mismatch\n", __func__); + return false; } block_hash = header.GetHash(); return true; diff --git a/src/kernel/coinstats.cpp b/src/kernel/coinstats.cpp index 5e8549efcb45..a649aab470fb 100644 --- a/src/kernel/coinstats.cpp +++ b/src/kernel/coinstats.cpp @@ -120,7 +120,8 @@ static bool ComputeUTXOStats(CCoinsView* view, CCoinsStats& stats, T hash_obj, c outputs[key.n] = std::move(coin); stats.coins_count++; } else { - return error("%s: unable to read value", __func__); + LogError("%s: unable to read value\n", __func__); + return false; } pcursor->Next(); } diff --git a/src/logging.h b/src/logging.h index d0b2069319f0..cc37a65a8449 100644 --- a/src/logging.h +++ b/src/logging.h @@ -257,7 +257,7 @@ std::string SafeStringFormat(const std::string& fmt, const Args&... args) } } -// Be conservative when using LogPrintf/error or other things which +// Be conservative when using functions that // unconditionally log to debug.log! It should not be the case that an inbound // peer can fill up a user's disk with debug.log entries. diff --git a/src/net.cpp b/src/net.cpp index 21648dbd01c5..a3a1a9ade7ad 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -570,7 +570,7 @@ void CNode::SetAddrLocal(const CService& addrLocalIn) { AssertLockNotHeld(m_addr_local_mutex); LOCK(m_addr_local_mutex); if (addrLocal.IsValid()) { - error("Addr local already set for node: %i. Refusing to change from %s to %s", id, addrLocal.ToStringAddrPort(), addrLocalIn.ToStringAddrPort()); + LogError("Addr local already set for node: %i. 
Refusing to change from %s to %s\n", id, addrLocal.ToStringAddrPort(), addrLocalIn.ToStringAddrPort()); } else { addrLocal = addrLocalIn; } diff --git a/src/netbase.cpp b/src/netbase.cpp index 9144f5bd20e0..b6e81795d08e 100644 --- a/src/netbase.cpp +++ b/src/netbase.cpp @@ -372,7 +372,8 @@ bool Socks5(const std::string& strDest, uint16_t port, const ProxyCredentials* a IntrRecvError recvr; LogPrint(BCLog::NET, "SOCKS5 connecting %s\n", strDest); if (strDest.size() > 255) { - return error("Hostname too long"); + LogError("Hostname too long\n"); + return false; } // Construct the version identifier/method selection message std::vector vSocks5Init; @@ -387,7 +388,8 @@ bool Socks5(const std::string& strDest, uint16_t port, const ProxyCredentials* a } ssize_t ret = sock.Send(vSocks5Init.data(), vSocks5Init.size(), MSG_NOSIGNAL); if (ret != (ssize_t)vSocks5Init.size()) { - return error("Error sending to proxy"); + LogError("Error sending to proxy\n"); + return false; } uint8_t pchRet1[2]; if (InterruptibleRecv(pchRet1, 2, g_socks5_recv_timeout, sock) != IntrRecvError::OK) { @@ -395,34 +397,42 @@ bool Socks5(const std::string& strDest, uint16_t port, const ProxyCredentials* a return false; } if (pchRet1[0] != SOCKSVersion::SOCKS5) { - return error("Proxy failed to initialize"); + LogError("Proxy failed to initialize\n"); + return false; } if (pchRet1[1] == SOCKS5Method::USER_PASS && auth) { // Perform username/password authentication (as described in RFC1929) std::vector vAuth; vAuth.push_back(0x01); // Current (and only) version of user/pass subnegotiation - if (auth->username.size() > 255 || auth->password.size() > 255) - return error("Proxy username or password too long"); + if (auth->username.size() > 255 || auth->password.size() > 255) { + LogError("Proxy username or password too long\n"); + return false; + } + vAuth.push_back(auth->username.size()); vAuth.insert(vAuth.end(), auth->username.begin(), auth->username.end()); vAuth.push_back(auth->password.size()); 
vAuth.insert(vAuth.end(), auth->password.begin(), auth->password.end()); ret = sock.Send(vAuth.data(), vAuth.size(), MSG_NOSIGNAL); if (ret != (ssize_t)vAuth.size()) { - return error("Error sending authentication to proxy"); + LogError("Error sending authentication to proxy\n"); + return false; } LogPrint(BCLog::PROXY, "SOCKS5 sending proxy authentication %s:%s\n", auth->username, auth->password); uint8_t pchRetA[2]; if (InterruptibleRecv(pchRetA, 2, g_socks5_recv_timeout, sock) != IntrRecvError::OK) { - return error("Error reading proxy authentication response"); + LogError("Error reading proxy authentication response\n"); + return false; } if (pchRetA[0] != 0x01 || pchRetA[1] != 0x00) { - return error("Proxy authentication unsuccessful"); + LogError("Proxy authentication unsuccessful\n"); + return false; } } else if (pchRet1[1] == SOCKS5Method::NOAUTH) { // Perform no authentication } else { - return error("Proxy requested wrong authentication method %02x", pchRet1[1]); + LogError("Proxy requested wrong authentication method %02x\n", pchRet1[1]); + return false; } std::vector vSocks5; vSocks5.push_back(SOCKSVersion::SOCKS5); // VER protocol version @@ -435,7 +445,8 @@ bool Socks5(const std::string& strDest, uint16_t port, const ProxyCredentials* a vSocks5.push_back((port >> 0) & 0xFF); ret = sock.Send(vSocks5.data(), vSocks5.size(), MSG_NOSIGNAL); if (ret != (ssize_t)vSocks5.size()) { - return error("Error sending to proxy"); + LogError("Error sending to proxy\n"); + return false; } uint8_t pchRet2[4]; if ((recvr = InterruptibleRecv(pchRet2, 4, g_socks5_recv_timeout, sock)) != IntrRecvError::OK) { @@ -445,11 +456,13 @@ bool Socks5(const std::string& strDest, uint16_t port, const ProxyCredentials* a * error message. 
*/ return false; } else { - return error("Error while reading proxy response"); + LogError("Error while reading proxy response\n"); + return false; } } if (pchRet2[0] != SOCKSVersion::SOCKS5) { - return error("Proxy failed to accept request"); + LogError("Proxy failed to accept request\n"); + return false; } if (pchRet2[1] != SOCKS5Reply::SUCCEEDED) { // Failures to connect to a peer that are not proxy errors @@ -457,7 +470,8 @@ bool Socks5(const std::string& strDest, uint16_t port, const ProxyCredentials* a return false; } if (pchRet2[2] != 0x00) { // Reserved field must be 0 - return error("Error: malformed proxy response"); + LogError("Error: malformed proxy response\n"); + return false; } uint8_t pchRet3[256]; switch (pchRet2[3]) @@ -468,19 +482,25 @@ bool Socks5(const std::string& strDest, uint16_t port, const ProxyCredentials* a { recvr = InterruptibleRecv(pchRet3, 1, g_socks5_recv_timeout, sock); if (recvr != IntrRecvError::OK) { - return error("Error reading from proxy"); + LogError("Error reading from proxy\n"); + return false; } int nRecv = pchRet3[0]; recvr = InterruptibleRecv(pchRet3, nRecv, g_socks5_recv_timeout, sock); break; } - default: return error("Error: malformed proxy response"); + default: { + LogError("Error: malformed proxy response\n"); + return false; + } } if (recvr != IntrRecvError::OK) { - return error("Error reading from proxy"); + LogError("Error reading from proxy\n"); + return false; } if (InterruptibleRecv(pchRet3, 2, g_socks5_recv_timeout, sock) != IntrRecvError::OK) { - return error("Error reading from proxy"); + LogError("Error reading from proxy\n"); + return false; } LogPrint(BCLog::NET, "SOCKS5 connected %s\n", strDest); return true; diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index d6606442475a..0e89d24f3e7f 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -291,7 +291,8 @@ bool BlockManager::LoadBlockIndex() for (CBlockIndex* pindex : vSortedByHeight) { if (ShutdownRequested()) 
return false; if (previous_index && pindex->nHeight > previous_index->nHeight + 1) { - return error("%s: block index is non-contiguous, index of height %d missing", __func__, previous_index->nHeight + 1); + LogError("%s: block index is non-contiguous, index of height %d missing\n", __func__, previous_index->nHeight + 1); + return false; } previous_index = pindex; pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex); @@ -489,7 +490,8 @@ static bool UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const // Open history file to append CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION); if (fileout.IsNull()) { - return error("%s: OpenUndoFile failed", __func__); + LogError("%s: OpenUndoFile failed\n", __func__); + return false; } // Write index header @@ -499,7 +501,8 @@ static bool UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const // Write undo data long fileOutPos = ftell(fileout.Get()); if (fileOutPos < 0) { - return error("%s: ftell failed", __func__); + LogError("%s: ftell failed\n", __func__); + return false; } pos.nPos = (unsigned int)fileOutPos; fileout << blockundo; @@ -518,13 +521,15 @@ bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex) const FlatFilePos pos{WITH_LOCK(::cs_main, return pindex->GetUndoPos())}; if (pos.IsNull()) { - return error("%s: no undo data available", __func__); + LogError("%s: no undo data available\n", __func__); + return false; } // Open history file to read CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION); if (filein.IsNull()) { - return error("%s: OpenUndoFile failed", __func__); + LogError("%s: OpenUndoFile failed\n", __func__); + return false; } // Read block @@ -535,12 +540,14 @@ bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex) verifier >> blockundo; filein >> hashChecksum; } catch (const std::exception& e) { - return error("%s: Deserialize or I/O error - %s", __func__, e.what()); + 
LogError("%s: Deserialize or I/O error - %s\n", __func__, e.what()); + return false; } // Verify checksum if (hashChecksum != verifier.GetHash()) { - return error("%s: Checksum mismatch", __func__); + LogError("%s: Checksum mismatch\n", __func__); + return false; } return true; @@ -708,7 +715,8 @@ static bool WriteBlockToDisk(const CBlock& block, FlatFilePos& pos, const CMessa // Open history file to append CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION); if (fileout.IsNull()) { - return error("WriteBlockToDisk: OpenBlockFile failed"); + LogError("WriteBlockToDisk: OpenBlockFile failed\n"); + return false; } // Write index header @@ -718,7 +726,8 @@ static bool WriteBlockToDisk(const CBlock& block, FlatFilePos& pos, const CMessa // Write block long fileOutPos = ftell(fileout.Get()); if (fileOutPos < 0) { - return error("WriteBlockToDisk: ftell failed"); + LogError("WriteBlockToDisk: ftell failed\n"); + return false; } pos.nPos = (unsigned int)fileOutPos; fileout << block; @@ -733,7 +742,8 @@ bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValid if (block.GetUndoPos().IsNull()) { FlatFilePos _pos; if (!FindUndoPos(state, block.nFile, _pos, ::GetSerializeSize(blockundo, CLIENT_VERSION) + 40)) { - return error("ConnectBlock(): FindUndoPos failed"); + LogError("ConnectBlock(): FindUndoPos failed\n"); + return false; } if (!UndoWriteToDisk(blockundo, _pos, block.pprev->GetBlockHash(), GetParams().MessageStart())) { return AbortNode(state, "Failed to write undo data"); @@ -796,6 +806,7 @@ bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus if (*hash != pindex->GetBlockHash()) { - return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s", pindex->ToString(), block_pos.ToString()); + LogError("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s\n", pindex->ToString(), block_pos.ToString()); + return false; } return true; } @@ -814,7 +825,7 @@ FlatFilePos BlockManager::SaveBlockToDisk(const CBlock& block, int nHeight, CCha nBlockSize +=
static_cast(BLOCK_SERIALIZATION_HEADER_SIZE); } if (!FindBlockPos(blockPos, nBlockSize, nHeight, active_chain, block.GetBlockTime(), position_known)) { - error("%s: FindBlockPos failed", __func__); + LogError("%s: FindBlockPos failed\n", __func__); return FlatFilePos(); } if (!position_known) { diff --git a/src/script/signingprovider.cpp b/src/script/signingprovider.cpp index 3f6c9c2ad7c8..2eee235126ad 100644 --- a/src/script/signingprovider.cpp +++ b/src/script/signingprovider.cpp @@ -110,8 +110,10 @@ bool FillableSigningProvider::GetKey(const CKeyID &address, CKey &keyOut) const bool FillableSigningProvider::AddCScript(const CScript& redeemScript) { - if (redeemScript.size() > MAX_SCRIPT_ELEMENT_SIZE) - return error("FillableSigningProvider::AddCScript(): redeemScripts > %i bytes are invalid", MAX_SCRIPT_ELEMENT_SIZE); + if (redeemScript.size() > MAX_SCRIPT_ELEMENT_SIZE) { + LogError("FillableSigningProvider::AddCScript(): redeemScripts > %i bytes are invalid\n", MAX_SCRIPT_ELEMENT_SIZE); + return false; + } LOCK(cs_KeyStore); mapScripts[CScriptID(redeemScript)] = redeemScript; diff --git a/src/test/serialize_tests.cpp b/src/test/serialize_tests.cpp index d0b6cc774ccf..ea5b8e5e1e2d 100644 --- a/src/test/serialize_tests.cpp +++ b/src/test/serialize_tests.cpp @@ -84,6 +84,8 @@ BOOST_AUTO_TEST_CASE(sizes) BOOST_CHECK_EQUAL(GetSerializeSize(int64_t(0), 0), 8U); BOOST_CHECK_EQUAL(GetSerializeSize(uint64_t(0), 0), 8U); BOOST_CHECK_EQUAL(GetSerializeSize(bool(0), 0), 1U); + BOOST_CHECK_EQUAL(GetSerializeSize(std::array{0}, 0), 1U); + BOOST_CHECK_EQUAL(GetSerializeSize(std::array{0, 0}, 0), 2U); } BOOST_AUTO_TEST_CASE(varints) @@ -178,6 +180,16 @@ BOOST_AUTO_TEST_CASE(vector_bool) BOOST_CHECK(SerializeHash(vec1) == SerializeHash(vec2)); } +BOOST_AUTO_TEST_CASE(array) +{ + std::array array1{1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1}; + DataStream ds; + ds << array1; + std::array array2; + ds >> array2; + BOOST_CHECK(array1 == array2); 
+} + BOOST_AUTO_TEST_CASE(noncanonical) { // Write some non-canonical CompactSize encodings, and diff --git a/src/util/strencodings.cpp b/src/util/strencodings.cpp index cbbbb5c9692b..5f0aea5d5cd0 100644 --- a/src/util/strencodings.cpp +++ b/src/util/strencodings.cpp @@ -81,6 +81,8 @@ template std::optional> TryParseHex(std::string_view str) { std::vector vch; + vch.reserve(str.size() / 2); // two hex characters form a single byte + auto it = str.begin(); while (it != str.end()) { if (IsSpace(*it)) { diff --git a/src/validation.cpp b/src/validation.cpp index 2a16f339b631..8a77f9c80bbc 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -771,8 +771,10 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) return false; // state filled in by CheckTransaction } - if (!ContextualCheckTransaction(tx, state, chainparams.GetConsensus(), m_active_chainstate.m_chain.Tip())) - return error("%s: ContextualCheckTransaction: %s, %s", __func__, hash.ToString(), state.ToString()); + if (!ContextualCheckTransaction(tx, state, chainparams.GetConsensus(), m_active_chainstate.m_chain.Tip())) { + LogError("%s: ContextualCheckTransaction: %s, %s\n", __func__, hash.ToString(), state.ToString()); + return false; + } if (tx.IsSpecialTxVersion() && tx.nType == TRANSACTION_QUORUM_COMMITMENT) { // quorum commitment is not allowed outside of blocks @@ -2019,12 +2021,12 @@ DisconnectResult CChainState::DisconnectBlock(const CBlock& block, const CBlockI CBlockUndo blockUndo; if (!UndoReadFromDisk(blockUndo, pindex)) { - error("DisconnectBlock(): failure reading undo data"); + LogError("DisconnectBlock(): failure reading undo data\n"); return DISCONNECT_FAILED; } if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) { - error("DisconnectBlock(): block and undo data inconsistent"); + LogError("DisconnectBlock(): block and undo data inconsistent\n"); return DISCONNECT_FAILED; } @@ -2092,7 +2094,7 @@ DisconnectResult CChainState::DisconnectBlock(const CBlock& block, const CBlockI 
if (i > 0) { // not coinbases CTxUndo &txundo = blockUndo.vtxundo[i-1]; if (txundo.vprevout.size() != tx.vin.size()) { - error("DisconnectBlock(): transaction and undo data inconsistent"); + LogError("DisconnectBlock(): transaction and undo data inconsistent\n"); return DISCONNECT_FAILED; } for (unsigned int j = tx.vin.size(); j > 0;) { @@ -2313,7 +2315,8 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state, // problems. return AbortNode(state, "Corrupt block found indicating potential hardware failure; shutting down"); } - return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString()); + LogError("%s: Consensus::CheckBlock: %s\n", __func__, state.ToString()); + return false; } if (pindex->pprev && pindex->phashBlock && m_chain_helper->HasConflictingChainLock(pindex->nHeight, pindex->GetBlockHash())) { @@ -2457,8 +2460,9 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state, // MUST process special txes before updating UTXO to ensure consistency between mempool and block processing std::optional mnlist_updates_opt{std::nullopt}; if (!m_chain_helper->special_tx->ProcessSpecialTxsInBlock(block, pindex, view, fJustCheck, fScriptChecks, state, mnlist_updates_opt)) { - return error("ConnectBlock(DASH): ProcessSpecialTxsInBlock for block %s failed with %s", - pindex->GetBlockHash().ToString(), state.ToString()); + LogError("ConnectBlock(DASH): ProcessSpecialTxsInBlock for block %s failed with %s\n", + pindex->GetBlockHash().ToString(), state.ToString()); + return false; } int64_t nTime2_1 = GetTimeMicros(); nTimeProcessSpecial += nTime2_1 - nTime2; @@ -2479,7 +2483,7 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state, TxValidationState tx_state; if (!Consensus::CheckTxInputs(tx, tx_state, view, pindex->nHeight, txfee)) { // Any transaction validation failure in ConnectBlock is a block consensus failure - LogPrintf("ERROR: %s: Consensus::CheckTxInputs: %s, %s\n", 
__func__, tx.GetHash().ToString(), state.ToString()); + LogError("%s: Consensus::CheckTxInputs: %s, %s\n", __func__, tx.GetHash().ToString(), state.ToString()); return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, tx_state.GetRejectReason(), tx_state.GetDebugMessage()); } @@ -2553,7 +2557,7 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state, // Any transaction validation failure in ConnectBlock is a block consensus failure state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, tx_state.GetRejectReason(), tx_state.GetDebugMessage()); - LogPrintf("ERROR: ConnectBlock(): CheckInputScripts on %s failed with %s\n", + LogError("ConnectBlock(): CheckInputScripts on %s failed with %s\n", tx.GetHash().ToString(), state.ToString()); return false; } @@ -3046,7 +3050,8 @@ bool CChainState::DisconnectTip(BlockValidationState& state, DisconnectedBlockTr std::shared_ptr pblock = std::make_shared(); CBlock& block = *pblock; if (!ReadBlockFromDisk(block, pindexDelete, m_params.GetConsensus())) { - return error("DisconnectTip(): Failed to read block"); + LogError("DisconnectTip(): Failed to read block\n"); + return false; } // Apply the block atomically to the chain state. 
int64_t nStart = GetTimeMicros(); @@ -3055,8 +3060,10 @@ bool CChainState::DisconnectTip(BlockValidationState& state, DisconnectedBlockTr CCoinsViewCache view(&CoinsTip()); assert(view.GetBestBlock() == pindexDelete->GetBlockHash()); - if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK) - return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString()); + if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK) { + LogError("DisconnectTip(): DisconnectBlock %s failed\n", pindexDelete->GetBlockHash().ToString()); + return false; + } bool flushed = view.Flush(); assert(flushed); dbTx->Commit(); @@ -3200,7 +3207,8 @@ bool CChainState::ConnectTip(BlockValidationState& state, CBlockIndex* pindexNew if (!rv) { if (state.IsInvalid()) InvalidBlockFound(pindexNew, state); - return error("%s: ConnectBlock %s failed, %s", __func__, pindexNew->GetBlockHash().ToString(), state.ToString()); + LogError("%s: ConnectBlock %s failed, %s\n", __func__, pindexNew->GetBlockHash().ToString(), state.ToString()); + return false; } nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2; assert(nBlocksTotal > 0); @@ -4419,7 +4427,8 @@ bool CChainState::AcceptBlock(const std::shared_ptr& pblock, Block pindex->nStatus |= BLOCK_FAILED_VALID; m_blockman.m_dirty_blockindex.insert(pindex); } - return error("%s: %s", __func__, state.ToString()); + LogError("%s: %s\n", __func__, state.ToString()); + return false; } // Header is valid/has work, merkle tree is good...RELAY NOW @@ -4478,15 +4487,18 @@ bool ChainstateManager::ProcessNewBlock(const std::shared_ptr& blo } if (!ret) { GetMainSignals().BlockChecked(*block, state); - return error("%s: AcceptBlock FAILED: %s", __func__, state.ToString()); + LogError("%s: AcceptBlock FAILED (%s)\n", __func__, state.ToString()); + return false; } } NotifyHeaderTip(ActiveChainstate()); BlockValidationState state; // Only used to report errors, not invalidity - ignore it - if 
(!ActiveChainstate().ActivateBestChain(state, block)) - return error("%s: ActivateBestChain failed: %s", __func__, state.ToString()); + if (!ActiveChainstate().ActivateBestChain(state, block)) { + LogError("%s: ActivateBestChain failed (%s)\n", __func__, state.ToString()); + return false; + } LogPrintf("%s : ACCEPTED\n", __func__); return true; @@ -4540,12 +4552,18 @@ bool TestBlockValidity(BlockValidationState& state, auto dbTx = evoDb.BeginTransaction(); // NOTE: CheckBlockHeader is called by CheckBlock - if (!ContextualCheckBlockHeader(block, state, chainstate.m_blockman, chainstate.m_chainman, pindexPrev, GetAdjustedTime())) - return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, state.ToString()); - if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot)) - return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString()); - if (!ContextualCheckBlock(block, state, chainstate.m_chainman, pindexPrev)) - return error("%s: Consensus::ContextualCheckBlock: %s", __func__, state.ToString()); + if (!ContextualCheckBlockHeader(block, state, chainstate.m_blockman, chainstate.m_chainman, pindexPrev, GetAdjustedTime())) { + LogError("%s: Consensus::ContextualCheckBlockHeader: %s\n", __func__, state.ToString()); + return false; + } + if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot)) { + LogError("%s: Consensus::CheckBlock: %s\n", __func__, state.ToString()); + return false; + } + if (!ContextualCheckBlock(block, state, chainstate.m_chainman, pindexPrev)) { + LogError("%s: Consensus::ContextualCheckBlock: %s\n", __func__, state.ToString()); + return false; + } if (!chainstate.ConnectBlock(block, state, &indexDummy, viewNew, true)) return false; @@ -4669,19 +4687,22 @@ bool CVerifyDB::VerifyDB( CBlock block; // check level 0: read from disk if (!ReadBlockFromDisk(block, pindex, consensus_params)) { - return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", 
pindex->nHeight, pindex->GetBlockHash().ToString()); + LogError("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); + return false; } // check level 1: verify block validity if (nCheckLevel >= 1 && !CheckBlock(block, state, consensus_params)) { - return error("%s: *** found bad block at %d, hash=%s (%s)\n", __func__, - pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString()); + LogError("%s: *** found bad block at %d, hash=%s (%s)\n", __func__, + pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString()); + return false; } // check level 2: verify undo validity if (nCheckLevel >= 2 && pindex) { CBlockUndo undo; if (!pindex->GetUndoPos().IsNull()) { if (!UndoReadFromDisk(undo, pindex)) { - return error("VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); + LogError("VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); + return false; } } } @@ -4693,7 +4714,8 @@ bool CVerifyDB::VerifyDB( assert(coins.GetBestBlock() == pindex->GetBlockHash()); DisconnectResult res = chainstate.DisconnectBlock(block, pindex, coins); if (res == DISCONNECT_FAILED) { - return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); + LogError("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); + return false; } if (res == DISCONNECT_UNCLEAN) { nGoodTransactions = 0; @@ -4708,7 +4730,8 @@ bool CVerifyDB::VerifyDB( if (ShutdownRequested()) return true; } if (pindexFailure) { - return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainstate.m_chain.Height() - pindexFailure->nHeight + 1, nGoodTransactions); + LogError("VerifyDB(): *** coin database inconsistencies found (last %i 
blocks, %i good transactions before that)\n", chainstate.m_chain.Height() - pindexFailure->nHeight + 1, nGoodTransactions); + return false; } if (skipped_l3_checks) { LogPrintf("Skipped verification of level >=3 (insufficient database cache size). Consider increasing -dbcache.\n"); @@ -4728,10 +4751,14 @@ bool CVerifyDB::VerifyDB( uiInterface.ShowProgress(_("Verifying blocks…").translated, percentageDone, false); pindex = chainstate.m_chain.Next(pindex); CBlock block; - if (!ReadBlockFromDisk(block, pindex, consensus_params)) - return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); - if (!chainstate.ConnectBlock(block, state, pindex, coins)) - return error("VerifyDB(): *** found unconnectable block at %d, hash=%s (%s)", pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString()); + if (!ReadBlockFromDisk(block, pindex, consensus_params)) { + LogError("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); + return false; + } + if (!chainstate.ConnectBlock(block, state, pindex, coins)) { + LogError("VerifyDB(): *** found unconnectable block at %d, hash=%s (%s)\n", pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString()); + return false; + } if (ShutdownRequested()) return true; } } @@ -4751,15 +4778,17 @@ bool CChainState::RollforwardBlock(const CBlockIndex* pindex, CCoinsViewCache& i // TODO: merge with ConnectBlock CBlock block; if (!ReadBlockFromDisk(block, pindex, m_params.GetConsensus())) { - return error("RollforwardBlock(): ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); + LogError("RollforwardBlock(): ReadBlockFromDisk failed at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); + return false; } // MUST process special txes before updating UTXO to ensure consistency between mempool and block processing BlockValidationState state; std::optional 
mnlist_updates_opt{std::nullopt}; if (!m_chain_helper->special_tx->ProcessSpecialTxsInBlock(block, pindex, inputs, false /*fJustCheck*/, false /*fScriptChecks*/, state, mnlist_updates_opt)) { - return error("RollforwardBlock(DASH): ProcessSpecialTxsInBlock for block %s failed with %s", - pindex->GetBlockHash().ToString(), state.ToString()); + LogError("RollforwardBlock(DASH): ProcessSpecialTxsInBlock for block %s failed with %s\n", + pindex->GetBlockHash().ToString(), state.ToString()); + return false; } std::vector addressIndex; @@ -4828,22 +4857,28 @@ bool CChainState::RollforwardBlock(const CBlockIndex* pindex, CCoinsViewCache& i if (fAddressIndex) { if (!m_blockman.m_block_tree_db->WriteAddressIndex(addressIndex)) { - return error("RollforwardBlock(DASH): Failed to write address index"); + LogError("RollforwardBlock(DASH): Failed to write address index\n"); + return false; } if (!m_blockman.m_block_tree_db->UpdateAddressUnspentIndex(addressUnspentIndex)) { - return error("RollforwardBlock(DASH): Failed to write address unspent index"); + LogError("RollforwardBlock(DASH): Failed to write address unspent index\n"); + return false; } } if (fSpentIndex) { - if (!m_blockman.m_block_tree_db->UpdateSpentIndex(spentIndex)) - return error("RollforwardBlock(DASH): Failed to write transaction index"); + if (!m_blockman.m_block_tree_db->UpdateSpentIndex(spentIndex)) { + LogError("RollforwardBlock(DASH): Failed to write transaction index\n"); + return false; + } } if (fTimestampIndex) { - if (!m_blockman.m_block_tree_db->WriteTimestampIndex(CTimestampIndexKey(pindex->nTime, pindex->GetBlockHash()))) - return error("RollforwardBlock(DASH): Failed to write timestamp index"); + if (!m_blockman.m_block_tree_db->WriteTimestampIndex(CTimestampIndexKey(pindex->nTime, pindex->GetBlockHash()))) { + LogError("RollforwardBlock(DASH): Failed to write timestamp index\n"); + return false; + } } return true; @@ -4858,7 +4893,10 @@ bool CChainState::ReplayBlocks() std::vector hashHeads = 
db.GetHeadBlocks(); if (hashHeads.empty()) return true; // We're already in a consistent state. - if (hashHeads.size() != 2) return error("ReplayBlocks(): unknown inconsistent state"); + if (hashHeads.size() != 2) { + LogError("ReplayBlocks(): unknown inconsistent state\n"); + return false; + } uiInterface.ShowProgress(_("Replaying blocks…").translated, 0, false); LogPrintf("Replaying blocks\n"); @@ -4868,20 +4906,23 @@ const CBlockIndex* pindexFork = nullptr; // Latest block common to both the old and the new tip. if (m_blockman.m_block_index.count(hashHeads[0]) == 0) { - return error("ReplayBlocks(): reorganization to unknown block requested"); + LogError("ReplayBlocks(): reorganization to unknown block requested\n"); + return false; } pindexNew = &(m_blockman.m_block_index[hashHeads[0]]); if (!hashHeads[1].IsNull()) { // The old tip is allowed to be 0, indicating it's the first flush. if (m_blockman.m_block_index.count(hashHeads[1]) == 0) { - return error("ReplayBlocks(): reorganization from unknown block requested"); + LogError("ReplayBlocks(): reorganization from unknown block requested\n"); + return false; } pindexOld = &(m_blockman.m_block_index[hashHeads[1]]); pindexFork = LastCommonAncestor(pindexOld, pindexNew); assert(pindexFork != nullptr); const bool fDIP0003Active = DeploymentActiveAt(*pindexOld, m_params.GetConsensus(), Consensus::DEPLOYMENT_DIP0003); if (fDIP0003Active && !m_evoDb.VerifyBestBlock(pindexOld->GetBlockHash())) { - return error("ReplayBlocks(DASH): Found EvoDB inconsistency"); + LogError("ReplayBlocks(DASH): Found EvoDB inconsistency\n"); + return false; } } @@ -4892,12 +4933,14 @@ bool CChainState::ReplayBlocks() if (pindexOld->nHeight > 0) { // Never disconnect the genesis block. 
CBlock block; if (!ReadBlockFromDisk(block, pindexOld, m_params.GetConsensus())) { - return error("ReplayBlocks(): ReadBlockFromDisk() failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString()); + LogError("ReplayBlocks(): ReadBlockFromDisk() failed at %d, hash=%s\n", pindexOld->nHeight, pindexOld->GetBlockHash().ToString()); + return false; } LogPrintf("Rolling back %s (%i)\n", pindexOld->GetBlockHash().ToString(), pindexOld->nHeight); DisconnectResult res = DisconnectBlock(block, pindexOld, cache); if (res == DISCONNECT_FAILED) { - return error("ReplayBlocks(): DisconnectBlock failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString()); + LogError("ReplayBlocks(): DisconnectBlock failed at %d, hash=%s\n", pindexOld->nHeight, pindexOld->GetBlockHash().ToString()); + return false; } // If DISCONNECT_UNCLEAN is returned, it means a non-existing UTXO was deleted, or an existing UTXO was // overwritten. It corresponds to cases where the block-to-be-disconnect never had all its operations @@ -5045,7 +5088,8 @@ bool CChainState::AddGenesisBlock(const CBlock& block, BlockValidationState& sta { FlatFilePos blockPos{m_blockman.SaveBlockToDisk(block, 0, m_chain, nullptr)}; if (blockPos.IsNull()) { - return error("%s: writing genesis block to disk failed (%s)", __func__, state.ToString()); + LogError("AddGenesisBlock: writing genesis block to disk failed (%s)\n", state.ToString()); + return false; } CBlockIndex* pindex = m_blockman.AddToBlockIndex(block, block.GetHash(), m_chainman.m_best_header); ReceivedBlockTransactions(block, pindex, blockPos); @@ -5079,7 +5123,8 @@ bool CChainState::LoadGenesisBlock() return false; } } catch (const std::runtime_error &e) { - return error("%s: failed to initialize block database: %s", __func__, e.what()); + LogError("%s: failed to initialize block database: %s\n", __func__, e.what()); + return false; } return true; diff --git a/test/functional/mempool_accept.py b/test/functional/mempool_accept.py 
index f424f19edead..2c8c0dc2ab60 100755 --- a/test/functional/mempool_accept.py +++ b/test/functional/mempool_accept.py @@ -77,6 +77,12 @@ def run_test(self): txid_in_block = self.wallet.sendrawtransaction(from_node=node, tx_hex=raw_tx_in_block) self.generate(node, 1) self.mempool_size = 0 + # Check negative feerate + assert_raises_rpc_error(-3, "Amount out of range", lambda: self.check_mempool_result( + result_expected=None, + rawtxs=[raw_tx_in_block], + maxfeerate=-0.01, + )) self.check_mempool_result( result_expected=[{'txid': txid_in_block, 'allowed': False, 'reject-reason': 'txn-already-known'}], rawtxs=[raw_tx_in_block], diff --git a/test/functional/test_framework/script.py b/test/functional/test_framework/script.py index bb078591b9f4..bdc127b104a9 100644 --- a/test/functional/test_framework/script.py +++ b/test/functional/test_framework/script.py @@ -479,7 +479,7 @@ def raw_iter(self): i = 0 while i < len(self): sop_idx = i - opcode = self[i] + opcode = CScriptOp(self[i]) i += 1 if opcode > OP_PUSHDATA4: @@ -586,7 +586,7 @@ def GetSigOpCount(self, fAccurate): n += 1 elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY): if fAccurate and (OP_1 <= lastOpcode <= OP_16): - n += opcode.decode_op_n() + n += lastOpcode.decode_op_n() else: n += 20 lastOpcode = opcode @@ -704,3 +704,17 @@ def test_cscriptnum_encoding(self): values = [0, 1, -1, -2, 127, 128, -255, 256, (1 << 15) - 1, -(1 << 16), (1 << 24) - 1, (1 << 31), 1 - (1 << 32), 1 << 40, 1500, -1500] for value in values: self.assertEqual(CScriptNum.decode(CScriptNum.encode(CScriptNum(value))), value) + + def test_legacy_sigopcount(self): + # test repeated single sig ops + for n_ops in range(1, 100, 10): + for singlesig_op in (OP_CHECKSIG, OP_CHECKSIGVERIFY): + singlesigs_script = CScript([singlesig_op]*n_ops) + self.assertEqual(singlesigs_script.GetSigOpCount(fAccurate=False), n_ops) + self.assertEqual(singlesigs_script.GetSigOpCount(fAccurate=True), n_ops) + # test multisig op (including accurate counting, 
i.e. BIP16) + for n in range(1, 16+1): + for multisig_op in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY): + multisig_script = CScript([CScriptOp.encode_op_n(n), multisig_op]) + self.assertEqual(multisig_script.GetSigOpCount(fAccurate=False), 20) + self.assertEqual(multisig_script.GetSigOpCount(fAccurate=True), n) diff --git a/test/lint/README.md b/test/lint/README.md index 704922d7abe3..b2f56bda3d62 100644 --- a/test/lint/README.md +++ b/test/lint/README.md @@ -59,3 +59,8 @@ git remote add --fetch secp256k1 https://github.com/bitcoin-core/secp256k1.git all-lint.py =========== Calls other scripts with the `lint-` prefix. + + +lint_ignore_dirs.py +=================== +Add list of common directories to ignore when running tests diff --git a/test/lint/lint-include-guards.py b/test/lint/lint-include-guards.py index 6e59c83f7dbc..c5de3b4394a5 100755 --- a/test/lint/lint-include-guards.py +++ b/test/lint/lint-include-guards.py @@ -13,15 +13,13 @@ from subprocess import check_output from typing import List +from lint_ignore_dirs import SHARED_EXCLUDED_SUBTREES + HEADER_ID_PREFIX = 'BITCOIN_' HEADER_ID_SUFFIX = '_H' EXCLUDE_FILES_WITH_PREFIX = ['src/crypto/ctaes', - 'src/leveldb', - 'src/crc32c', - 'src/secp256k1', - 'src/minisketch', 'src/tinyformat.h', 'src/bench/nanobench.h', 'src/test/fuzz/FuzzedDataProvider.h', @@ -30,7 +28,7 @@ 'src/ctpl_stl.h', 'src/dashbls', 'src/gsl', - 'src/immer'] + 'src/immer'] + SHARED_EXCLUDED_SUBTREES def _get_header_file_lst() -> List[str]: diff --git a/test/lint/lint-includes.py b/test/lint/lint-includes.py index 9a0cfa127dde..8bd4ba24c884 100755 --- a/test/lint/lint-includes.py +++ b/test/lint/lint-includes.py @@ -14,6 +14,8 @@ from subprocess import check_output, CalledProcessError +from lint_ignore_dirs import SHARED_EXCLUDED_SUBTREES + EXCLUDED_DIRS = ["src/leveldb/", "src/crc32c/", @@ -21,7 +23,7 @@ "src/minisketch/", "src/dashbls/", "src/immer/", - "src/crypto/x11/"] + "src/crypto/x11/"] + SHARED_EXCLUDED_SUBTREES 
EXPECTED_BOOST_INCLUDES = ["boost/date_time/posix_time/posix_time.hpp", "boost/hana/for_each.hpp", diff --git a/test/lint/lint-spelling.py b/test/lint/lint-spelling.py index fb4d2495c691..e10d2368dfc5 100755 --- a/test/lint/lint-spelling.py +++ b/test/lint/lint-spelling.py @@ -11,8 +11,11 @@ from subprocess import check_output, STDOUT, CalledProcessError +from lint_ignore_dirs import SHARED_EXCLUDED_SUBTREES + IGNORE_WORDS_FILE = 'test/lint/spelling.ignore-words.txt' -FILES_ARGS = ['git', 'ls-files', '--', ":(exclude)build-aux/m4/", ":(exclude)contrib/seeds/*.txt", ":(exclude)depends/", ":(exclude)doc/release-notes/", ":(exclude)src/dashbls/", ":(exclude)src/crc32c/", ":(exclude)src/crypto/", ":(exclude)src/ctpl_stl.h", ":(exclude)src/cxxtimer.hpp", ":(exclude)src/immer/", ":(exclude)src/leveldb/", ":(exclude)src/qt/locale/", ":(exclude)src/qt/*.qrc", ":(exclude)src/secp256k1/", ":(exclude)src/minisketch/", ":(exclude)contrib/builder-keys/", ":(exclude)contrib/guix/patches", ":(exclude)src/util/subprocess.hpp", ":(exclude)src/wallet/bip39_english.h"] +FILES_ARGS = ['git', 'ls-files', '--', ":(exclude)build-aux/m4/", ":(exclude)contrib/seeds/*.txt", ":(exclude)depends/", ":(exclude)doc/release-notes/", ":(exclude)src/dashbls/", ":(exclude)src/crypto/", ":(exclude)src/ctpl_stl.h", ":(exclude)src/cxxtimer.hpp", ":(exclude)src/immer/", ":(exclude)src/qt/locale/", ":(exclude)src/qt/*.qrc", ":(exclude)contrib/builder-keys/", ":(exclude)contrib/guix/patches", ":(exclude)src/util/subprocess.hpp", ":(exclude)src/wallet/bip39_english.h"] +FILES_ARGS += [f":(exclude){dir}" for dir in SHARED_EXCLUDED_SUBTREES] def check_codespell_install(): diff --git a/test/lint/lint_ignore_dirs.py b/test/lint/lint_ignore_dirs.py new file mode 100644 index 000000000000..af9ee7ef6bef --- /dev/null +++ b/test/lint/lint_ignore_dirs.py @@ -0,0 +1,5 @@ +SHARED_EXCLUDED_SUBTREES = ["src/leveldb/", + "src/crc32c/", + "src/secp256k1/", + "src/minisketch/", + ]