Skip to content
Closed
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
47 changes: 36 additions & 11 deletions test/functional/feature_llmq_signing.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,8 @@ def run_test(self):
id = "0000000000000000000000000000000000000000000000000000000000000001"
msgHash = "0000000000000000000000000000000000000000000000000000000000000002"
msgHashConflict = "0000000000000000000000000000000000000000000000000000000000000003"
submit_true_id = "0000000000000000000000000000000000000000000000000000000000000004"
submit_true_msgHash = "0000000000000000000000000000000000000000000000000000000000000005"

def check_sigs(hasrecsigs, isconflicting1, isconflicting2):
for mn in self.mninfo: # type: MasternodeInfo
Expand All @@ -62,6 +64,22 @@ def wait_for_sigs(hasrecsigs, isconflicting1, isconflicting2, timeout):
def assert_sigs_nochange(hasrecsigs, isconflicting1, isconflicting2, timeout):
    # Assert that the signature state stays exactly as given for the whole
    # timeout window: wait_until must NOT observe a deviation from the
    # expected (hasrecsigs, isconflicting1, isconflicting2) triple.
    state_deviates = lambda: not check_sigs(hasrecsigs, isconflicting1, isconflicting2)
    assert not self.wait_until(state_deviates, timeout=timeout, do_assert=False)

def wait_for_recsig(mn, request_id, request_msg_hash, timeout):
    # Block until the given masternode reports a recovered signature for
    # the (request_id, request_msg_hash) signing request on the test quorum.
    def has_recovered_sig():
        node = mn.get_node(self)
        return node.quorum("hasrecsig", q_type, request_id, request_msg_hash)

    self.wait_until(has_recovered_sig, timeout=timeout)

def rpc_sig_share_to_p2p(sig_share_rpc):
    # Translate the JSON object returned by `quorum sign ... submit=false`
    # into a CSigShare suitable for sending inside a QSIGSHARE P2P message.
    share = CSigShare()
    # Plain decimal integer fields.
    for field in ("llmqType", "quorumMember"):
        setattr(share, field, int(sig_share_rpc[field]))
    # Hex-encoded 256-bit hash fields.
    for field in ("quorumHash", "id", "msgHash"):
        setattr(share, field, int(sig_share_rpc[field], 16))
    # BLS signature share comes as a hex string.
    share.sigShare = bytes.fromhex(sig_share_rpc["signature"])
    return share

# Initial state
wait_for_sigs(False, False, False, 1)

Expand All @@ -79,31 +97,38 @@ def assert_sigs_nochange(hasrecsigs, isconflicting1, isconflicting2, timeout):
# Sign third share and test optional submit parameter if spork21 is enabled, should result in recovered sig
# and conflict for msgHashConflict
if self.options.spork21:
# Sign a distinct request through the default submit=true path to assert sig share relay and recovery.
q_submit_true = self.nodes[0].quorum('selectquorum', q_type, submit_true_id)
submit_true_recovery_member = self.get_mninfo(q_submit_true['recoveryMembers'][0])
submit_true_signers = [mn for mn in self.mninfo if mn != submit_true_recovery_member][:3]
for mn in submit_true_signers:
mn.get_node(self).quorum("sign", q_type, submit_true_id, submit_true_msgHash)
wait_for_recsig(submit_true_recovery_member, submit_true_id, submit_true_msgHash, 15)
Comment on lines +100 to +106
Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

💬 Nitpick: submit=true subtest only asserts recovery on the recovery member, not all quorum members

The new submit_true_id/submit_true_msgHash flow waits only for hasrecsig on submit_true_recovery_member. Prior to this PR, the implicit submit=true assertion was the all-nodes wait_for_sigs(True, False, True, 15) at line 136. After the split, that final wait still covers all-nodes recsig propagation, but only for the P2P-injected id/msgHash path. The recsig announcement code path is the same regardless of how recovery was triggered, so this is a minor coverage delta rather than a real regression risk — but extending the assertion to all masternodes is cheap and preserves the original guarantee. Note the suggested fix in the suggestion field; you'd want to re-derive submit_true_signers similarly when applying it.

💡 Suggested change
Suggested change
# Sign a distinct request through the default submit=true path to assert sig share relay and recovery.
q_submit_true = self.nodes[0].quorum('selectquorum', q_type, submit_true_id)
submit_true_recovery_member = self.get_mninfo(q_submit_true['recoveryMembers'][0])
submit_true_signers = [mn for mn in self.mninfo if mn != submit_true_recovery_member][:3]
for mn in submit_true_signers:
mn.get_node(self).quorum("sign", q_type, submit_true_id, submit_true_msgHash)
wait_for_recsig(submit_true_recovery_member, submit_true_id, submit_true_msgHash, 15)
# Sign a distinct request through the default submit=true path to assert sig share relay and recovery
# propagates to every quorum member.
q_submit_true = self.nodes[0].quorum('selectquorum', q_type, submit_true_id)
submit_true_recovery_member = self.get_mninfo(q_submit_true['recoveryMembers'][0])
submit_true_signers = [mn for mn in self.mninfo if mn != submit_true_recovery_member][:3]
for mn in submit_true_signers:
mn.get_node(self).quorum("sign", q_type, submit_true_id, submit_true_msgHash)
self.wait_until(
lambda: all(
mn.get_node(self).quorum("hasrecsig", q_type, submit_true_id, submit_true_msgHash)
for mn in self.mninfo
),
timeout=15,
)

source: ['codex']


# 1. Providing an invalid quorum hash and set submit=false, should throw an error
assert_raises_rpc_error(-8, 'quorum not found', self.mninfo[2].get_node(self).quorum, "sign", q_type, id, msgHash, id, False)
# 2. Providing a valid quorum hash and set submit=false, should return a valid sigShare object
sig_share_rpc_1 = self.mninfo[2].get_node(self).quorum("sign", q_type, id, msgHash, quorumHash, False)
sig_share_rpc_2 = self.mninfo[2].get_node(self).quorum("sign", q_type, id, msgHash, "", False)
assert_equal(sig_share_rpc_1, sig_share_rpc_2)
assert_sigs_nochange(False, False, False, 3)
# 3. Sending the sig share received from RPC to the recovery member through P2P interface, should result
# in a recovered sig
sig_share = CSigShare()
sig_share.llmqType = int(sig_share_rpc_1["llmqType"])
sig_share.quorumHash = int(sig_share_rpc_1["quorumHash"], 16)
sig_share.quorumMember = int(sig_share_rpc_1["quorumMember"])
sig_share.id = int(sig_share_rpc_1["id"], 16)
sig_share.msgHash = int(sig_share_rpc_1["msgHash"], 16)
sig_share.sigShare = bytes.fromhex(sig_share_rpc_1["signature"])
# 3. Sending enough sig shares received from RPC to the recovery member through P2P interface, should
# result in a recovered sig. Build all threshold shares explicitly so this test does not depend on the
# asynchronous submit=true shares above being relayed before the timeout expires.
sig_shares = [
rpc_sig_share_to_p2p(self.mninfo[i].get_node(self).quorum("sign", q_type, id, msgHash, quorumHash, False))
for i in range(2)
]
sig_shares.append(rpc_sig_share_to_p2p(sig_share_rpc_1))
for mn in self.mninfo: # type: MasternodeInfo
assert mn.get_node(self).getconnectioncount() == self.llmq_size
# Get the current recovery member of the quorum
q = self.nodes[0].quorum('selectquorum', q_type, id)
mn: MasternodeInfo = self.get_mninfo(q['recoveryMembers'][0])
# Open a P2P connection to it
p2p_interface = mn.get_node(self).add_p2p_connection(P2PInterface())
# Send the last required QSIGSHARE message to the recovery member
p2p_interface.send_message(msg_qsigshare([sig_share]))
# Send the required QSIGSHARE messages to the recovery member
p2p_interface.send_and_ping(msg_qsigshare(sig_shares))
Comment on lines +115 to +131
Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🟡 Suggestion: Direct injection no longer positively covers the submit=true relay path

This block now rebuilds shares for members 0, 1, and 2 with submit=false and injects all of them directly into the selected recovery member. In this test set_dash_llmq_test_params(5, 3) overrides LLMQ_TEST to a 3-of-5 quorum, so those three injected shares are sufficient by themselves; the recovered-signature assertion at line 120 can pass even if the earlier default submit=true calls at lines 79 and 87 stop relaying shares under SPORK_21. Avoiding that async dependency is the right way to deflake this specific check, but it also means the test no longer has a positive assertion that submit=true shares are relayed and consumed. If that behavior is meant to stay covered here, add a separate deterministic assertion for the submit=true relay path rather than relying on this recovery step.

source: ['codex']

🤖 Fix this with AI agents
These findings are from an automated code review. Verify each finding against the current code and only fix it if needed.

In `test/functional/feature_llmq_signing.py`:
- [SUGGESTION] lines 99-115: Direct injection no longer positively covers the submit=true relay path
  This block now rebuilds shares for members 0, 1, and 2 with `submit=false` and injects all of them directly into the selected recovery member. In this test `set_dash_llmq_test_params(5, 3)` overrides `LLMQ_TEST` to a 3-of-5 quorum, so those three injected shares are sufficient by themselves; the recovered-signature assertion at line 120 can pass even if the earlier default `submit=true` calls at lines 79 and 87 stop relaying shares under SPORK_21. Avoiding that async dependency is the right way to deflake this specific check, but it also means the test no longer has a positive assertion that submit=true shares are relayed and consumed. If that behavior is meant to stay covered here, add a separate deterministic assertion for the submit=true relay path rather than relying on this recovery step.

else:
# If spork21 is not enabled just sign regularly
self.mninfo[2].get_node(self).quorum("sign", q_type, id, msgHash)
Expand Down
Loading