From 6313068b3c084ec33ac0374cb4989d7cb2a723e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Fri, 4 Jul 2025 22:20:30 +0100 Subject: [PATCH 01/68] feat(gossipsub): implement gossipsub 1.3 --- protocols/gossipsub/CHANGELOG.md | 4 + protocols/gossipsub/src/behaviour.rs | 98 ++++++++++++------- protocols/gossipsub/src/behaviour/tests.rs | 90 ++++++++++++++++- .../gossipsub/src/generated/gossipsub/pb.rs | 17 ++++ protocols/gossipsub/src/generated/rpc.proto | 12 +++ protocols/gossipsub/src/protocol.rs | 13 ++- protocols/gossipsub/src/rpc.rs | 1 + protocols/gossipsub/src/types.rs | 38 ++++++- 8 files changed, 233 insertions(+), 40 deletions(-) diff --git a/protocols/gossipsub/CHANGELOG.md b/protocols/gossipsub/CHANGELOG.md index 0bfc18b5876..d77c8109068 100644 --- a/protocols/gossipsub/CHANGELOG.md +++ b/protocols/gossipsub/CHANGELOG.md @@ -1,4 +1,8 @@ ## 0.50.0 + +- Implement gossipsub 1.3 extensions control message. + See [PR 6119](https://github.com/libp2p/rust-libp2p/pull/6119) + - Remove peer penalty for duplicate messages. See [PR 6112](https://github.com/libp2p/rust-libp2p/pull/6112) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 4c2b89bed31..6e88f9de5d3 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -68,8 +68,8 @@ use crate::{ topic::{Hasher, Topic, TopicHash}, transform::{DataTransform, IdentityTransform}, types::{ - ControlAction, Graft, IDontWant, IHave, IWant, Message, MessageAcceptance, MessageId, - PeerDetails, PeerInfo, PeerKind, Prune, RawMessage, RpcOut, Subscription, + ControlAction, Extensions, Graft, IDontWant, IHave, IWant, Message, MessageAcceptance, + MessageId, PeerDetails, PeerInfo, PeerKind, Prune, RawMessage, RpcOut, Subscription, SubscriptionAction, }, FailedMessages, PublishError, SubscriptionError, TopicScoreParams, ValidationError, @@ -1521,6 +1521,26 @@ where tracing::debug!(peer=%peer_id, "Completed GRAFT handling for peer"); } + fn handle_extensions(&mut self, peer_id: &PeerId, extensions: Extensions) { + let Some(peer) = self.connected_peers.get_mut(peer_id) else { + tracing::error!( + peer=%peer_id, + "Extensions by unknown peer" + ); + return; + }; + + if peer.extensions.is_some() { + tracing::debug!( + peer=%peer_id, + "Peer had already sent us extensions message" + ); + return; + } + + peer.extensions = Some(extensions); + } + /// Removes the specified peer from the mesh, returning true if it was present. fn remove_peer_from_mesh( &mut self, @@ -2898,7 +2918,8 @@ where RpcOut::Graft(_) | RpcOut::Prune(_) | RpcOut::Subscribe(_) - | RpcOut::Unsubscribe(_) => { + | RpcOut::Unsubscribe(_) + | RpcOut::Extensions(_) => { unreachable!("Channel for highpriority control messages is unbounded and should always be open.") } } @@ -3125,24 +3146,25 @@ where // The protocol negotiation occurs once a message is sent/received. Once this happens we // update the type of peer that this is in order to determine which kind of routing should // occur. 
- let connected_peer = self - .connected_peers - .entry(peer_id) - .or_insert_with(|| PeerDetails { - kind: PeerKind::Floodsub, - connections: vec![], - outbound: false, - sender: Sender::new(self.config.connection_handler_queue_len()), - topics: Default::default(), - dont_send: LinkedHashMap::new(), - }); + let connected_peer = self.connected_peers.entry(peer_id).or_insert(PeerDetails { + kind: PeerKind::Floodsub, + connections: vec![], + outbound: false, + sender: Sender::new(self.config.connection_handler_queue_len()), + topics: Default::default(), + dont_send: LinkedHashMap::new(), + extensions: None, + }); // Add the new connection connected_peer.connections.push(connection_id); + let receiver = connected_peer.sender.new_receiver(); - Ok(Handler::new( - self.config.protocol_config(), - connected_peer.sender.new_receiver(), - )) + if connected_peer.connections.len() <= 1 { + // If this is the first connection send extensions message. + self.send_message(peer_id, RpcOut::Extensions(Extensions {})); + } + + Ok(Handler::new(self.config.protocol_config(), receiver)) } fn handle_established_outbound_connection( @@ -3153,26 +3175,27 @@ where _: Endpoint, _: PortUse, ) -> Result, ConnectionDenied> { - let connected_peer = self - .connected_peers - .entry(peer_id) - .or_insert_with(|| PeerDetails { - kind: PeerKind::Floodsub, - connections: vec![], - // Diverging from the go implementation we only want to consider a peer as outbound - // peer if its first connection is outbound. - outbound: !self.px_peers.contains(&peer_id), - sender: Sender::new(self.config.connection_handler_queue_len()), - topics: Default::default(), - dont_send: LinkedHashMap::new(), - }); + let connected_peer = self.connected_peers.entry(peer_id).or_insert(PeerDetails { + kind: PeerKind::Floodsub, + connections: vec![], + // Diverging from the go implementation we only want to consider a peer as outbound peer + // if its first connection is outbound. + outbound: !self.px_peers.contains(&peer_id), + sender: Sender::new(self.config.connection_handler_queue_len()), + topics: Default::default(), + dont_send: LinkedHashMap::new(), + extensions: None, + }); // Add the new connection connected_peer.connections.push(connection_id); + let receiver = connected_peer.sender.new_receiver(); + + if connected_peer.connections.len() <= 1 { + // If this is the first connection send extensions message. 
+ self.send_message(peer_id, RpcOut::Extensions(Extensions {})); + } - Ok(Handler::new( - self.config.protocol_config(), - connected_peer.sender.new_receiver(), - )) + Ok(Handler::new(self.config.protocol_config(), receiver)) } fn on_connection_handler_event( @@ -3357,6 +3380,11 @@ where } } } + ControlAction::Extensions(extensions) => { + if let Some(extensions) = extensions { + self.handle_extensions(&propagation_source, extensions); + } + } } } if !ihave_msgs.is_empty() { diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index 04c86bd3df4..cf5dcadd0c5 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -33,8 +33,9 @@ use crate::{ config::{ConfigBuilder, TopicMeshConfig}, protocol::GossipsubCodec, rpc::Receiver, + rpc_proto::proto, subscription_filter::WhitelistSubscriptionFilter, - types::RpcIn, + types::{ControlAction, Extensions, RpcIn, RpcOut}, IdentTopic as Topic, }; @@ -248,6 +249,7 @@ where topics: Default::default(), sender, dont_send: LinkedHashMap::new(), + extensions: None, }, ); @@ -644,6 +646,7 @@ fn test_join() { topics: Default::default(), sender, dont_send: LinkedHashMap::new(), + extensions: None, }, ); receivers.insert(random_peer, receiver); @@ -1041,6 +1044,7 @@ fn test_get_random_peers() { topics: topics.clone(), sender: Sender::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), + extensions: None, }, ); } @@ -5595,6 +5599,7 @@ fn test_all_queues_full() { topics: topics.clone(), sender: Sender::new(2), dont_send: LinkedHashMap::new(), + extensions: None, }, ); @@ -5631,6 +5636,7 @@ fn test_slow_peer_returns_failed_publish() { topics: topics.clone(), sender: Sender::new(2), dont_send: LinkedHashMap::new(), + extensions: None, }, ); let peer_id = PeerId::random(); @@ -5644,6 +5650,7 @@ fn test_slow_peer_returns_failed_publish() { topics: topics.clone(), sender: Sender::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), + extensions: None, }, ); @@ -5705,6 +5712,7 @@ fn test_slow_peer_returns_failed_ihave_handling() { topics: topics.clone(), sender: Sender::new(2), dont_send: LinkedHashMap::new(), + extensions: None, }, ); peers.push(slow_peer_id); @@ -5722,6 +5730,7 @@ fn test_slow_peer_returns_failed_ihave_handling() { topics: topics.clone(), sender: Sender::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), + extensions: None, }, ); @@ -5819,6 +5828,7 @@ fn test_slow_peer_returns_failed_iwant_handling() { topics: topics.clone(), sender: Sender::new(2), dont_send: LinkedHashMap::new(), + extensions: None, }, ); peers.push(slow_peer_id); @@ -5836,6 +5846,7 @@ fn test_slow_peer_returns_failed_iwant_handling() { topics: topics.clone(), sender: Sender::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), + extensions: None, }, ); @@ -5913,6 +5924,7 @@ fn test_slow_peer_returns_failed_forward() { topics: topics.clone(), sender: Sender::new(2), dont_send: LinkedHashMap::new(), + extensions: None, }, ); peers.push(slow_peer_id); @@ -5930,6 +5942,7 @@ fn test_slow_peer_returns_failed_forward() { topics: topics.clone(), sender: Sender::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), + extensions: None, }, ); @@ -6012,6 +6025,7 @@ fn test_slow_peer_is_downscored_on_publish() { topics: topics.clone(), sender: Sender::new(2), dont_send: LinkedHashMap::new(), + extensions: None, }, ); gs.as_peer_score_mut().add_peer(slow_peer_id); 
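The empty extensions marker added above survives a full encode/decode cycle of the generated codec. A hypothetical extra test (not part of this patch; it assumes only the generated `proto` types and the `quick_protobuf` API they are built on) sketching that round trip:

    use quick_protobuf::{BytesReader, MessageRead, MessageWrite, Writer};

    #[test]
    fn extensions_control_message_roundtrip() {
        let rpc = proto::RPC {
            subscriptions: vec![],
            publish: vec![],
            control: Some(proto::ControlMessage {
                ihave: vec![],
                iwant: vec![],
                graft: vec![],
                prune: vec![],
                idontwant: vec![],
                extensions: Some(proto::ControlExtensions {}),
            }),
        };

        // Encode with the generated `MessageWrite` impl.
        let mut buf = Vec::with_capacity(rpc.get_size());
        rpc.write_message(&mut Writer::new(&mut buf)).unwrap();

        // Decode it back: the (currently empty) extensions marker must survive.
        let decoded = proto::RPC::from_reader(&mut BytesReader::from_bytes(&buf), &buf).unwrap();
        assert_eq!(
            decoded.control.unwrap().extensions,
            Some(proto::ControlExtensions {})
        );
    }
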
@@ -6026,6 +6040,7 @@ fn test_slow_peer_is_downscored_on_publish() { topics: topics.clone(), sender: Sender::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), + extensions: None, }, ); @@ -6787,3 +6802,76 @@ fn test_validation_message_size_within_topic_specific() { _ => panic!("Unexpected event"), } } + +#[test] +fn test_extensions_message_creation() { + let extensions_rpc = RpcOut::Extensions(Extensions {}); + let proto_rpc: proto::RPC = extensions_rpc.into(); + + assert!(proto_rpc.control.is_some()); + let control = proto_rpc.control.unwrap(); + assert!(control.extensions.is_some()); + assert!(control.ihave.is_empty()); + assert!(control.iwant.is_empty()); + assert!(control.graft.is_empty()); + assert!(control.prune.is_empty()); + assert!(control.idontwant.is_empty()); +} + +#[test] +fn test_handle_extensions_message() { + let mut gs: Behaviour = Behaviour::new( + MessageAuthenticity::Anonymous, + ConfigBuilder::default() + .validation_mode(ValidationMode::None) + .build() + .unwrap(), + ) + .unwrap(); + + let peer_id = PeerId::random(); + let sender = Sender::new(gs.config.connection_handler_queue_len()); + + // Add peer without extensions + gs.connected_peers.insert( + peer_id, + PeerDetails { + kind: PeerKind::Gossipsubv1_3, + connections: vec![ConnectionId::new_unchecked(0)], + outbound: false, + topics: BTreeSet::new(), + sender, + dont_send: LinkedHashMap::new(), + extensions: None, + }, + ); + + // Simulate receiving extensions message + let extensions = Extensions {}; + gs.handle_extensions(&peer_id, extensions); + + // Verify extensions were stored + let peer_details = gs.connected_peers.get(&peer_id).unwrap(); + assert!(peer_details.extensions.is_some()); + + // Simulate receiving duplicate extensions message from another peer + // TODO: when more extensions are added, we should test that they are not overridden. 
+ let duplicate_rpc = RpcIn { + messages: vec![], + subscriptions: vec![], + control_msgs: vec![ControlAction::Extensions(None)], + }; + + gs.on_connection_handler_event( + peer_id, + ConnectionId::new_unchecked(0), + HandlerEvent::Message { + rpc: duplicate_rpc, + invalid_messages: vec![], + }, + ); + + // Extensions should still be present (not cleared or changed) + let peer_details = gs.connected_peers.get(&peer_id).unwrap(); + assert!(peer_details.extensions.is_some()); +} diff --git a/protocols/gossipsub/src/generated/gossipsub/pb.rs b/protocols/gossipsub/src/generated/gossipsub/pb.rs index 24ac80d2755..9a3ddb2e2fb 100644 --- a/protocols/gossipsub/src/generated/gossipsub/pb.rs +++ b/protocols/gossipsub/src/generated/gossipsub/pb.rs @@ -155,6 +155,7 @@ pub struct ControlMessage { pub graft: Vec, pub prune: Vec, pub idontwant: Vec, + pub extensions: Option, } impl<'a> MessageRead<'a> for ControlMessage { @@ -167,6 +168,7 @@ impl<'a> MessageRead<'a> for ControlMessage { Ok(26) => msg.graft.push(r.read_message::(bytes)?), Ok(34) => msg.prune.push(r.read_message::(bytes)?), Ok(42) => msg.idontwant.push(r.read_message::(bytes)?), + Ok(50) => msg.extensions = Some(r.read_message::(bytes)?), Ok(t) => { r.read_unknown(bytes, t)?; } Err(e) => return Err(e), } @@ -183,6 +185,7 @@ impl MessageWrite for ControlMessage { + self.graft.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() + self.prune.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() + self.idontwant.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() + + self.extensions.as_ref().map_or(0, |m| 1 + sizeof_len((m).get_size())) } fn write_message(&self, w: &mut Writer) -> Result<()> { @@ -191,6 +194,7 @@ impl MessageWrite for ControlMessage { for s in &self.graft { w.write_with_tag(26, |w| w.write_message(s))?; } for s in &self.prune { w.write_with_tag(34, |w| w.write_message(s))?; } for s in &self.idontwant { w.write_with_tag(42, |w| w.write_message(s))?; } + if let Some(ref s) = self.extensions { w.write_with_tag(50, |w| w.write_message(s))?; } Ok(()) } } @@ -367,6 +371,19 @@ impl MessageWrite for ControlIDontWant { } } +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct ControlExtensions { } + +impl<'a> MessageRead<'a> for ControlExtensions { + fn from_reader(r: &mut BytesReader, _: &[u8]) -> Result { + r.read_to_end(); + Ok(Self::default()) + } +} + +impl MessageWrite for ControlExtensions { } + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Debug, Default, PartialEq, Clone)] pub struct PeerInfo { diff --git a/protocols/gossipsub/src/generated/rpc.proto b/protocols/gossipsub/src/generated/rpc.proto index fe4d3bc9366..4f50bc77aaa 100644 --- a/protocols/gossipsub/src/generated/rpc.proto +++ b/protocols/gossipsub/src/generated/rpc.proto @@ -12,6 +12,12 @@ message RPC { } optional ControlMessage control = 3; + + // Canonical Extensions should register their messages here. + + // Experimental Extensions should register their messages here. They + // must use field numbers larger than 0x200000 to be encoded with at least 4 + // bytes } message Message { @@ -29,6 +35,7 @@ message ControlMessage { repeated ControlGraft graft = 3; repeated ControlPrune prune = 4; repeated ControlIDontWant idontwant = 5; + optional ControlExtensions extensions = 6; } message ControlIHave { @@ -54,6 +61,11 @@ message ControlIDontWant { repeated bytes message_ids = 1; } +message ControlExtensions { + // Initially empty. 
Future extensions will be added here along with a
+  // reference to their specification.
+}
+
 message PeerInfo {
   optional bytes peer_id = 1;
   optional bytes signed_peer_record = 2;
diff --git a/protocols/gossipsub/src/protocol.rs b/protocols/gossipsub/src/protocol.rs
index 821c11d2132..dff5c4ffe75 100644
--- a/protocols/gossipsub/src/protocol.rs
+++ b/protocols/gossipsub/src/protocol.rs
@@ -35,14 +35,19 @@ use crate::{
     rpc_proto::proto,
     topic::TopicHash,
     types::{
-        ControlAction, Graft, IDontWant, IHave, IWant, MessageId, PeerInfo, PeerKind, Prune,
-        RawMessage, RpcIn, Subscription, SubscriptionAction,
+        ControlAction, Extensions, Graft, IDontWant, IHave, IWant, MessageId, PeerInfo, PeerKind,
+        Prune, RawMessage, RpcIn, Subscription, SubscriptionAction,
     },
     ValidationError,
 };
 
 pub(crate) const SIGNING_PREFIX: &[u8] = b"libp2p-pubsub:";
 
+pub(crate) const GOSSIPSUB_1_3_0_PROTOCOL: ProtocolId = ProtocolId {
+    protocol: StreamProtocol::new("/meshsub/1.3.0"),
+    kind: PeerKind::Gossipsubv1_3,
+};
+
 pub(crate) const GOSSIPSUB_1_2_0_PROTOCOL: ProtocolId = ProtocolId {
     protocol: StreamProtocol::new("/meshsub/1.2.0"),
     kind: PeerKind::Gossipsubv1_2,
@@ -79,6 +84,7 @@ impl Default for ProtocolConfig {
         Self {
             validation_mode: ValidationMode::Strict,
             protocol_ids: vec![
+                GOSSIPSUB_1_3_0_PROTOCOL,
                 GOSSIPSUB_1_2_0_PROTOCOL,
                 GOSSIPSUB_1_1_0_PROTOCOL,
                 GOSSIPSUB_1_0_0_PROTOCOL,
@@ -556,11 +562,14 @@ impl Decoder for GossipsubCodec {
                 })
                 .collect();
 
+            let extension_msg = rpc_control.extensions.map(|_extension| Extensions {});
+
             control_msgs.extend(ihave_msgs);
             control_msgs.extend(iwant_msgs);
             control_msgs.extend(graft_msgs);
             control_msgs.extend(prune_msgs);
             control_msgs.extend(idontwant_msgs);
+            control_msgs.push(ControlAction::Extensions(extension_msg));
         }
 
         Ok(Some(HandlerEvent::Message {
diff --git a/protocols/gossipsub/src/rpc.rs b/protocols/gossipsub/src/rpc.rs
index 943df31f599..ca4b25dfd23 100644
--- a/protocols/gossipsub/src/rpc.rs
+++ b/protocols/gossipsub/src/rpc.rs
@@ -87,6 +87,7 @@ impl Sender {
             RpcOut::Publish { .. }
             | RpcOut::Graft(_)
             | RpcOut::Prune(_)
+            | RpcOut::Extensions(_)
             | RpcOut::Subscribe(_)
             | RpcOut::Unsubscribe(_) => &self.priority_sender,
             RpcOut::Forward { .. } | RpcOut::IHave(_) | RpcOut::IWant(_) | RpcOut::IDontWant(_) => {
                 &self.non_priority_sender
             }
diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs
index 8f8a4f38a88..30edfe28c57 100644
--- a/protocols/gossipsub/src/types.rs
+++ b/protocols/gossipsub/src/types.rs
@@ -30,7 +30,11 @@ use quick_protobuf::MessageWrite;
 use serde::{Deserialize, Serialize};
 use web_time::Instant;
 
-use crate::{rpc::Sender, rpc_proto::proto, TopicHash};
+use crate::{
+    rpc::Sender,
+    rpc_proto::proto::{self},
+    TopicHash,
+};
 
 /// Messages that have expired while attempting to be sent to a peer.
 #[derive(Clone, Debug, Default)]
@@ -105,6 +109,8 @@ pub(crate) struct PeerDetails {
     /// The kind of protocol the peer supports.
     pub(crate) kind: PeerKind,
+    /// The Extensions supported by the peer, if any.
+    pub(crate) extensions: Option<Extensions>,
     /// If the peer is an outbound connection.
     pub(crate) outbound: bool,
     /// Its current connections.
@@ -124,6 +130,8 @@ pub(crate) struct PeerDetails {
     derive(prometheus_client::encoding::EncodeLabelValue)
 )]
 pub enum PeerKind {
+    /// A gossipsub 1.3 peer.
+    Gossipsubv1_3,
     /// A gossipsub 1.2 peer.
     Gossipsubv1_2,
     /// A gossipsub 1.1 peer.
@@ -271,6 +279,8 @@ pub enum ControlAction {
     /// The node requests us to not forward message ids (peer_id + sequence _number) - IDontWant
     /// control message.
    IDontWant(IDontWant),
+    /// The node has sent us its supported extensions.
+    Extensions(Option<Extensions>),
 }
 
 /// Node broadcasts known messages per topic - IHave control message.
@@ -314,10 +324,14 @@ pub struct IDontWant {
     pub(crate) message_ids: Vec<MessageId>,
 }
 
+/// The node has sent us the supported Gossipsub Extensions.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+pub struct Extensions {}
+
 /// A Gossipsub RPC message sent.
 #[derive(Debug)]
 pub enum RpcOut {
     /// Publish a Gossipsub message on network.`timeout` limits the duration the message
     /// can wait to be sent before it is abandoned.
     Publish { message: RawMessage, timeout: Delay },
     /// Forward a Gossipsub message on network. `timeout` limits the duration the message
@@ -338,6 +352,8 @@ pub enum RpcOut {
     /// The node requests us to not forward message ids (peer_id + sequence _number) - IDontWant
     /// control message.
     IDontWant(IDontWant),
+    /// Send an Extensions control message.
+    Extensions(Extensions),
 }
 
 impl RpcOut {
@@ -399,6 +415,7 @@ impl From<RpcOut> for proto::RPC {
                     graft: vec![],
                     prune: vec![],
                     idontwant: vec![],
+                    extensions: None,
                 }),
             },
             RpcOut::IWant(IWant { message_ids }) => proto::RPC {
@@ -412,6 +429,7 @@ impl From<RpcOut> for proto::RPC {
                     graft: vec![],
                     prune: vec![],
                     idontwant: vec![],
+                    extensions: None,
                 }),
             },
             RpcOut::Graft(Graft { topic_hash }) => proto::RPC {
@@ -425,6 +443,7 @@ impl From<RpcOut> for proto::RPC {
                     }],
                     prune: vec![],
                     idontwant: vec![],
+                    extensions: None,
                 }),
             },
             RpcOut::Prune(Prune {
@@ -452,6 +471,7 @@ impl From<RpcOut> for proto::RPC {
                         backoff,
                     }],
                     idontwant: vec![],
+                    extensions: None,
                 }),
             }
         }
@@ -466,6 +486,19 @@ impl From<RpcOut> for proto::RPC {
                 idontwant: vec![proto::ControlIDontWant {
                     message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(),
                 }],
+                extensions: None,
+            }),
+        },
+        RpcOut::Extensions(Extensions {}) => proto::RPC {
+            publish: Vec::new(),
+            subscriptions: Vec::new(),
+            control: Some(proto::ControlMessage {
+                ihave: vec![],
+                iwant: vec![],
+                graft: vec![],
+                prune: vec![],
+                idontwant: vec![],
+                extensions: Some(proto::ControlExtensions {}),
             }),
         },
     }
}
@@ -507,6 +540,7 @@ impl PeerKind {
             Self::Gossipsub => "Gossipsub v1.0",
             Self::Gossipsubv1_1 => "Gossipsub v1.1",
             Self::Gossipsubv1_2 => "Gossipsub v1.2",
+            Self::Gossipsubv1_3 => "Gossipsub v1.3",
         }
     }
 }

From 53cc7e81efbc12257ac197909a2bdca98f7c737e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?=
Date: Thu, 7 Aug 2025 23:11:47 +0100
Subject: [PATCH 02/68] implement test extension

---
 protocols/gossipsub/src/behaviour.rs        | 27 +++++++++++--
 protocols/gossipsub/src/behaviour/tests.rs  | 29 +++++++++++---
 .../gossipsub/src/generated/gossipsub/pb.rs | 40 ++++++++++++++++++-
 protocols/gossipsub/src/generated/rpc.proto |  8 +++-
 protocols/gossipsub/src/protocol.rs         |  7 +++-
 protocols/gossipsub/src/rpc.rs              |  8 ++--
 protocols/gossipsub/src/types.rs            | 39 ++++++++++++++----
 7 files changed, 134 insertions(+), 24 deletions(-)

diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs
index 6e88f9de5d3..efe4983d1e0 100644
--- a/protocols/gossipsub/src/behaviour.rs
+++ b/protocols/gossipsub/src/behaviour.rs
@@ -1539,6 +1539,12 @@ where
         }
 
         peer.extensions = Some(extensions);
+
+        if extensions.test_extension.unwrap_or(false)
+            && matches!(peer.kind, PeerKind::Gossipsubv1_3)
+        {
+            self.send_message(*peer_id, RpcOut::TestExtension);
+        }
     }
 
     /// Removes the specified peer from the mesh, returning true if it was present.
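The test extension registered by this patch uses field number 6492434, which sits above the 0x200000 floor that rpc.proto requires for experimental extensions; the pre-computed tags 51939472 (bool, wire type 0) and 51939474 (embedded message, wire type 2) in the generated pb.rs below follow mechanically from protobuf's tag encoding. A small illustrative sketch of that arithmetic (not part of the patch):

    /// Protobuf encodes a field's key as the varint `(field_number << 3) | wire_type`.
    fn tag(field_number: u64, wire_type: u64) -> u64 {
        (field_number << 3) | wire_type
    }

    /// Number of bytes a value occupies as a protobuf varint (7 bits per byte).
    fn varint_len(mut value: u64) -> usize {
        let mut len = 1;
        while value >= 0x80 {
            value >>= 7;
            len += 1;
        }
        len
    }

    #[test]
    fn test_extension_field_numbers() {
        // `optional bool testExtension = 6492434;` inside ControlExtensions.
        assert_eq!(tag(6492434, 0), 51939472);
        // `optional TestExtension testExtension = 6492434;` inside RPC.
        assert_eq!(tag(6492434, 2), 51939474);
        // Any field number >= 0x200000 yields a tag >= 0x1000000, which takes
        // four varint bytes -- hence the `4 + sizeof_len(..)` size hints in pb.rs.
        assert_eq!(varint_len(tag(0x200000, 0)), 4);
        assert_eq!(varint_len(tag(6492434, 2)), 4);
    }
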
@@ -2919,7 +2925,8 @@ where | RpcOut::Prune(_) | RpcOut::Subscribe(_) | RpcOut::Unsubscribe(_) - | RpcOut::Extensions(_) => { + | RpcOut::Extensions(_) + | RpcOut::TestExtension => { unreachable!("Channel for highpriority control messages is unbounded and should always be open.") } } @@ -3161,7 +3168,12 @@ where if connected_peer.connections.len() <= 1 { // If this is the first connection send extensions message. - self.send_message(peer_id, RpcOut::Extensions(Extensions {})); + self.send_message( + peer_id, + RpcOut::Extensions(Extensions { + test_extension: Some(true), + }), + ); } Ok(Handler::new(self.config.protocol_config(), receiver)) @@ -3192,7 +3204,12 @@ where if connected_peer.connections.len() <= 1 { // If this is the first connection send extensions message. - self.send_message(peer_id, RpcOut::Extensions(Extensions {})); + self.send_message( + peer_id, + RpcOut::Extensions(Extensions { + test_extension: Some(true), + }), + ); } Ok(Handler::new(self.config.protocol_config(), receiver)) @@ -3396,6 +3413,10 @@ where if !prune_msgs.is_empty() { self.handle_prune(&propagation_source, prune_msgs); } + + if let Some(_extension) = rpc.test_extension { + tracing::debug!("Received Test Extension"); + } } } } diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index cf5dcadd0c5..36ddf4162aa 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -420,6 +420,7 @@ fn proto_to_message(rpc: &proto::RPC) -> RpcIn { }) .collect(), control_msgs, + test_extension: None, } } @@ -1253,6 +1254,7 @@ fn test_handle_iwant_msg_but_already_sent_idontwant() { control_msgs: vec![ControlAction::IDontWant(IDontWant { message_ids: vec![msg_id.clone()], })], + test_extension: None, }; gs.on_connection_handler_event( peers[1], @@ -3145,6 +3147,7 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() { messages: vec![raw_message1], subscriptions: vec![subscription.clone()], control_msgs: vec![control_action], + test_extension: None, }, invalid_messages: Vec::new(), }, @@ -3171,6 +3174,7 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() { messages: vec![raw_message3], subscriptions: vec![subscription], control_msgs: vec![control_action], + test_extension: None, }, invalid_messages: Vec::new(), }, @@ -3781,6 +3785,7 @@ fn test_scoring_p4_invalid_signature() { messages: vec![], subscriptions: vec![], control_msgs: vec![], + test_extension: None, }, invalid_messages: vec![(m, ValidationError::InvalidSignature)], }, @@ -5540,6 +5545,7 @@ fn parses_idontwant() { control_msgs: vec![ControlAction::IDontWant(IDontWant { message_ids: vec![message_id.clone()], })], + test_extension: None, }; gs.on_connection_handler_event( peers[1], @@ -6638,6 +6644,7 @@ fn test_validation_error_message_size_too_large_topic_specific() { messages: vec![raw_message], subscriptions: vec![], control_msgs: vec![], + test_extension: None, }, invalid_messages: vec![], }, @@ -6682,6 +6689,7 @@ fn test_validation_error_message_size_too_large_topic_specific() { }], subscriptions: vec![], control: None, + testExtension: None, }; codec.encode(rpc, &mut buf).unwrap(); @@ -6742,6 +6750,7 @@ fn test_validation_message_size_within_topic_specific() { messages: vec![raw_message], subscriptions: vec![], control_msgs: vec![], + test_extension: None, }, invalid_messages: vec![], }, @@ -6786,6 +6795,7 @@ fn test_validation_message_size_within_topic_specific() { }], subscriptions: vec![], control: None, + testExtension: None, }; codec.encode(rpc, 
&mut buf).unwrap(); @@ -6805,12 +6815,16 @@ fn test_validation_message_size_within_topic_specific() { #[test] fn test_extensions_message_creation() { - let extensions_rpc = RpcOut::Extensions(Extensions {}); + let extensions_rpc = RpcOut::Extensions(Extensions { + test_extension: Some(true), + }); let proto_rpc: proto::RPC = extensions_rpc.into(); assert!(proto_rpc.control.is_some()); let control = proto_rpc.control.unwrap(); assert!(control.extensions.is_some()); + let test_extension = control.extensions.unwrap().testExtension.unwrap(); + assert!(test_extension); assert!(control.ihave.is_empty()); assert!(control.iwant.is_empty()); assert!(control.graft.is_empty()); @@ -6847,7 +6861,9 @@ fn test_handle_extensions_message() { ); // Simulate receiving extensions message - let extensions = Extensions {}; + let extensions = Extensions { + test_extension: Some(false), + }; gs.handle_extensions(&peer_id, extensions); // Verify extensions were stored @@ -6855,11 +6871,13 @@ fn test_handle_extensions_message() { assert!(peer_details.extensions.is_some()); // Simulate receiving duplicate extensions message from another peer - // TODO: when more extensions are added, we should test that they are not overridden. let duplicate_rpc = RpcIn { messages: vec![], subscriptions: vec![], - control_msgs: vec![ControlAction::Extensions(None)], + control_msgs: vec![ControlAction::Extensions(Some(Extensions { + test_extension: Some(true), + }))], + test_extension: None, }; gs.on_connection_handler_event( @@ -6873,5 +6891,6 @@ fn test_handle_extensions_message() { // Extensions should still be present (not cleared or changed) let peer_details = gs.connected_peers.get(&peer_id).unwrap(); - assert!(peer_details.extensions.is_some()); + let test_extension = peer_details.extensions.unwrap().test_extension.unwrap(); + assert!(!test_extension); } diff --git a/protocols/gossipsub/src/generated/gossipsub/pb.rs b/protocols/gossipsub/src/generated/gossipsub/pb.rs index 9a3ddb2e2fb..a62ac15b6d9 100644 --- a/protocols/gossipsub/src/generated/gossipsub/pb.rs +++ b/protocols/gossipsub/src/generated/gossipsub/pb.rs @@ -19,6 +19,7 @@ pub struct RPC { pub subscriptions: Vec, pub publish: Vec, pub control: Option, + pub testExtension: Option, } impl<'a> MessageRead<'a> for RPC { @@ -29,6 +30,7 @@ impl<'a> MessageRead<'a> for RPC { Ok(10) => msg.subscriptions.push(r.read_message::(bytes)?), Ok(18) => msg.publish.push(r.read_message::(bytes)?), Ok(26) => msg.control = Some(r.read_message::(bytes)?), + Ok(51939474) => msg.testExtension = Some(r.read_message::(bytes)?), Ok(t) => { r.read_unknown(bytes, t)?; } Err(e) => return Err(e), } @@ -43,12 +45,14 @@ impl MessageWrite for RPC { + self.subscriptions.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() + self.publish.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() + self.control.as_ref().map_or(0, |m| 1 + sizeof_len((m).get_size())) + + self.testExtension.as_ref().map_or(0, |m| 4 + sizeof_len((m).get_size())) } fn write_message(&self, w: &mut Writer) -> Result<()> { for s in &self.subscriptions { w.write_with_tag(10, |w| w.write_message(s))?; } for s in &self.publish { w.write_with_tag(18, |w| w.write_message(s))?; } if let Some(ref s) = self.control { w.write_with_tag(26, |w| w.write_message(s))?; } + if let Some(ref s) = self.testExtension { w.write_with_tag(51939474, |w| w.write_message(s))?; } Ok(()) } } @@ -373,16 +377,48 @@ impl MessageWrite for ControlIDontWant { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Debug, Default, PartialEq, Clone)] -pub struct 
ControlExtensions { } +pub struct ControlExtensions { + pub testExtension: Option, +} impl<'a> MessageRead<'a> for ControlExtensions { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(51939472) => msg.testExtension = Some(r.read_bool(bytes)?), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for ControlExtensions { + fn get_size(&self) -> usize { + 0 + + self.testExtension.as_ref().map_or(0, |m| 4 + sizeof_varint(*(m) as u64)) + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + if let Some(ref s) = self.testExtension { w.write_with_tag(51939472, |w| w.write_bool(*s))?; } + Ok(()) + } +} + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct TestExtension { } + +impl<'a> MessageRead<'a> for TestExtension { fn from_reader(r: &mut BytesReader, _: &[u8]) -> Result { r.read_to_end(); Ok(Self::default()) } } -impl MessageWrite for ControlExtensions { } +impl MessageWrite for TestExtension { } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Debug, Default, PartialEq, Clone)] diff --git a/protocols/gossipsub/src/generated/rpc.proto b/protocols/gossipsub/src/generated/rpc.proto index 4f50bc77aaa..4a95257b0d8 100644 --- a/protocols/gossipsub/src/generated/rpc.proto +++ b/protocols/gossipsub/src/generated/rpc.proto @@ -12,12 +12,12 @@ message RPC { } optional ControlMessage control = 3; - // Canonical Extensions should register their messages here. // Experimental Extensions should register their messages here. They // must use field numbers larger than 0x200000 to be encoded with at least 4 // bytes + optional TestExtension testExtension = 6492434; } message Message { @@ -64,8 +64,14 @@ message ControlIDontWant { message ControlExtensions { // Initially empty. Future extensions will be added here along with a // reference to their specification. 
+ + // Experimental extensions must use field numbers larger than 0x200000 to be + // encoded with at least 4 bytes + optional bool testExtension = 6492434; } +message TestExtension {} + message PeerInfo { optional bytes peer_id = 1; optional bytes signed_peer_record = 2; diff --git a/protocols/gossipsub/src/protocol.rs b/protocols/gossipsub/src/protocol.rs index dff5c4ffe75..e91bbbb1837 100644 --- a/protocols/gossipsub/src/protocol.rs +++ b/protocols/gossipsub/src/protocol.rs @@ -36,7 +36,7 @@ use crate::{ topic::TopicHash, types::{ ControlAction, Extensions, Graft, IDontWant, IHave, IWant, MessageId, PeerInfo, PeerKind, - Prune, RawMessage, RpcIn, Subscription, SubscriptionAction, + Prune, RawMessage, RpcIn, Subscription, SubscriptionAction, TestExtension, }, ValidationError, }; @@ -562,7 +562,9 @@ impl Decoder for GossipsubCodec { }) .collect(); - let extension_msg = rpc_control.extensions.map(|_extension| Extensions {}); + let extension_msg = rpc_control.extensions.map(|extensions| Extensions { + test_extension: extensions.testExtension, + }); control_msgs.extend(ihave_msgs); control_msgs.extend(iwant_msgs); @@ -588,6 +590,7 @@ impl Decoder for GossipsubCodec { }) .collect(), control_msgs, + test_extension: rpc.testExtension.map(|_extension| TestExtension {}), }, invalid_messages, })) diff --git a/protocols/gossipsub/src/rpc.rs b/protocols/gossipsub/src/rpc.rs index ca4b25dfd23..3b20e70afa9 100644 --- a/protocols/gossipsub/src/rpc.rs +++ b/protocols/gossipsub/src/rpc.rs @@ -90,9 +90,11 @@ impl Sender { | RpcOut::Extensions(_) | RpcOut::Subscribe(_) | RpcOut::Unsubscribe(_) => &self.priority_sender, - RpcOut::Forward { .. } | RpcOut::IHave(_) | RpcOut::IWant(_) | RpcOut::IDontWant(_) => { - &self.non_priority_sender - } + RpcOut::Forward { .. } + | RpcOut::IHave(_) + | RpcOut::IWant(_) + | RpcOut::IDontWant(_) + | RpcOut::TestExtension => &self.non_priority_sender, }; sender.try_send(rpc).map_err(|err| err.into_inner()) } diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs index 30edfe28c57..59fe4cfe407 100644 --- a/protocols/gossipsub/src/types.rs +++ b/protocols/gossipsub/src/types.rs @@ -30,11 +30,7 @@ use quick_protobuf::MessageWrite; use serde::{Deserialize, Serialize}; use web_time::Instant; -use crate::{ - rpc::Sender, - rpc_proto::proto::{self}, - TopicHash, -}; +use crate::{rpc::Sender, rpc_proto::proto, TopicHash}; /// Messages that have expired while attempting to be sent to a peer. #[derive(Clone, Debug, Default)] @@ -326,7 +322,12 @@ pub struct IDontWant { /// The node has sent us the supported Gossipsub Extensions. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct Extensions {} +pub struct Extensions { + pub(crate) test_extension: Option, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub struct TestExtension {} /// A Gossipsub RPC message sent. #[derive(Debug)] @@ -354,6 +355,8 @@ pub enum RpcOut { IDontWant(IDontWant), /// Send a Extensions control message. Extensions(Extensions), + /// Send a test extension message. 
+ TestExtension, } impl RpcOut { @@ -375,6 +378,7 @@ impl From for proto::RPC { subscriptions: Vec::new(), publish: vec![message.into()], control: None, + testExtension: None, }, RpcOut::Forward { message, @@ -383,6 +387,7 @@ impl From for proto::RPC { publish: vec![message.into()], subscriptions: Vec::new(), control: None, + testExtension: None, }, RpcOut::Subscribe(topic) => proto::RPC { publish: Vec::new(), @@ -391,6 +396,7 @@ impl From for proto::RPC { topic_id: Some(topic.into_string()), }], control: None, + testExtension: None, }, RpcOut::Unsubscribe(topic) => proto::RPC { publish: Vec::new(), @@ -399,6 +405,7 @@ impl From for proto::RPC { topic_id: Some(topic.into_string()), }], control: None, + testExtension: None, }, RpcOut::IHave(IHave { topic_hash, @@ -417,6 +424,7 @@ impl From for proto::RPC { idontwant: vec![], extensions: None, }), + testExtension: None, }, RpcOut::IWant(IWant { message_ids }) => proto::RPC { publish: Vec::new(), @@ -431,6 +439,7 @@ impl From for proto::RPC { idontwant: vec![], extensions: None, }), + testExtension: None, }, RpcOut::Graft(Graft { topic_hash }) => proto::RPC { publish: Vec::new(), @@ -445,6 +454,7 @@ impl From for proto::RPC { idontwant: vec![], extensions: None, }), + testExtension: None, }, RpcOut::Prune(Prune { topic_hash, @@ -473,6 +483,7 @@ impl From for proto::RPC { idontwant: vec![], extensions: None, }), + testExtension: None, } } RpcOut::IDontWant(IDontWant { message_ids }) => proto::RPC { @@ -488,8 +499,9 @@ impl From for proto::RPC { }], extensions: None, }), + testExtension: None, }, - RpcOut::Extensions(Extensions {}) => proto::RPC { + RpcOut::Extensions(Extensions { test_extension }) => proto::RPC { publish: Vec::new(), subscriptions: Vec::new(), control: Some(proto::ControlMessage { @@ -498,8 +510,17 @@ impl From for proto::RPC { graft: vec![], prune: vec![], idontwant: vec![], - extensions: Some(proto::ControlExtensions {}), + extensions: Some(proto::ControlExtensions { + testExtension: test_extension, + }), }), + testExtension: None, + }, + RpcOut::TestExtension => proto::RPC { + subscriptions: vec![], + publish: vec![], + control: None, + testExtension: Some(proto::TestExtension {}), }, } } @@ -514,6 +535,8 @@ pub struct RpcIn { pub subscriptions: Vec, /// List of Gossipsub control messages. pub control_msgs: Vec, + /// Gossipsub test extension. 
+ pub test_extension: Option, } impl fmt::Debug for RpcIn { From 4249ccb37e468bc523215c6d623264a91b3d6e67 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Thu, 7 Aug 2025 22:14:08 +0100 Subject: [PATCH 03/68] Add PartialMessage trait implementation --- protocols/gossipsub/src/behaviour.rs | 2 + protocols/gossipsub/src/error.rs | 73 ++++++++ .../gossipsub/src/generated/gossipsub/pb.rs | 169 ++++++++++++++++++ protocols/gossipsub/src/generated/rpc.proto | 28 ++- protocols/gossipsub/src/lib.rs | 2 + protocols/gossipsub/src/partial.rs | 86 +++++++++ protocols/gossipsub/src/protocol.rs | 7 +- protocols/gossipsub/src/types.rs | 18 +- 8 files changed, 380 insertions(+), 5 deletions(-) create mode 100644 protocols/gossipsub/src/partial.rs diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index efe4983d1e0..63c3234a872 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -3172,6 +3172,7 @@ where peer_id, RpcOut::Extensions(Extensions { test_extension: Some(true), + partial_messages: Some(true), }), ); } @@ -3208,6 +3209,7 @@ where peer_id, RpcOut::Extensions(Extensions { test_extension: Some(true), + partial_messages: Some(true), }), ); } diff --git a/protocols/gossipsub/src/error.rs b/protocols/gossipsub/src/error.rs index 7af14d84ac0..df6d0678680 100644 --- a/protocols/gossipsub/src/error.rs +++ b/protocols/gossipsub/src/error.rs @@ -160,3 +160,76 @@ impl std::fmt::Display for ConfigBuilderError { } } } + +/// Errors that can occur during partial message processing. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum PartialMessageError { + /// The received data is too short to contain required headers/metadata. + InsufficientData { + /// Expected minimum number of bytes. + expected: usize, + /// Actual number of bytes received. + received: usize, + }, + + /// The data format is invalid or corrupted. + InvalidFormat, + + /// The partial data doesn't belong to this message group. + WrongGroup { + /// Expected minimum number of bytes. + expected: usize, + /// Actual number of bytes received. + received: usize, + }, + + /// The partial data is a duplicate of already received data. + DuplicateData(Vec), + + /// The partial data is out of the expected range or sequence. + OutOfRange, + + /// The message is already complete and cannot accept more data. + AlreadyComplete, + + /// Application-specific validation failed. 
+ ValidationFailed, +} + +impl std::error::Error for PartialMessageError {} + +impl std::fmt::Display for PartialMessageError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::InsufficientData { expected, received } => { + write!( + f, + "Insufficient data: expected at least {} bytes, got {}", + expected, received + ) + } + Self::InvalidFormat => { + write!(f, "Invalid data format") + } + Self::WrongGroup { expected, received } => { + write!( + f, + "Wrong group ID: expected {:?}, got {:?}", + expected, received + ) + } + Self::DuplicateData(part_id) => { + write!(f, "Duplicate data for part {:?}", part_id) + } + Self::OutOfRange => { + write!(f, "Data out of range") + } + Self::AlreadyComplete => { + write!(f, "Message is already complete") + } + Self::ValidationFailed => { + write!(f, "Validation failed") + } + } + } +} diff --git a/protocols/gossipsub/src/generated/gossipsub/pb.rs b/protocols/gossipsub/src/generated/gossipsub/pb.rs index a62ac15b6d9..3aed06a6515 100644 --- a/protocols/gossipsub/src/generated/gossipsub/pb.rs +++ b/protocols/gossipsub/src/generated/gossipsub/pb.rs @@ -20,6 +20,7 @@ pub struct RPC { pub publish: Vec, pub control: Option, pub testExtension: Option, + pub partial: Option, } impl<'a> MessageRead<'a> for RPC { @@ -31,6 +32,7 @@ impl<'a> MessageRead<'a> for RPC { Ok(18) => msg.publish.push(r.read_message::(bytes)?), Ok(26) => msg.control = Some(r.read_message::(bytes)?), Ok(51939474) => msg.testExtension = Some(r.read_message::(bytes)?), + Ok(131350034) => msg.partial = Some(r.read_message::(bytes)?), Ok(t) => { r.read_unknown(bytes, t)?; } Err(e) => return Err(e), } @@ -46,6 +48,7 @@ impl MessageWrite for RPC { + self.publish.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() + self.control.as_ref().map_or(0, |m| 1 + sizeof_len((m).get_size())) + self.testExtension.as_ref().map_or(0, |m| 4 + sizeof_len((m).get_size())) + + self.partial.as_ref().map_or(0, |m| 4 + sizeof_len((m).get_size())) } fn write_message(&self, w: &mut Writer) -> Result<()> { @@ -53,6 +56,7 @@ impl MessageWrite for RPC { for s in &self.publish { w.write_with_tag(18, |w| w.write_message(s))?; } if let Some(ref s) = self.control { w.write_with_tag(26, |w| w.write_message(s))?; } if let Some(ref s) = self.testExtension { w.write_with_tag(51939474, |w| w.write_message(s))?; } + if let Some(ref s) = self.partial { w.write_with_tag(131350034, |w| w.write_message(s))?; } Ok(()) } } @@ -379,6 +383,7 @@ impl MessageWrite for ControlIDontWant { #[derive(Debug, Default, PartialEq, Clone)] pub struct ControlExtensions { pub testExtension: Option, + pub partialMessages: Option, } impl<'a> MessageRead<'a> for ControlExtensions { @@ -387,6 +392,7 @@ impl<'a> MessageRead<'a> for ControlExtensions { while !r.is_eof() { match r.next_tag(bytes) { Ok(51939472) => msg.testExtension = Some(r.read_bool(bytes)?), + Ok(131350032) => msg.partialMessages = Some(r.read_bool(bytes)?), Ok(t) => { r.read_unknown(bytes, t)?; } Err(e) => return Err(e), } @@ -399,10 +405,12 @@ impl MessageWrite for ControlExtensions { fn get_size(&self) -> usize { 0 + self.testExtension.as_ref().map_or(0, |m| 4 + sizeof_varint(*(m) as u64)) + + self.partialMessages.as_ref().map_or(0, |m| 4 + sizeof_varint(*(m) as u64)) } fn write_message(&self, w: &mut Writer) -> Result<()> { if let Some(ref s) = self.testExtension { w.write_with_tag(51939472, |w| w.write_bool(*s))?; } + if let Some(ref s) = self.partialMessages { w.write_with_tag(131350032, |w| w.write_bool(*s))?; } Ok(()) } } @@ -654,3 
+662,164 @@ impl<'a> From<&'a str> for EncMode { } +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct PartialMessagesExtension { + pub topicID: Option>, + pub groupID: Option>, + pub message: Option, + pub iwant: Option, + pub idontwant: Option, + pub ihave: Option, +} + +impl<'a> MessageRead<'a> for PartialMessagesExtension { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(10) => msg.topicID = Some(r.read_bytes(bytes)?.to_owned()), + Ok(18) => msg.groupID = Some(r.read_bytes(bytes)?.to_owned()), + Ok(26) => msg.message = Some(r.read_message::(bytes)?), + Ok(34) => msg.iwant = Some(r.read_message::(bytes)?), + Ok(42) => msg.idontwant = Some(r.read_message::(bytes)?), + Ok(50) => msg.ihave = Some(r.read_message::(bytes)?), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for PartialMessagesExtension { + fn get_size(&self) -> usize { + 0 + + self.topicID.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + + self.groupID.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + + self.message.as_ref().map_or(0, |m| 1 + sizeof_len((m).get_size())) + + self.iwant.as_ref().map_or(0, |m| 1 + sizeof_len((m).get_size())) + + self.idontwant.as_ref().map_or(0, |m| 1 + sizeof_len((m).get_size())) + + self.ihave.as_ref().map_or(0, |m| 1 + sizeof_len((m).get_size())) + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + if let Some(ref s) = self.topicID { w.write_with_tag(10, |w| w.write_bytes(&**s))?; } + if let Some(ref s) = self.groupID { w.write_with_tag(18, |w| w.write_bytes(&**s))?; } + if let Some(ref s) = self.message { w.write_with_tag(26, |w| w.write_message(s))?; } + if let Some(ref s) = self.iwant { w.write_with_tag(34, |w| w.write_message(s))?; } + if let Some(ref s) = self.idontwant { w.write_with_tag(42, |w| w.write_message(s))?; } + if let Some(ref s) = self.ihave { w.write_with_tag(50, |w| w.write_message(s))?; } + Ok(()) + } +} + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct PartialMessage { + pub data: Option>, +} + +impl<'a> MessageRead<'a> for PartialMessage { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(10) => msg.data = Some(r.read_bytes(bytes)?.to_owned()), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for PartialMessage { + fn get_size(&self) -> usize { + 0 + + self.data.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + if let Some(ref s) = self.data { w.write_with_tag(10, |w| w.write_bytes(&**s))?; } + Ok(()) + } +} + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct PartialIWANT { + pub metadata: Option>, +} + +impl<'a> MessageRead<'a> for PartialIWANT { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(10) => msg.metadata = Some(r.read_bytes(bytes)?.to_owned()), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for PartialIWANT { + fn get_size(&self) -> usize { + 0 + + self.metadata.as_ref().map_or(0, |m| 1 + 
sizeof_len((m).len())) + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + if let Some(ref s) = self.metadata { w.write_with_tag(10, |w| w.write_bytes(&**s))?; } + Ok(()) + } +} + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct PartialIDONTWANT { } + +impl<'a> MessageRead<'a> for PartialIDONTWANT { + fn from_reader(r: &mut BytesReader, _: &[u8]) -> Result { + r.read_to_end(); + Ok(Self::default()) + } +} + +impl MessageWrite for PartialIDONTWANT { } + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct PartialIHAVE { + pub metadata: Option>, +} + +impl<'a> MessageRead<'a> for PartialIHAVE { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(10) => msg.metadata = Some(r.read_bytes(bytes)?.to_owned()), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for PartialIHAVE { + fn get_size(&self) -> usize { + 0 + + self.metadata.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + if let Some(ref s) = self.metadata { w.write_with_tag(10, |w| w.write_bytes(&**s))?; } + Ok(()) + } +} + diff --git a/protocols/gossipsub/src/generated/rpc.proto b/protocols/gossipsub/src/generated/rpc.proto index 4a95257b0d8..2951fe2b283 100644 --- a/protocols/gossipsub/src/generated/rpc.proto +++ b/protocols/gossipsub/src/generated/rpc.proto @@ -18,6 +18,7 @@ message RPC { // must use field numbers larger than 0x200000 to be encoded with at least 4 // bytes optional TestExtension testExtension = 6492434; + optional PartialMessagesExtension partial = 16418754; } message Message { @@ -58,7 +59,7 @@ message ControlPrune { } message ControlIDontWant { - repeated bytes message_ids = 1; + repeated bytes message_ids = 1; } message ControlExtensions { @@ -68,6 +69,7 @@ message ControlExtensions { // Experimental extensions must use field numbers larger than 0x200000 to be // encoded with at least 4 bytes optional bool testExtension = 6492434; + optional bool partialMessages = 16418754; } message TestExtension {} @@ -105,3 +107,27 @@ message TopicDescriptor { } } } + +message PartialMessagesExtension { + optional bytes topicID = 1; + optional bytes groupID = 2; + + optional PartialMessage message = 3; + optional PartialIWANT iwant = 4; + optional PartialIDONTWANT idontwant = 5; + optional PartialIHAVE ihave = 6; +} + +message PartialMessage { + optional bytes data = 1; +} + +message PartialIWANT { + optional bytes metadata = 1; +} + +message PartialIDONTWANT {} + +message PartialIHAVE { + optional bytes metadata = 1; +} diff --git a/protocols/gossipsub/src/lib.rs b/protocols/gossipsub/src/lib.rs index a116900be0e..4b5f552d49c 100644 --- a/protocols/gossipsub/src/lib.rs +++ b/protocols/gossipsub/src/lib.rs @@ -113,6 +113,8 @@ mod topic; mod transform; mod types; +pub mod partial; + #[cfg(feature = "metrics")] pub use metrics::Config as MetricsConfig; diff --git a/protocols/gossipsub/src/partial.rs b/protocols/gossipsub/src/partial.rs new file mode 100644 index 00000000000..0577bf1a986 --- /dev/null +++ b/protocols/gossipsub/src/partial.rs @@ -0,0 +1,86 @@ +// Copyright 2020 Sigma Prime Pty Ltd. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use crate::error::PartialMessageError; + +/// PartialMessage is a message that can be broken up into parts. +/// This trait allows applications to define custom strategies for splitting large messages +/// into parts and reconstructing them from received partial data. It provides the core +/// operations needed for the gossipsub partial messages extension. +/// +/// The partial message protocol works as follows: +/// 1. Applications implement this trait to define how messages are split and reconstructed +/// 2. Peers advertise available parts using `available_parts()` metadata in PartialIHAVE +/// 3. Peers request missing parts using `missing_parts()` metadata in PartialIWANT +/// 4. When requests are received, `partial_message_bytes_from_metadata()` generates the response +/// 5. Received partial data is integrated using `extend_from_encoded_partial_message()` +/// 6. The `group_id()` ties all parts of the same logical message together +pub trait PartialMessage { + /// Returns the unique identifier for this message group. + /// + /// All partial messages belonging to the same logical message should return + /// the same group ID. This is used to associate partial messages together + /// during reconstruction. + fn group_id(&self) -> &[u8]; + + /// Returns metadata describing which parts of the message are missing. + /// + /// This metadata is application-defined and should encode information about + /// what parts need to be requested from other peers. Returns `None` if the + /// message is complete or if no specific parts can be identified as missing. + /// + /// The returned bytes will be sent in PartialIWANT messages to request + /// missing parts from peers. + fn missing_parts(&self) -> Option<&[u8]>; + + /// Returns metadata describing which parts of the message are available. + /// + /// This metadata is application-defined and should encode information about + /// what parts this peer can provide to others. Returns `None` if no parts + /// are available. + /// + /// The returned bytes will be sent in PartialIHAVE messages to advertise + /// available parts to peers. + fn available_parts(&self) -> Option<&[u8]>; + + /// Generates partial message bytes from the given metadata. + /// + /// When a peer requests specific parts (via PartialIWANT), this method + /// generates the actual message data to send back. The `metadata` parameter + /// describes what parts are being requested. 
+ /// + /// Returns a tuple of: + /// - The encoded partial message bytes to send over the network + /// - Optional remaining metadata if more parts are still available after this one + fn partial_message_bytes_from_metadata(&self, metadata: &[u8]) -> (Vec, Option>); + + /// Extends this message with received partial message data. + /// + /// When partial message data is received from a peer, this method integrates + /// it into the current message state. The implementation should validate and + /// store the received data appropriately. + /// + /// Returns `Ok(())` if the data was successfully integrated, or `Err`, + /// if the data was invalid or couldn't be processed. + fn extend_from_encoded_partial_message( + &mut self, + data: &[u8], + ) -> Result<(), PartialMessageError>; +} diff --git a/protocols/gossipsub/src/protocol.rs b/protocols/gossipsub/src/protocol.rs index e91bbbb1837..26e3250092f 100644 --- a/protocols/gossipsub/src/protocol.rs +++ b/protocols/gossipsub/src/protocol.rs @@ -562,8 +562,9 @@ impl Decoder for GossipsubCodec { }) .collect(); - let extension_msg = rpc_control.extensions.map(|extensions| Extensions { + let extensions_msg = rpc_control.extensions.map(|extensions| Extensions { test_extension: extensions.testExtension, + partial_messages: extensions.partialMessages, }); control_msgs.extend(ihave_msgs); @@ -571,7 +572,7 @@ impl Decoder for GossipsubCodec { control_msgs.extend(graft_msgs); control_msgs.extend(prune_msgs); control_msgs.extend(idontwant_msgs); - control_msgs.push(ControlAction::Extensions(extension_msg)); + control_msgs.push(ControlAction::Extensions(extensions_msg)); } Ok(Some(HandlerEvent::Message { @@ -590,7 +591,7 @@ impl Decoder for GossipsubCodec { }) .collect(), control_msgs, - test_extension: rpc.testExtension.map(|_extension| TestExtension {}), + test_extension: rpc.testExtension.map(|_test_extension| TestExtension {}), }, invalid_messages, })) diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs index 59fe4cfe407..5af135a995b 100644 --- a/protocols/gossipsub/src/types.rs +++ b/protocols/gossipsub/src/types.rs @@ -324,6 +324,7 @@ pub struct IDontWant { #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct Extensions { pub(crate) test_extension: Option, + pub(crate) partial_messages: Option, } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] @@ -379,6 +380,7 @@ impl From for proto::RPC { publish: vec![message.into()], control: None, testExtension: None, + partial: None, }, RpcOut::Forward { message, @@ -388,6 +390,7 @@ impl From for proto::RPC { subscriptions: Vec::new(), control: None, testExtension: None, + partial: None, }, RpcOut::Subscribe(topic) => proto::RPC { publish: Vec::new(), @@ -397,6 +400,7 @@ impl From for proto::RPC { }], control: None, testExtension: None, + partial: None, }, RpcOut::Unsubscribe(topic) => proto::RPC { publish: Vec::new(), @@ -406,6 +410,7 @@ impl From for proto::RPC { }], control: None, testExtension: None, + partial: None, }, RpcOut::IHave(IHave { topic_hash, @@ -425,6 +430,7 @@ impl From for proto::RPC { extensions: None, }), testExtension: None, + partial: None, }, RpcOut::IWant(IWant { message_ids }) => proto::RPC { publish: Vec::new(), @@ -440,6 +446,7 @@ impl From for proto::RPC { extensions: None, }), testExtension: None, + partial: None, }, RpcOut::Graft(Graft { topic_hash }) => proto::RPC { publish: Vec::new(), @@ -455,6 +462,7 @@ impl From for proto::RPC { extensions: None, }), testExtension: None, + partial: None, }, RpcOut::Prune(Prune { topic_hash, @@ -484,6 
+492,7 @@ impl From<RpcOut> for proto::RPC {
                     extensions: None,
                 }),
                 testExtension: None,
+                partial: None,
             }
         }
         RpcOut::IDontWant(IDontWant { message_ids }) => proto::RPC {
@@ -500,8 +509,12 @@
                 extensions: None,
             }),
             testExtension: None,
+            partial: None,
         },
-        RpcOut::Extensions(Extensions { test_extension }) => proto::RPC {
+        RpcOut::Extensions(Extensions {
+            partial_messages,
+            test_extension,
+        }) => proto::RPC {
             publish: Vec::new(),
             subscriptions: Vec::new(),
             control: Some(proto::ControlMessage {
@@ -512,15 +525,18 @@
                 idontwant: vec![],
                 extensions: Some(proto::ControlExtensions {
                     testExtension: test_extension,
+                    partialMessages: partial_messages,
                 }),
             }),
             testExtension: None,
+            partial: None,
         },
         RpcOut::TestExtension => proto::RPC {
             subscriptions: vec![],
             publish: vec![],
             control: None,
             testExtension: Some(proto::TestExtension {}),
+            partial: None,
         },
     }
 }

From 9aa7ead7eadeb58eddc253358ee64c5157d88dbf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?=
Date: Wed, 10 Sep 2025 13:49:12 +0100
Subject: [PATCH 04/68] implement sending and receiving partial messages at the protocol level

---
 protocols/gossipsub/src/behaviour.rs |  8 ++-
 protocols/gossipsub/src/protocol.rs  | 46 +++++++++++++-
 protocols/gossipsub/src/rpc.rs       |  3 +-
 protocols/gossipsub/src/types.rs     | 92 ++++++++++++++++++++++++++++
 4 files changed, 145 insertions(+), 4 deletions(-)

diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs
index 63c3234a872..f8d561d8d34 100644
--- a/protocols/gossipsub/src/behaviour.rs
+++ b/protocols/gossipsub/src/behaviour.rs
@@ -2918,7 +2918,10 @@ where
                     failed_messages.non_priority += 1;
                     failed_messages.forward += 1;
                 }
-                RpcOut::IWant(_) | RpcOut::IHave(_) | RpcOut::IDontWant(_) => {
+                RpcOut::IWant(_)
+                | RpcOut::IHave(_)
+                | RpcOut::IDontWant(_)
+                | RpcOut::PartialMessage { ..
} => { failed_messages.non_priority += 1; } RpcOut::Graft(_) @@ -3404,6 +3407,9 @@ where self.handle_extensions(&propagation_source, extensions); } } + ControlAction::PartialMessages(_partial) => { + todo!("Handle partial messages in behaviour") + } } } if !ihave_msgs.is_empty() { diff --git a/protocols/gossipsub/src/protocol.rs b/protocols/gossipsub/src/protocol.rs index 26e3250092f..3a86d19de50 100644 --- a/protocols/gossipsub/src/protocol.rs +++ b/protocols/gossipsub/src/protocol.rs @@ -35,8 +35,9 @@ use crate::{ rpc_proto::proto, topic::TopicHash, types::{ - ControlAction, Extensions, Graft, IDontWant, IHave, IWant, MessageId, PeerInfo, PeerKind, - Prune, RawMessage, RpcIn, Subscription, SubscriptionAction, TestExtension, + ControlAction, Extensions, Graft, IDontWant, IHave, IWant, MessageId, PartialMessage, + PartialMessageKind, PeerInfo, PeerKind, Prune, RawMessage, RpcIn, Subscription, + SubscriptionAction, TestExtension, }, ValidationError, }; @@ -575,6 +576,46 @@ impl Decoder for GossipsubCodec { control_msgs.push(ControlAction::Extensions(extensions_msg)); } + let partial_message = rpc.partial.and_then(|partial_proto| { + // Extract topic and group context + let Some(topic_id_bytes) = partial_proto.topicID else { + tracing::debug!("Partial message without topic_id, discarding"); + return None; + }; + let topic_id = TopicHash::from_raw(String::from_utf8_lossy(&topic_id_bytes)); + + let Some(group_id) = partial_proto.groupID else { + tracing::debug!("Partial message without group_id, discarding"); + return None; + }; + + let mut messages = vec![]; + + if let Some(proto::PartialIHAVE { + metadata: Some(metadata), + }) = partial_proto.ihave + { + messages.push(PartialMessageKind::IHave { metadata }); + } + + if let Some(proto::PartialIWANT { + metadata: Some(metadata), + }) = partial_proto.iwant + { + messages.push(PartialMessageKind::IWant { metadata }); + } + + if let Some(proto::PartialMessage { data: Some(data) }) = partial_proto.message { + messages.push(PartialMessageKind::Publish { data }); + } + + Some(PartialMessage { + topic_id, + group_id, + messages, + }) + }); + Ok(Some(HandlerEvent::Message { rpc: RpcIn { messages, @@ -592,6 +633,7 @@ impl Decoder for GossipsubCodec { .collect(), control_msgs, test_extension: rpc.testExtension.map(|_test_extension| TestExtension {}), + partial_message, }, invalid_messages, })) diff --git a/protocols/gossipsub/src/rpc.rs b/protocols/gossipsub/src/rpc.rs index 3b20e70afa9..bc2e82d40f6 100644 --- a/protocols/gossipsub/src/rpc.rs +++ b/protocols/gossipsub/src/rpc.rs @@ -94,7 +94,8 @@ impl Sender { | RpcOut::IHave(_) | RpcOut::IWant(_) | RpcOut::IDontWant(_) - | RpcOut::TestExtension => &self.non_priority_sender, + | RpcOut::TestExtension + | RpcOut::PartialMessage { .. } => &self.non_priority_sender, }; sender.try_send(rpc).map_err(|err| err.into_inner()) } diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs index 5af135a995b..c1179c78a6d 100644 --- a/protocols/gossipsub/src/types.rs +++ b/protocols/gossipsub/src/types.rs @@ -277,6 +277,8 @@ pub enum ControlAction { IDontWant(IDontWant), /// The Node has sent us its supported extensions. Extensions(Option), + /// Partial messages extension. + PartialMessages(PartialMessage), } /// Node broadcasts known messages per topic - IHave control message. @@ -320,6 +322,28 @@ pub struct IDontWant { pub(crate) message_ids: Vec, } +/// The type of partial message being sent and received. 
+#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub enum PartialMessageKind { + /// Partial IHAVE message. + IHave { metadata: Vec }, + /// Partial IWANT message. + IWant { metadata: Vec }, + /// Partial message data. + Publish { data: Vec }, +} + +/// A received partial message. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct PartialMessage { + /// The topic ID this partial message belongs to. + pub topic_id: TopicHash, + /// The group ID that identifies the complete logical message. + pub group_id: Vec, + /// The specific partial message type and data. + pub messages: Vec, +} + /// The node has sent us the supported Gossipsub Extensions. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct Extensions { @@ -358,6 +382,12 @@ pub enum RpcOut { Extensions(Extensions), /// Send a test extension message. TestExtension, + /// Send a partial messages extension. + PartialMessage { + group_id: Vec, + topic_id: String, + message: PartialMessageKind, + }, } impl RpcOut { @@ -538,6 +568,64 @@ impl From for proto::RPC { testExtension: Some(proto::TestExtension {}), partial: None, }, + RpcOut::PartialMessage { + group_id, + topic_id, + message: PartialMessageKind::Publish { data }, + } => proto::RPC { + subscriptions: vec![], + publish: vec![], + control: None, + testExtension: None, + partial: Some(proto::PartialMessagesExtension { + topicID: Some(topic_id.as_str().as_bytes().to_vec()), + groupID: Some(group_id), + message: Some(proto::PartialMessage { data: Some(data) }), + iwant: None, + idontwant: None, + ihave: None, + }), + }, + RpcOut::PartialMessage { + group_id, + topic_id, + message: PartialMessageKind::IHave { metadata }, + } => proto::RPC { + subscriptions: vec![], + publish: vec![], + control: None, + testExtension: None, + partial: Some(proto::PartialMessagesExtension { + topicID: Some(topic_id.as_str().as_bytes().to_vec()), + groupID: Some(group_id), + message: None, + iwant: None, + idontwant: None, + ihave: Some(proto::PartialIHAVE { + metadata: Some(metadata), + }), + }), + }, + RpcOut::PartialMessage { + group_id, + topic_id, + message: PartialMessageKind::IWant { metadata }, + } => proto::RPC { + subscriptions: vec![], + publish: vec![], + control: None, + testExtension: None, + partial: Some(proto::PartialMessagesExtension { + topicID: Some(topic_id.as_str().as_bytes().to_vec()), + groupID: Some(group_id), + message: None, + iwant: Some(proto::PartialIWANT { + metadata: Some(metadata), + }), + idontwant: None, + ihave: None, + }), + }, } } } @@ -553,6 +641,8 @@ pub struct RpcIn { pub control_msgs: Vec, /// Gossipsub test extension. pub test_extension: Option, + /// Partial messages extension. 
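// A standalone mirror of the decode step above, using stand-in names (the
// crate's real types live in types.rs and protocol.rs): every field present
// in a PartialMessagesExtension on the wire becomes its own kind, so a single
// RPC can carry up to all three at once.
#[derive(Debug, PartialEq)]
enum PartKind {
    IHave { metadata: Vec<u8> },
    IWant { metadata: Vec<u8> },
    Publish { data: Vec<u8> },
}

fn kinds_from(
    ihave: Option<Vec<u8>>,
    iwant: Option<Vec<u8>>,
    message: Option<Vec<u8>>,
) -> Vec<PartKind> {
    let mut kinds = Vec::new();
    if let Some(metadata) = ihave {
        kinds.push(PartKind::IHave { metadata });
    }
    if let Some(metadata) = iwant {
        kinds.push(PartKind::IWant { metadata });
    }
    if let Some(data) = message {
        kinds.push(PartKind::Publish { data });
    }
    kinds
}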
+ pub partial_message: Option, } impl fmt::Debug for RpcIn { @@ -567,6 +657,8 @@ impl fmt::Debug for RpcIn { if !self.control_msgs.is_empty() { b.field("control_msgs", &self.control_msgs); } + b.field("partial_messages", &self.partial_message); + b.finish() } } From bc45d3ef4523753f97f692944a3bb2290bd88fca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Fri, 12 Sep 2025 19:52:31 +0100 Subject: [PATCH 05/68] add partial_publish method --- protocols/gossipsub/src/behaviour.rs | 285 +++++++++++++++------ protocols/gossipsub/src/behaviour/tests.rs | 29 +++ protocols/gossipsub/src/config.rs | 44 ++++ protocols/gossipsub/src/error.rs | 3 + protocols/gossipsub/src/lib.rs | 1 + protocols/gossipsub/src/partial.rs | 71 ++++- protocols/gossipsub/src/protocol.rs | 28 +- protocols/gossipsub/src/types.rs | 86 ++----- 8 files changed, 374 insertions(+), 173 deletions(-) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index f8d561d8d34..72cf5083d17 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -56,6 +56,7 @@ use crate::metrics::{Churn, Config as MetricsConfig, Inclusion, Metrics, Penalty use crate::{ backoff::BackoffStorage, config::{Config, ValidationMode}, + error::PartialMessageError, gossip_promises::GossipPromises, handler::{Handler, HandlerEvent, HandlerIn}, mcache::MessageCache, @@ -69,10 +70,10 @@ use crate::{ transform::{DataTransform, IdentityTransform}, types::{ ControlAction, Extensions, Graft, IDontWant, IHave, IWant, Message, MessageAcceptance, - MessageId, PeerDetails, PeerInfo, PeerKind, Prune, RawMessage, RpcOut, Subscription, - SubscriptionAction, + MessageId, PartialMessage, PeerDetails, PeerInfo, PeerKind, Prune, RawMessage, RpcOut, + Subscription, SubscriptionAction, }, - FailedMessages, PublishError, SubscriptionError, TopicScoreParams, ValidationError, + FailedMessages, Partial, PublishError, SubscriptionError, TopicScoreParams, ValidationError, }; #[cfg(test)] @@ -141,6 +142,15 @@ pub enum Event { /// The decompressed message itself. message: Message, }, + /// A Partial message has been completed. + Partial { + /// The peer that forwarded us this message. + propagation_source: PeerId, + /// The group ID that identifies the complete logical message. + group_id: Vec, + /// The specific partial message data. + data: Vec, + }, /// A remote subscribed to a topic. Subscribed { /// Remote that has subscribed. @@ -258,7 +268,7 @@ impl From for PublishConfig { /// /// The TopicSubscriptionFilter allows applications to implement specific filters on topics to /// prevent unwanted messages being propagated and evaluated. -pub struct Behaviour { +pub struct Behaviour { /// Configuration providing gossipsub performance parameters. config: Config, @@ -274,7 +284,7 @@ pub struct Behaviour { /// A set of connected peers, indexed by their [`PeerId`] tracking both the [`PeerKind`] and /// the set of [`ConnectionId`]s. - connected_peers: HashMap, + connected_peers: HashMap>, /// A set of all explicit peers. These are peers that remain connected and we unconditionally /// forward messages to, outside of the scoring system. @@ -330,6 +340,9 @@ pub struct Behaviour { /// user to implement arbitrary topic-based compression algorithms. data_transform: D, + /// Partial messages received. + partial_messages: HashMap, P>>, + /// Keep track of a set of internal metrics relating to gossipsub. 
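// Spelling out the generics of the new behaviour-level store added above
// (a readability sketch with stand-in aliases; the real field is keyed on
// the crate's TopicHash, as its use in publish_partial shows):
use std::collections::HashMap;

type GroupId = Vec<u8>;
type Topic = String; // stand-in for TopicHash

// topic -> group id -> in-progress partial message
struct PartialStore<P> {
    partial_messages: HashMap<Topic, HashMap<GroupId, P>>,
}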
#[cfg(feature = "metrics")] metrics: Option, @@ -341,10 +354,11 @@ pub struct Behaviour { gossip_promises: GossipPromises, } -impl Behaviour +impl Behaviour where D: DataTransform + Default, F: TopicSubscriptionFilter + Default, + P: Partial + Default, { /// Creates a Gossipsub [`Behaviour`] struct given a set of parameters specified via a /// [`Config`]. This has no subscription filter and uses no compression. @@ -358,10 +372,11 @@ where } } -impl Behaviour +impl Behaviour where D: DataTransform + Default, F: TopicSubscriptionFilter, + P: Partial + Default, { /// Creates a Gossipsub [`Behaviour`] struct given a set of parameters specified via a /// [`Config`] and a custom subscription filter. @@ -379,10 +394,11 @@ where } } -impl Behaviour +impl Behaviour where D: DataTransform, F: TopicSubscriptionFilter + Default, + P: Partial + Default, { /// Creates a Gossipsub [`Behaviour`] struct given a set of parameters specified via a /// [`Config`] and a custom data transform. @@ -401,10 +417,11 @@ where } } -impl Behaviour +impl Behaviour where D: DataTransform, F: TopicSubscriptionFilter, + P: Partial + Default, { /// Creates a Gossipsub [`Behaviour`] struct given a set of parameters specified via a /// [`Config`] and a custom subscription filter and data transform. @@ -448,6 +465,7 @@ where config, subscription_filter, data_transform, + partial_messages: Default::default(), failed_messages: Default::default(), gossip_promises: Default::default(), }) @@ -466,10 +484,11 @@ where } } -impl Behaviour +impl Behaviour where D: DataTransform + Send + 'static, F: TopicSubscriptionFilter + Send + 'static, + P: Partial + Send + 'static + Default, { /// Lists the hashes of the topics we are currently subscribed to. pub fn topics(&self) -> impl Iterator { @@ -565,67 +584,17 @@ where true } - /// Publishes a message with multiple topics to the network. - pub fn publish( - &mut self, - topic: impl Into, - data: impl Into>, - ) -> Result { - let data = data.into(); - let topic = topic.into(); - - // Transform the data before building a raw_message. - let transformed_data = self - .data_transform - .outbound_transform(&topic.clone(), data.clone())?; - - let max_transmit_size_for_topic = self - .config - .protocol_config() - .max_transmit_size_for_topic(&topic); - - // check that the size doesn't exceed the max transmission size. - if transformed_data.len() > max_transmit_size_for_topic { - return Err(PublishError::MessageTooLarge); - } - - let mesh_n = self.config.mesh_n_for_topic(&topic); - let raw_message = self.build_raw_message(topic, transformed_data)?; - - // calculate the message id from the un-transformed data - let msg_id = self.config.message_id(&Message { - source: raw_message.source, - data, // the uncompressed form - sequence_number: raw_message.sequence_number, - topic: raw_message.topic.clone(), - }); - - // Check the if the message has been published before - if self.duplicate_cache.contains(&msg_id) { - // This message has already been seen. We don't re-publish messages that have already - // been published on the network. - tracing::warn!( - message_id=%msg_id, - "Not publishing a message that has already been published" - ); - return Err(PublishError::Duplicate); - } - - tracing::trace!(message_id=%msg_id, "Publishing message"); - - let topic_hash = raw_message.topic.clone(); + // Get Peers from the mesh or fanout to publish a message to. 
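// Condensed summary of the selection implemented below, in order:
//   1. flood_publish: all topic peers above the publish score threshold,
//      plus explicit peers;
//   2. otherwise, the topic's mesh peers, topped up with random topic peers
//      when fewer than mesh_n are present;
//   3. with no mesh for the topic, the existing fanout set, or a fresh
//      fanout of up to mesh_n random peers created on demand;
//   4. Floodsub peers subscribed to the topic (above the publish threshold)
//      are also added.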
+ fn get_publish_peers(&mut self, topic_hash: &TopicHash) -> HashSet { + let mesh_n = self.config.mesh_n_for_topic(topic_hash); - let mut peers_on_topic = self + let peers_on_topic = self .connected_peers .iter() - .filter(|(_, p)| p.topics.contains(&topic_hash)) + .filter(|(_, p)| p.topics.contains(topic_hash)) .map(|(peer_id, _)| peer_id) .peekable(); - if peers_on_topic.peek().is_none() { - return Err(PublishError::NoPeersSubscribedToTopic); - } - let mut recipient_peers = HashSet::new(); if self.config.flood_publish() { // Forward to all peers above score and all explicit peers @@ -637,7 +606,7 @@ where .0 })); } else { - match self.mesh.get(&topic_hash) { + match self.mesh.get(topic_hash) { // Mesh peers Some(mesh_peers) => { // We have a mesh set. We want to make sure to publish to at least `mesh_n` @@ -651,7 +620,7 @@ where // Get a random set of peers that are appropriate to send messages too. let peer_list = get_random_peers( &self.connected_peers, - &topic_hash, + topic_hash, needed_extra_peers, |peer| { !mesh_peers.contains(peer) @@ -673,7 +642,7 @@ where // `fanout_peers` is always non-empty if it's `Some`. let fanout_peers = self .fanout - .get(&topic_hash) + .get(topic_hash) .filter(|peers| !peers.is_empty()); // If we have fanout peers add them to the map. if let Some(peers) = fanout_peers { @@ -683,7 +652,7 @@ where } else { // We have no fanout peers, select mesh_n of them and add them to the fanout let new_peers = - get_random_peers(&self.connected_peers, &topic_hash, mesh_n, { + get_random_peers(&self.connected_peers, topic_hash, mesh_n, { |p| { !self.explicit_peers.contains(p) && !self @@ -712,7 +681,7 @@ where // Floodsub peers for (peer, connections) in &self.connected_peers { if connections.kind == PeerKind::Floodsub - && connections.topics.contains(&topic_hash) + && connections.topics.contains(topic_hash) && !self .peer_score .below_threshold(peer, |ts| ts.publish_threshold) @@ -723,6 +692,60 @@ where } } + recipient_peers + } + + /// Publishes a message with multiple topics to the network. + pub fn publish( + &mut self, + topic: impl Into, + data: impl Into>, + ) -> Result { + let data = data.into(); + let topic = topic.into(); + + // Transform the data before building a raw_message. + let transformed_data = self + .data_transform + .outbound_transform(&topic.clone(), data.clone())?; + + let max_transmit_size_for_topic = self + .config + .protocol_config() + .max_transmit_size_for_topic(&topic); + + // check that the size doesn't exceed the max transmission size. + if transformed_data.len() > max_transmit_size_for_topic { + return Err(PublishError::MessageTooLarge); + } + + let raw_message = self.build_raw_message(topic, transformed_data)?; + + // calculate the message id from the un-transformed data + let msg_id = self.config.message_id(&Message { + source: raw_message.source, + data, // the uncompressed form + sequence_number: raw_message.sequence_number, + topic: raw_message.topic.clone(), + }); + + // Check the if the message has been published before + if self.duplicate_cache.contains(&msg_id) { + // This message has already been seen. We don't re-publish messages that have already + // been published on the network. 
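// For example, under a content-addressed message_id_fn (a hash of the
// message data), publishing byte-identical data twice produces the same id,
// so the second call returns Err(PublishError::Duplicate) instead of
// re-flooding the network. With the default source-and-sequence-number id,
// every publish gets a fresh id and this branch is not hit.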
+ tracing::warn!( + message_id=%msg_id, + "Not publishing a message that has already been published" + ); + return Err(PublishError::Duplicate); + } + + tracing::trace!(message_id=%msg_id, "Publishing message"); + + let topic_hash = raw_message.topic.clone(); + + let recipient_peers = self.get_publish_peers(&topic_hash); + // If the message isn't a duplicate and we have sent it to some peers add it to the // duplicate cache and memcache. self.duplicate_cache.insert(msg_id.clone()); @@ -777,6 +800,58 @@ where Ok(msg_id) } + pub fn publish_partial( + &mut self, + topic: impl Into, + mut partial_message: P, + ) -> Result<(), PublishError> { + let topic_id = topic.into(); + if let Some(existing) = self + .partial_messages + .get_mut(&topic_id) + .and_then(|p| p.remove(partial_message.group_id())) + { + // Return err if trying to publish the same partial message state we currently have. + if existing.available_parts() == partial_message.available_parts() { + return Err(PublishError::Duplicate); + } + partial_message + .extend_from_encoded_partial_message(existing.into_data()) + .map_err(PublishError::Partial)?; + } + + let available_parts = partial_message.available_parts().map(|p| p.to_vec()); + let missing_parts = partial_message.missing_parts().map(|p| p.to_vec()); + let group_id = partial_message.group_id().to_vec(); + let message_data = partial_message.as_data().to_vec(); + + // TODO: should we construct a recipient list just for partials? + let recipient_peers = self.get_publish_peers(&topic_id); + let rpc = PartialMessage { + topic_id, + group_id, + iwant: missing_parts, + ihave: available_parts, + message: Some(message_data), + }; + let mut publish_failed = true; + for peer_id in recipient_peers.iter() { + if self.send_message(*peer_id, RpcOut::PartialMessage(rpc.clone())) { + publish_failed = false; + } + } + + if recipient_peers.is_empty() { + return Err(PublishError::NoPeersSubscribedToTopic); + } + + if publish_failed { + return Err(PublishError::AllQueuesFull(recipient_peers.len())); + } + + Ok(()) + } + /// This function should be called when [`Config::validate_messages()`] is `true` after /// the message got validated by the caller. Messages are stored in the ['Memcache'] and /// validation is expected to be fast enough that the messages should still exist in the cache. @@ -1547,6 +1622,29 @@ where } } + /// Handle incoming partial message from a peer + fn handle_partial_message( + &mut self, + peer_id: &PeerId, + partial_message: crate::types::PartialMessage, + ) { + tracing::debug!( + peer=%peer_id, + topic=%partial_message.topic_id, + group_id=?partial_message.group_id, + "Received partial message" + ); + + // Check if peer exists + let Some(_peer) = self.connected_peers.get(peer_id) else { + tracing::error!( + peer=%peer_id, + "Partial message from unknown peer" + ); + return; + }; + } + /// Removes the specified peer from the mesh, returning true if it was present. 
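// Caller-side sketch for publish_partial above (assumes an application type
// `ChunkedMessage: Partial` like the illustrative one further below, and
// existing `behaviour` and `topic` bindings):
let partial = ChunkedMessage::new(b"group-1".to_vec(), &payload);
match behaviour.publish_partial(topic.hash(), partial) {
    Ok(()) => {}
    Err(PublishError::NoPeersSubscribedToTopic) => { /* retry once peers join */ }
    Err(PublishError::Partial(e)) => tracing::debug!(error = ?e, "could not merge prior state"),
    Err(e) => tracing::warn!("partial publish failed: {e}"),
}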
fn remove_peer_from_mesh( &mut self, @@ -3136,10 +3234,11 @@ fn get_ip_addr(addr: &Multiaddr) -> Option { }) } -impl NetworkBehaviour for Behaviour +impl NetworkBehaviour for Behaviour where C: Send + 'static + DataTransform, F: Send + 'static + TopicSubscriptionFilter, + P: Send + 'static + Partial + Default, { type ConnectionHandler = Handler; type ToSwarm = Event; @@ -3164,6 +3263,7 @@ where topics: Default::default(), dont_send: LinkedHashMap::new(), extensions: None, + partial_messages: Default::default(), }); // Add the new connection connected_peer.connections.push(connection_id); @@ -3175,7 +3275,11 @@ where peer_id, RpcOut::Extensions(Extensions { test_extension: Some(true), - partial_messages: Some(true), + partial_messages: if self.config.partial_messages_extension() { + Some(true) + } else { + None + }, }), ); } @@ -3201,6 +3305,7 @@ where topics: Default::default(), dont_send: LinkedHashMap::new(), extensions: None, + partial_messages: Default::default(), }); // Add the new connection connected_peer.connections.push(connection_id); @@ -3212,7 +3317,11 @@ where peer_id, RpcOut::Extensions(Extensions { test_extension: Some(true), - partial_messages: Some(true), + partial_messages: if self.config.partial_messages_extension() { + Some(true) + } else { + None + }, }), ); } @@ -3407,9 +3516,6 @@ where self.handle_extensions(&propagation_source, extensions); } } - ControlAction::PartialMessages(_partial) => { - todo!("Handle partial messages in behaviour") - } } } if !ihave_msgs.is_empty() { @@ -3425,6 +3531,17 @@ where if let Some(_extension) = rpc.test_extension { tracing::debug!("Received Test Extension"); } + + if let Some(partial_message) = rpc.partial_message { + if self.config.partial_messages_extension() { + self.handle_partial_message(&propagation_source, partial_message); + } else { + tracing::debug!( + peer=%propagation_source, + "Ignoring partial message - extension disabled" + ); + } + } } } } @@ -3473,12 +3590,12 @@ where /// This is called when peers are added to any mesh. It checks if the peer existed /// in any other mesh. If this is the first mesh they have joined, it queues a message to notify /// the appropriate connection handler to maintain a connection. -fn peer_added_to_mesh( +fn peer_added_to_mesh( peer_id: PeerId, new_topics: Vec<&TopicHash>, mesh: &HashMap>, events: &mut VecDeque>, - connections: &HashMap, + connections: &HashMap>, ) { // Ensure there is an active connection let connection_id = match connections.get(&peer_id) { @@ -3515,12 +3632,12 @@ fn peer_added_to_mesh( /// This is called when peers are removed from a mesh. It checks if the peer exists /// in any other mesh. If this is the last mesh they have joined, we return true, in order to /// notify the handler to no longer maintain a connection. -fn peer_removed_from_mesh( +fn peer_removed_from_mesh( peer_id: PeerId, old_topic: &TopicHash, mesh: &HashMap>, events: &mut VecDeque>, - connections: &HashMap, + connections: &HashMap>, ) { // Ensure there is an active connection let connection_id = match connections.get(&peer_id) { @@ -3557,8 +3674,8 @@ fn peer_removed_from_mesh( /// Helper function to get a subset of random gossipsub peers for a `topic_hash` /// filtered by the function `f`. The number of peers to get equals the output of `n_map` /// that gets as input the number of filtered peers. 
-fn get_random_peers_dynamic( - connected_peers: &HashMap, +fn get_random_peers_dynamic( + connected_peers: &HashMap>, topic_hash: &TopicHash, // maps the number of total peers to the number of selected peers n_map: impl Fn(usize) -> usize, @@ -3590,8 +3707,8 @@ fn get_random_peers_dynamic( /// Helper function to get a set of `n` random gossipsub peers for a `topic_hash` /// filtered by the function `f`. -fn get_random_peers( - connected_peers: &HashMap, +fn get_random_peers( + connected_peers: &HashMap>, topic_hash: &TopicHash, n: usize, f: impl FnMut(&PeerId) -> bool, diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index 36ddf4162aa..99d23d0c6a8 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -250,6 +250,7 @@ where sender, dont_send: LinkedHashMap::new(), extensions: None, + partial_messages: Default::default(), }, ); @@ -421,6 +422,7 @@ fn proto_to_message(rpc: &proto::RPC) -> RpcIn { .collect(), control_msgs, test_extension: None, + partial_message: None, } } @@ -648,6 +650,7 @@ fn test_join() { sender, dont_send: LinkedHashMap::new(), extensions: None, + partial_messages: Default::default(), }, ); receivers.insert(random_peer, receiver); @@ -1046,6 +1049,7 @@ fn test_get_random_peers() { sender: Sender::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), extensions: None, + partial_messages: Default::default(), }, ); } @@ -1255,6 +1259,7 @@ fn test_handle_iwant_msg_but_already_sent_idontwant() { message_ids: vec![msg_id.clone()], })], test_extension: None, + partial_message: None, }; gs.on_connection_handler_event( peers[1], @@ -3148,6 +3153,7 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() { subscriptions: vec![subscription.clone()], control_msgs: vec![control_action], test_extension: None, + partial_message: None, }, invalid_messages: Vec::new(), }, @@ -3175,6 +3181,7 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() { subscriptions: vec![subscription], control_msgs: vec![control_action], test_extension: None, + partial_message: None, }, invalid_messages: Vec::new(), }, @@ -3786,6 +3793,7 @@ fn test_scoring_p4_invalid_signature() { subscriptions: vec![], control_msgs: vec![], test_extension: None, + partial_message: None, }, invalid_messages: vec![(m, ValidationError::InvalidSignature)], }, @@ -5546,6 +5554,7 @@ fn parses_idontwant() { message_ids: vec![message_id.clone()], })], test_extension: None, + partial_message: None, }; gs.on_connection_handler_event( peers[1], @@ -5606,6 +5615,7 @@ fn test_all_queues_full() { sender: Sender::new(2), dont_send: LinkedHashMap::new(), extensions: None, + partial_messages: Default::default(), }, ); @@ -5643,6 +5653,7 @@ fn test_slow_peer_returns_failed_publish() { sender: Sender::new(2), dont_send: LinkedHashMap::new(), extensions: None, + partial_messages: Default::default(), }, ); let peer_id = PeerId::random(); @@ -5657,6 +5668,7 @@ fn test_slow_peer_returns_failed_publish() { sender: Sender::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), extensions: None, + partial_messages: Default::default(), }, ); @@ -5719,6 +5731,7 @@ fn test_slow_peer_returns_failed_ihave_handling() { sender: Sender::new(2), dont_send: LinkedHashMap::new(), extensions: None, + partial_messages: Default::default(), }, ); peers.push(slow_peer_id); @@ -5737,6 +5750,7 @@ fn test_slow_peer_returns_failed_ihave_handling() { sender: 
Sender::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), extensions: None, + partial_messages: Default::default(), }, ); @@ -5835,6 +5849,7 @@ fn test_slow_peer_returns_failed_iwant_handling() { sender: Sender::new(2), dont_send: LinkedHashMap::new(), extensions: None, + partial_messages: Default::default(), }, ); peers.push(slow_peer_id); @@ -5853,6 +5868,7 @@ fn test_slow_peer_returns_failed_iwant_handling() { sender: Sender::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), extensions: None, + partial_messages: Default::default(), }, ); @@ -5931,6 +5947,7 @@ fn test_slow_peer_returns_failed_forward() { sender: Sender::new(2), dont_send: LinkedHashMap::new(), extensions: None, + partial_messages: Default::default(), }, ); peers.push(slow_peer_id); @@ -5949,6 +5966,7 @@ fn test_slow_peer_returns_failed_forward() { sender: Sender::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), extensions: None, + partial_messages: Default::default(), }, ); @@ -6032,6 +6050,7 @@ fn test_slow_peer_is_downscored_on_publish() { sender: Sender::new(2), dont_send: LinkedHashMap::new(), extensions: None, + partial_messages: Default::default(), }, ); gs.as_peer_score_mut().add_peer(slow_peer_id); @@ -6047,6 +6066,7 @@ fn test_slow_peer_is_downscored_on_publish() { sender: Sender::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), extensions: None, + partial_messages: Default::default(), }, ); @@ -6645,6 +6665,7 @@ fn test_validation_error_message_size_too_large_topic_specific() { subscriptions: vec![], control_msgs: vec![], test_extension: None, + partial_message: None, }, invalid_messages: vec![], }, @@ -6690,6 +6711,7 @@ fn test_validation_error_message_size_too_large_topic_specific() { subscriptions: vec![], control: None, testExtension: None, + partial: None, }; codec.encode(rpc, &mut buf).unwrap(); @@ -6751,6 +6773,7 @@ fn test_validation_message_size_within_topic_specific() { subscriptions: vec![], control_msgs: vec![], test_extension: None, + partial_message: None, }, invalid_messages: vec![], }, @@ -6796,6 +6819,7 @@ fn test_validation_message_size_within_topic_specific() { subscriptions: vec![], control: None, testExtension: None, + partial: None, }; codec.encode(rpc, &mut buf).unwrap(); @@ -6817,6 +6841,7 @@ fn test_validation_message_size_within_topic_specific() { fn test_extensions_message_creation() { let extensions_rpc = RpcOut::Extensions(Extensions { test_extension: Some(true), + partial_messages: None, }); let proto_rpc: proto::RPC = extensions_rpc.into(); @@ -6857,12 +6882,14 @@ fn test_handle_extensions_message() { sender, dont_send: LinkedHashMap::new(), extensions: None, + partial_messages: Default::default(), }, ); // Simulate receiving extensions message let extensions = Extensions { test_extension: Some(false), + partial_messages: None, }; gs.handle_extensions(&peer_id, extensions); @@ -6876,8 +6903,10 @@ fn test_handle_extensions_message() { subscriptions: vec![], control_msgs: vec![ControlAction::Extensions(Some(Extensions { test_extension: Some(true), + partial_messages: None, }))], test_extension: None, + partial_message: None, }; gs.on_connection_handler_event( diff --git a/protocols/gossipsub/src/config.rs b/protocols/gossipsub/src/config.rs index fa685f3085a..855fcb1f62a 100644 --- a/protocols/gossipsub/src/config.rs +++ b/protocols/gossipsub/src/config.rs @@ -119,6 +119,7 @@ pub struct Config { unsubscribe_backoff: Duration, backoff_slack: u32, flood_publish: 
bool, + partial_messages_extension: bool, graft_flood_threshold: Duration, opportunistic_graft_ticks: u64, opportunistic_graft_peers: usize, @@ -363,6 +364,13 @@ impl Config { self.flood_publish } + /// Whether to enable the partial messages extension. + /// When enabled, gossipsub can handle partial message reconstruction for large messages. + /// The default is false. + pub fn partial_messages_extension(&self) -> bool { + self.partial_messages_extension + } + /// If a GRAFT comes before `graft_flood_threshold` has elapsed since the last PRUNE, /// then there is an extra score penalty applied to the peer through P7. pub fn graft_flood_threshold(&self) -> Duration { @@ -532,6 +540,7 @@ impl Default for ConfigBuilder { unsubscribe_backoff: Duration::from_secs(10), backoff_slack: 1, flood_publish: true, + partial_messages_extension: false, graft_flood_threshold: Duration::from_secs(10), opportunistic_graft_ticks: 60, opportunistic_graft_peers: 2, @@ -891,6 +900,14 @@ impl ConfigBuilder { self } + /// Whether to enable the partial messages extension. + /// When enabled, gossipsub can handle partial message reconstruction for large messages. + /// The default is false. + pub fn partial_messages_extension(&mut self, partial_messages_extension: bool) -> &mut Self { + self.config.partial_messages_extension = partial_messages_extension; + self + } + /// If a GRAFT comes before `graft_flood_threshold` has elapsed since the last PRUNE, /// then there is an extra score penalty applied to the peer through P7. pub fn graft_flood_threshold(&mut self, graft_flood_threshold: Duration) -> &mut Self { @@ -1156,6 +1173,7 @@ impl std::fmt::Debug for Config { let _ = builder.field("prune_backoff", &self.prune_backoff); let _ = builder.field("backoff_slack", &self.backoff_slack); let _ = builder.field("flood_publish", &self.flood_publish); + let _ = builder.field("partial_messages_extension", &self.partial_messages_extension); let _ = builder.field("graft_flood_threshold", &self.graft_flood_threshold); let _ = builder.field( "mesh_outbound_min", @@ -1305,3 +1323,29 @@ mod test { MessageId::from(v) } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_partial_messages_extension_config() { + // Test default is false + let config = Config::default(); + assert!(!config.partial_messages_extension()); + + // Test builder can enable it + let config = ConfigBuilder::default() + .partial_messages_extension(true) + .build() + .unwrap(); + assert!(config.partial_messages_extension()); + + // Test builder can explicitly disable it + let config = ConfigBuilder::default() + .partial_messages_extension(false) + .build() + .unwrap(); + assert!(!config.partial_messages_extension()); + } +} diff --git a/protocols/gossipsub/src/error.rs b/protocols/gossipsub/src/error.rs index df6d0678680..18c8b84205f 100644 --- a/protocols/gossipsub/src/error.rs +++ b/protocols/gossipsub/src/error.rs @@ -40,6 +40,9 @@ pub enum PublishError { /// Messages could not be sent because the queues for all peers were full. The usize represents /// the number of peers that were attempted. AllQueuesFull(usize), + + /// An Error while trying to publish a partial message. 
+ Partial(PartialMessageError), } impl std::fmt::Display for PublishError { diff --git a/protocols/gossipsub/src/lib.rs b/protocols/gossipsub/src/lib.rs index 4b5f552d49c..71c6606124f 100644 --- a/protocols/gossipsub/src/lib.rs +++ b/protocols/gossipsub/src/lib.rs @@ -122,6 +122,7 @@ pub use self::{ behaviour::{Behaviour, Event, MessageAuthenticity}, config::{Config, ConfigBuilder, ValidationMode, Version}, error::{ConfigBuilderError, PublishError, SubscriptionError, ValidationError}, + partial::Partial, peer_score::{ score_parameter_decay, score_parameter_decay_with_base, PeerScoreParams, PeerScoreThresholds, TopicScoreParams, diff --git a/protocols/gossipsub/src/partial.rs b/protocols/gossipsub/src/partial.rs index 0577bf1a986..cc4d301ce6e 100644 --- a/protocols/gossipsub/src/partial.rs +++ b/protocols/gossipsub/src/partial.rs @@ -32,7 +32,7 @@ use crate::error::PartialMessageError; /// 4. When requests are received, `partial_message_bytes_from_metadata()` generates the response /// 5. Received partial data is integrated using `extend_from_encoded_partial_message()` /// 6. The `group_id()` ties all parts of the same logical message together -pub trait PartialMessage { +pub trait Partial { /// Returns the unique identifier for this message group. /// /// All partial messages belonging to the same logical message should return @@ -81,6 +81,73 @@ pub trait PartialMessage { /// if the data was invalid or couldn't be processed. fn extend_from_encoded_partial_message( &mut self, - data: &[u8], + data: Vec, ) -> Result<(), PartialMessageError>; + + /// Consumes self and returns the message data. + /// + /// This method should only be called when the partial message reconstruction + /// is complete (i.e., when `missing_parts()` returns `None`). + /// Calling this method on an incomplete partial message may return partial data, + /// invalid data, or panic, depending on the implementation. + /// + /// # Returns + /// + /// The complete message data as a `Vec`. The format and contents of this + /// data are application-defined and should match what would have been sent + /// in a regular gossipsub message. + fn into_data(self) -> Vec; + + /// Returns the complete message data without consuming self. + /// + /// This method provides access to the reconstructed message data for use cases + /// like eager pushing, where the partial message implementation needs to send + /// complete or substantial portions of the message proactively to peers. + /// + /// Unlike `into_data()`, this method does not consume self, allowing the + /// partial message to continue participating in the reconstruction process. + /// The returned data should represent the current state of reconstruction - + /// this may be incomplete data, complete data, or application-specific + /// encoded data depending on the implementation's needs. + /// + /// This method is called during `publish_partial` operations when + /// applications want to eagerly push data to peers without waiting for + /// explicit requests via PartialIWANT messages. + fn as_data(&self) -> &[u8]; +} + +/// Default implementation that disables partial messages. 
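// For contrast with the no-op implementation that follows, an illustrative
// chunk-based implementor (not part of this patch; it matches the trait
// signatures as they stand in this commit). Metadata is a list of one-byte
// chunk indices, and each encoded part is `[index, chunk bytes...]`.
use crate::error::PartialMessageError;

const CHUNK_SIZE: usize = 1024;

struct ChunkedMessage {
    group: Vec<u8>,
    chunks: Vec<Option<Vec<u8>>>, // None = chunk not yet received
    have: Vec<u8>,                // indices we hold
    want: Vec<u8>,                // indices we still need
    buf: Vec<u8>,                 // held chunks concatenated, for as_data()
}

impl ChunkedMessage {
    // Sender side: start from the complete payload.
    fn new(group: Vec<u8>, payload: &[u8]) -> Self {
        let chunks = payload.chunks(CHUNK_SIZE).map(|c| Some(c.to_vec())).collect();
        let mut msg = Self { group, chunks, have: vec![], want: vec![], buf: vec![] };
        msg.refresh();
        msg
    }

    // Recompute the index lists and the assembled buffer.
    fn refresh(&mut self) {
        self.have.clear();
        self.want.clear();
        self.buf.clear();
        for (i, chunk) in self.chunks.iter().enumerate() {
            match chunk {
                Some(bytes) => {
                    self.have.push(i as u8);
                    self.buf.extend_from_slice(bytes);
                }
                None => self.want.push(i as u8),
            }
        }
    }
}

impl Partial for ChunkedMessage {
    fn group_id(&self) -> &[u8] {
        &self.group
    }

    fn missing_parts(&self) -> Option<&[u8]> {
        (!self.want.is_empty()).then_some(self.want.as_slice())
    }

    fn available_parts(&self) -> Option<&[u8]> {
        (!self.have.is_empty()).then_some(self.have.as_slice())
    }

    // Serve the first requested chunk we hold; any further requested-and-held
    // indices are returned as the remaining metadata.
    fn partial_message_bytes_from_metadata(&self, metadata: &[u8]) -> (Vec<u8>, Option<Vec<u8>>) {
        let mut held = metadata
            .iter()
            .copied()
            .filter(|i| self.chunks.get(*i as usize).is_some_and(|c| c.is_some()));
        let Some(first) = held.next() else {
            return (Vec::new(), None);
        };
        let mut encoded = vec![first];
        encoded.extend_from_slice(self.chunks[first as usize].as_ref().unwrap());
        let rest: Vec<u8> = held.collect();
        (encoded, (!rest.is_empty()).then_some(rest))
    }

    fn extend_from_encoded_partial_message(&mut self, data: Vec<u8>) -> Result<(), PartialMessageError> {
        if let Some((&index, chunk)) = data.split_first() {
            if let Some(slot) = self.chunks.get_mut(index as usize) {
                if slot.is_none() {
                    *slot = Some(chunk.to_vec());
                    self.refresh();
                }
            }
        }
        // Malformed or duplicate parts are silently dropped here; a real
        // implementation would return an appropriate PartialMessageError.
        Ok(())
    }

    fn into_data(self) -> Vec<u8> {
        // Only meaningful once missing_parts() is None.
        self.chunks.into_iter().flatten().flatten().collect()
    }

    fn as_data(&self) -> &[u8] {
        &self.buf
    }
}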
+impl Partial for () { + fn group_id(&self) -> &[u8] { + &[] + } + + fn missing_parts(&self) -> Option<&[u8]> { + None + } + + fn available_parts(&self) -> Option<&[u8]> { + None + } + + fn partial_message_bytes_from_metadata(&self, _metadata: &[u8]) -> (Vec, Option>) { + (vec![], None) + } + + fn extend_from_encoded_partial_message( + &mut self, + _data: Vec, + ) -> Result<(), PartialMessageError> { + // This should never be called since we never advertise having or wanting parts, + // but if it is called, just ignore the data silently + Ok(()) + } + + fn into_data(self) -> Vec { + vec![] + } + + fn as_data(&self) -> &[u8] { + &[] + } } diff --git a/protocols/gossipsub/src/protocol.rs b/protocols/gossipsub/src/protocol.rs index 3a86d19de50..b8adcc0359a 100644 --- a/protocols/gossipsub/src/protocol.rs +++ b/protocols/gossipsub/src/protocol.rs @@ -36,8 +36,8 @@ use crate::{ topic::TopicHash, types::{ ControlAction, Extensions, Graft, IDontWant, IHave, IWant, MessageId, PartialMessage, - PartialMessageKind, PeerInfo, PeerKind, Prune, RawMessage, RpcIn, Subscription, - SubscriptionAction, TestExtension, + PeerInfo, PeerKind, Prune, RawMessage, RpcIn, Subscription, SubscriptionAction, + TestExtension, }, ValidationError, }; @@ -589,30 +589,12 @@ impl Decoder for GossipsubCodec { return None; }; - let mut messages = vec![]; - - if let Some(proto::PartialIHAVE { - metadata: Some(metadata), - }) = partial_proto.ihave - { - messages.push(PartialMessageKind::IHave { metadata }); - } - - if let Some(proto::PartialIWANT { - metadata: Some(metadata), - }) = partial_proto.iwant - { - messages.push(PartialMessageKind::IWant { metadata }); - } - - if let Some(proto::PartialMessage { data: Some(data) }) = partial_proto.message { - messages.push(PartialMessageKind::Publish { data }); - } - Some(PartialMessage { topic_id, group_id, - messages, + iwant: partial_proto.iwant.and_then(|iwant| iwant.metadata), + ihave: partial_proto.ihave.and_then(|ihave| ihave.metadata), + message: partial_proto.message.and_then(|message| message.data), }) }); diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs index c1179c78a6d..4fdf1bb23be 100644 --- a/protocols/gossipsub/src/types.rs +++ b/protocols/gossipsub/src/types.rs @@ -19,7 +19,10 @@ // DEALINGS IN THE SOFTWARE. //! A collection of types using the Gossipsub system. -use std::{collections::BTreeSet, fmt, fmt::Debug}; +use std::{ + collections::{BTreeSet, HashMap}, + fmt::{self, Debug}, +}; use futures_timer::Delay; use hashlink::LinkedHashMap; @@ -102,7 +105,7 @@ impl std::fmt::Debug for MessageId { #[derive(Debug)] /// Connected peer details. -pub(crate) struct PeerDetails { +pub(crate) struct PeerDetails
<P>
{ /// The kind of protocol the peer supports. pub(crate) kind: PeerKind, /// The Extensions supported by the peer if any. @@ -117,6 +120,8 @@ pub(crate) struct PeerDetails { pub(crate) sender: Sender, /// Don't send messages. pub(crate) dont_send: LinkedHashMap, + /// Peer Partial messages. + pub(crate) partial_messages: HashMap<(TopicHash, Vec), P>, } /// Describes the types of peers that can exist in the gossipsub context. @@ -277,8 +282,6 @@ pub enum ControlAction { IDontWant(IDontWant), /// The Node has sent us its supported extensions. Extensions(Option), - /// Partial messages extension. - PartialMessages(PartialMessage), } /// Node broadcasts known messages per topic - IHave control message. @@ -322,17 +325,6 @@ pub struct IDontWant { pub(crate) message_ids: Vec, } -/// The type of partial message being sent and received. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub enum PartialMessageKind { - /// Partial IHAVE message. - IHave { metadata: Vec }, - /// Partial IWANT message. - IWant { metadata: Vec }, - /// Partial message data. - Publish { data: Vec }, -} - /// A received partial message. #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct PartialMessage { @@ -340,8 +332,12 @@ pub struct PartialMessage { pub topic_id: TopicHash, /// The group ID that identifies the complete logical message. pub group_id: Vec, - /// The specific partial message type and data. - pub messages: Vec, + /// The partial parts we want. + pub iwant: Option>, + /// The partial parts we have. + pub ihave: Option>, + /// The partial message itself + pub message: Option>, } /// The node has sent us the supported Gossipsub Extensions. @@ -383,11 +379,7 @@ pub enum RpcOut { /// Send a test extension message. TestExtension, /// Send a partial messages extension. - PartialMessage { - group_id: Vec, - topic_id: String, - message: PartialMessageKind, - }, + PartialMessage(PartialMessage), } impl RpcOut { @@ -568,29 +560,13 @@ impl From for proto::RPC { testExtension: Some(proto::TestExtension {}), partial: None, }, - RpcOut::PartialMessage { - group_id, + RpcOut::PartialMessage(PartialMessage { topic_id, - message: PartialMessageKind::Publish { data }, - } => proto::RPC { - subscriptions: vec![], - publish: vec![], - control: None, - testExtension: None, - partial: Some(proto::PartialMessagesExtension { - topicID: Some(topic_id.as_str().as_bytes().to_vec()), - groupID: Some(group_id), - message: Some(proto::PartialMessage { data: Some(data) }), - iwant: None, - idontwant: None, - ihave: None, - }), - }, - RpcOut::PartialMessage { group_id, - topic_id, - message: PartialMessageKind::IHave { metadata }, - } => proto::RPC { + iwant, + ihave, + message, + }) => proto::RPC { subscriptions: vec![], publish: vec![], control: None, @@ -598,32 +574,14 @@ impl From for proto::RPC { partial: Some(proto::PartialMessagesExtension { topicID: Some(topic_id.as_str().as_bytes().to_vec()), groupID: Some(group_id), - message: None, - iwant: None, - idontwant: None, - ihave: Some(proto::PartialIHAVE { + message: message.map(|data| proto::PartialMessage { data: Some(data) }), + iwant: iwant.map(|metadata| proto::PartialIWANT { metadata: Some(metadata), }), - }), - }, - RpcOut::PartialMessage { - group_id, - topic_id, - message: PartialMessageKind::IWant { metadata }, - } => proto::RPC { - subscriptions: vec![], - publish: vec![], - control: None, - testExtension: None, - partial: Some(proto::PartialMessagesExtension { - topicID: Some(topic_id.as_str().as_bytes().to_vec()), - groupID: Some(group_id), - message: None, - iwant: 
Some(proto::PartialIWANT { + ihave: ihave.map(|metadata| proto::PartialIHAVE { metadata: Some(metadata), }), idontwant: None, - ihave: None, }), }, } From 8b1c086fe6bcc5090784ed536cfeebaca5eefc01 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Fri, 12 Sep 2025 20:39:04 +0100 Subject: [PATCH 06/68] update trait extend method to receive slice instead --- protocols/gossipsub/src/partial.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/protocols/gossipsub/src/partial.rs b/protocols/gossipsub/src/partial.rs index cc4d301ce6e..fe8baabd178 100644 --- a/protocols/gossipsub/src/partial.rs +++ b/protocols/gossipsub/src/partial.rs @@ -81,7 +81,7 @@ pub trait Partial { /// if the data was invalid or couldn't be processed. fn extend_from_encoded_partial_message( &mut self, - data: Vec, + data: &[u8], ) -> Result<(), PartialMessageError>; /// Consumes self and returns the message data. @@ -136,7 +136,7 @@ impl Partial for () { fn extend_from_encoded_partial_message( &mut self, - _data: Vec, + _data: &[u8], ) -> Result<(), PartialMessageError> { // This should never be called since we never advertise having or wanting parts, // but if it is called, just ignore the data silently From b1b8dbe0339c1b5a4d95f8f5c390c957f97dd890 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Mon, 15 Sep 2025 11:07:00 +0100 Subject: [PATCH 07/68] implement Partial message handling and publishing at the behaviour level --- protocols/gossipsub/src/behaviour.rs | 121 ++++++++++++++++++++------- protocols/gossipsub/src/partial.rs | 51 ++--------- protocols/gossipsub/src/types.rs | 12 ++- 3 files changed, 108 insertions(+), 76 deletions(-) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 72cf5083d17..7b45d93e319 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -56,7 +56,6 @@ use crate::metrics::{Churn, Config as MetricsConfig, Inclusion, Metrics, Penalty use crate::{ backoff::BackoffStorage, config::{Config, ValidationMode}, - error::PartialMessageError, gossip_promises::GossipPromises, handler::{Handler, HandlerEvent, HandlerIn}, mcache::MessageCache, @@ -142,14 +141,18 @@ pub enum Event { /// The decompressed message itself. message: Message, }, - /// A Partial message has been completed. + /// A new partial message has been received. Partial { /// The peer that forwarded us this message. propagation_source: PeerId, /// The group ID that identifies the complete logical message. group_id: Vec, - /// The specific partial message data. - data: Vec, + /// The partial message data. + message: Option>, + /// The partial message iwant. + iwant: Option>, + /// The partial message ihave. + ihave: Option>, }, /// A remote subscribed to a topic. Subscribed { @@ -284,7 +287,7 @@ pub struct Behaviour>, + connected_peers: HashMap, /// A set of all explicit peers. These are peers that remain connected and we unconditionally /// forward messages to, outside of the scoring system. @@ -803,39 +806,63 @@ where pub fn publish_partial( &mut self, topic: impl Into, - mut partial_message: P, + partial_message: P, ) -> Result<(), PublishError> { let topic_id = topic.into(); if let Some(existing) = self .partial_messages .get_mut(&topic_id) - .and_then(|p| p.remove(partial_message.group_id())) + .and_then(|p| p.get_mut(partial_message.group_id())) { // Return err if trying to publish the same partial message state we currently have. 
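// For instance, with index-list metadata: a second publish_partial call
// while available_parts() is still Some(&[0, 1]) is rejected as a
// duplicate, whereas Some(&[0, 1, 2]) carries a new part and proceeds.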
if existing.available_parts() == partial_message.available_parts() { return Err(PublishError::Duplicate); } - partial_message - .extend_from_encoded_partial_message(existing.into_data()) - .map_err(PublishError::Partial)?; } let available_parts = partial_message.available_parts().map(|p| p.to_vec()); let missing_parts = partial_message.missing_parts().map(|p| p.to_vec()); let group_id = partial_message.group_id().to_vec(); - let message_data = partial_message.as_data().to_vec(); // TODO: should we construct a recipient list just for partials? let recipient_peers = self.get_publish_peers(&topic_id); - let rpc = PartialMessage { - topic_id, - group_id, - iwant: missing_parts, - ihave: available_parts, - message: Some(message_data), - }; let mut publish_failed = true; for peer_id in recipient_peers.iter() { + // TODO: this can be optimized, we are going to get the peer again on `send_message` + let Some(peer) = &mut self.connected_peers.get_mut(peer_id) else { + tracing::error!(peer = %peer_id, + "Could not send rpc to connection handler, peer doesn't exist in connected peer list"); + continue; + }; + + let peer_partials = peer.partial_messages.entry(topic_id.clone()).or_default(); + let peer_partial = peer_partials.entry(group_id.clone()).or_default(); + + let Ok((message_data, rest_wanted)) = + partial_message.partial_message_bytes_from_metadata(&peer_partial.iwant) + else { + tracing::error!(peer = %peer_id, group_id = ?group_id, + "Could not reconstruct message bytes for peer metadata"); + peer_partials.remove(&group_id); + continue; + }; + + if let Some(r) = rest_wanted { + peer_partial.iwant = r; + } else { + // Peer partial is now complete + // remove it from the list + peer_partials.remove(&group_id); + } + + let rpc = PartialMessage { + topic_id: topic_id.clone(), + group_id: group_id.clone(), + iwant: missing_parts.clone(), + ihave: available_parts.clone(), + message: Some(message_data), + }; + if self.send_message(*peer_id, RpcOut::PartialMessage(rpc.clone())) { publish_failed = false; } @@ -1623,11 +1650,7 @@ where } /// Handle incoming partial message from a peer - fn handle_partial_message( - &mut self, - peer_id: &PeerId, - partial_message: crate::types::PartialMessage, - ) { + fn handle_partial_message(&mut self, peer_id: &PeerId, partial_message: PartialMessage) { tracing::debug!( peer=%peer_id, topic=%partial_message.topic_id, @@ -1636,13 +1659,47 @@ where ); // Check if peer exists - let Some(_peer) = self.connected_peers.get(peer_id) else { + let Some(peer) = self.connected_peers.get_mut(peer_id) else { tracing::error!( peer=%peer_id, "Partial message from unknown peer" ); return; }; + + let peer_partial = peer + .partial_messages + .entry(partial_message.topic_id.clone()) + .or_default() + .entry(partial_message.group_id.clone()) + .or_default(); + + // Noop if the received partial is the same we already have. 
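// The handler below records what each peer has and still wants so that later
// publishes can target it. How that state gets used (illustrative helper,
// not part of the patch): with bitmap metadata, the parts worth pushing to a
// peer are the intersection of the parts we hold and the parts it wants.
fn sendable(our_have: &[u8], peer_wants: &[u8]) -> Vec<u8> {
    our_have
        .iter()
        .zip(peer_wants)
        .map(|(have, want)| have & want)
        .collect()
}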
+ if partial_message.ihave.as_ref() == Some(&peer_partial.ihave) + && partial_message.iwant.as_ref() == Some(&peer_partial.iwant) + && partial_message.message.as_ref() == Some(&peer_partial.message) + { + return; + } + + if let Some(ref iwant) = partial_message.iwant { + peer_partial.iwant = iwant.clone(); + } + if let Some(ref ihave) = partial_message.ihave { + peer_partial.ihave = ihave.clone(); + } + if let Some(ref message) = partial_message.message { + peer_partial.message = message.clone(); + } + + self.events + .push_back(ToSwarm::GenerateEvent(Event::Partial { + propagation_source: *peer_id, + group_id: partial_message.group_id, + message: partial_message.message, + iwant: partial_message.iwant, + ihave: partial_message.ihave, + })); } /// Removes the specified peer from the mesh, returning true if it was present. @@ -3590,12 +3647,12 @@ where /// This is called when peers are added to any mesh. It checks if the peer existed /// in any other mesh. If this is the first mesh they have joined, it queues a message to notify /// the appropriate connection handler to maintain a connection. -fn peer_added_to_mesh( +fn peer_added_to_mesh( peer_id: PeerId, new_topics: Vec<&TopicHash>, mesh: &HashMap>, events: &mut VecDeque>, - connections: &HashMap>, + connections: &HashMap, ) { // Ensure there is an active connection let connection_id = match connections.get(&peer_id) { @@ -3632,12 +3689,12 @@ fn peer_added_to_mesh( /// This is called when peers are removed from a mesh. It checks if the peer exists /// in any other mesh. If this is the last mesh they have joined, we return true, in order to /// notify the handler to no longer maintain a connection. -fn peer_removed_from_mesh( +fn peer_removed_from_mesh( peer_id: PeerId, old_topic: &TopicHash, mesh: &HashMap>, events: &mut VecDeque>, - connections: &HashMap>, + connections: &HashMap, ) { // Ensure there is an active connection let connection_id = match connections.get(&peer_id) { @@ -3674,8 +3731,8 @@ fn peer_removed_from_mesh( /// Helper function to get a subset of random gossipsub peers for a `topic_hash` /// filtered by the function `f`. The number of peers to get equals the output of `n_map` /// that gets as input the number of filtered peers. -fn get_random_peers_dynamic( - connected_peers: &HashMap>, +fn get_random_peers_dynamic( + connected_peers: &HashMap, topic_hash: &TopicHash, // maps the number of total peers to the number of selected peers n_map: impl Fn(usize) -> usize, @@ -3707,8 +3764,8 @@ fn get_random_peers_dynamic( /// Helper function to get a set of `n` random gossipsub peers for a `topic_hash` /// filtered by the function `f`. -fn get_random_peers( - connected_peers: &HashMap>, +fn get_random_peers( + connected_peers: &HashMap, topic_hash: &TopicHash, n: usize, f: impl FnMut(&PeerId) -> bool, diff --git a/protocols/gossipsub/src/partial.rs b/protocols/gossipsub/src/partial.rs index fe8baabd178..b8b4fb90b87 100644 --- a/protocols/gossipsub/src/partial.rs +++ b/protocols/gossipsub/src/partial.rs @@ -69,7 +69,10 @@ pub trait Partial { /// Returns a tuple of: /// - The encoded partial message bytes to send over the network /// - Optional remaining metadata if more parts are still available after this one - fn partial_message_bytes_from_metadata(&self, metadata: &[u8]) -> (Vec, Option>); + fn partial_message_bytes_from_metadata( + &self, + metadata: &[u8], + ) -> Result<(Vec, Option>), PartialMessageError>; /// Extends this message with received partial message data. 
/// @@ -83,37 +86,6 @@ pub trait Partial { &mut self, data: &[u8], ) -> Result<(), PartialMessageError>; - - /// Consumes self and returns the message data. - /// - /// This method should only be called when the partial message reconstruction - /// is complete (i.e., when `missing_parts()` returns `None`). - /// Calling this method on an incomplete partial message may return partial data, - /// invalid data, or panic, depending on the implementation. - /// - /// # Returns - /// - /// The complete message data as a `Vec`. The format and contents of this - /// data are application-defined and should match what would have been sent - /// in a regular gossipsub message. - fn into_data(self) -> Vec; - - /// Returns the complete message data without consuming self. - /// - /// This method provides access to the reconstructed message data for use cases - /// like eager pushing, where the partial message implementation needs to send - /// complete or substantial portions of the message proactively to peers. - /// - /// Unlike `into_data()`, this method does not consume self, allowing the - /// partial message to continue participating in the reconstruction process. - /// The returned data should represent the current state of reconstruction - - /// this may be incomplete data, complete data, or application-specific - /// encoded data depending on the implementation's needs. - /// - /// This method is called during `publish_partial` operations when - /// applications want to eagerly push data to peers without waiting for - /// explicit requests via PartialIWANT messages. - fn as_data(&self) -> &[u8]; } /// Default implementation that disables partial messages. @@ -130,8 +102,11 @@ impl Partial for () { None } - fn partial_message_bytes_from_metadata(&self, _metadata: &[u8]) -> (Vec, Option>) { - (vec![], None) + fn partial_message_bytes_from_metadata( + &self, + _metadata: &[u8], + ) -> Result<(Vec, Option>), PartialMessageError> { + Ok((vec![], None)) } fn extend_from_encoded_partial_message( @@ -142,12 +117,4 @@ impl Partial for () { // but if it is called, just ignore the data silently Ok(()) } - - fn into_data(self) -> Vec { - vec![] - } - - fn as_data(&self) -> &[u8] { - &[] - } } diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs index 4fdf1bb23be..e508442ecfa 100644 --- a/protocols/gossipsub/src/types.rs +++ b/protocols/gossipsub/src/types.rs @@ -105,7 +105,7 @@ impl std::fmt::Debug for MessageId { #[derive(Debug)] /// Connected peer details. -pub(crate) struct PeerDetails
<P>
{ +pub(crate) struct PeerDetails { /// The kind of protocol the peer supports. pub(crate) kind: PeerKind, /// The Extensions supported by the peer if any. @@ -121,7 +121,15 @@ pub(crate) struct PeerDetails
<P>
{ /// Don't send messages. pub(crate) dont_send: LinkedHashMap, /// Peer Partial messages. - pub(crate) partial_messages: HashMap<(TopicHash, Vec), P>, + pub(crate) partial_messages: HashMap, PartialData>>, +} + +/// The partial message data the peer has. +#[derive(Debug, Default)] +pub(crate) struct PartialData { + pub(crate) ihave: Vec, + pub(crate) iwant: Vec, + pub(crate) message: Vec, } /// Describes the types of peers that can exist in the gossipsub context. From b433556e5c7466ee8c844cd38f4aea6f29ef4ea1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 18 Sep 2025 08:11:28 +0000 Subject: [PATCH 08/68] deps: bump Swatinem/rust-cache from 2.8.0 to 2.8.1 Pull-Request: #6164. --- .github/workflows/cache-factory.yml | 2 +- .github/workflows/ci.yml | 20 ++++++++++---------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/cache-factory.yml b/.github/workflows/cache-factory.yml index 798483bce37..36b294e40e1 100644 --- a/.github/workflows/cache-factory.yml +++ b/.github/workflows/cache-factory.yml @@ -22,7 +22,7 @@ jobs: - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0 + - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 with: shared-key: stable-cache diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 97ae879f654..bace02a86b0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -41,7 +41,7 @@ jobs: - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0 + - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 with: shared-key: stable-cache save-if: false @@ -150,7 +150,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0 - - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0 + - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 with: key: ${{ matrix.target }} save-if: ${{ github.ref == 'refs/heads/master' }} @@ -175,7 +175,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0 - - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0 + - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 with: save-if: ${{ github.ref == 'refs/heads/master' }} @@ -195,7 +195,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0 - - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0 + - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 with: key: ${{ matrix.features }} save-if: ${{ github.ref == 'refs/heads/master' }} @@ -212,7 +212,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0 - - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0 + - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 with: save-if: ${{ github.ref == 'refs/heads/master' }} @@ -238,7 +238,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0 - - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0 + - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 with: save-if: ${{ github.ref == 'refs/heads/master' }} @@ -254,7 +254,7 @@ jobs: - uses: 
r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0 - - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0 + - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 with: save-if: ${{ github.ref == 'refs/heads/master' }} @@ -273,7 +273,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0 - - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0 + - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 with: shared-key: stable-cache save-if: false @@ -364,7 +364,7 @@ jobs: steps: - uses: actions/checkout@v5 - - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0 + - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 - run: cargo install --version 0.10.0 pb-rs --locked @@ -390,7 +390,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v5 - - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0 + - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 - run: cargo metadata --locked --format-version=1 > /dev/null cargo-deny: From 63705143aeb595ef33d23fab792d9997c54750d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Sat, 20 Sep 2025 19:46:46 +0100 Subject: [PATCH 09/68] address Marco's review --- protocols/gossipsub/src/behaviour.rs | 73 +++++++++------------- protocols/gossipsub/src/behaviour/tests.rs | 9 +-- protocols/gossipsub/src/types.rs | 21 +++++-- 3 files changed, 49 insertions(+), 54 deletions(-) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 9a2c82e0ab0..1cfe9d6354c 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -271,7 +271,7 @@ impl From for PublishConfig { /// /// The TopicSubscriptionFilter allows applications to implement specific filters on topics to /// prevent unwanted messages being propagated and evaluated. -pub struct Behaviour { +pub struct Behaviour { /// Configuration providing gossipsub performance parameters. config: Config, @@ -343,9 +343,6 @@ pub struct Behaviour, P>>, - /// Keep track of a set of internal metrics relating to gossipsub. #[cfg(feature = "metrics")] metrics: Option, @@ -357,11 +354,10 @@ pub struct Behaviour Behaviour +impl Behaviour where D: DataTransform + Default, F: TopicSubscriptionFilter + Default, - P: Partial + Default, { /// Creates a Gossipsub [`Behaviour`] struct given a set of parameters specified via a /// [`Config`]. This has no subscription filter and uses no compression. @@ -375,11 +371,10 @@ where } } -impl Behaviour +impl Behaviour where D: DataTransform + Default, F: TopicSubscriptionFilter, - P: Partial + Default, { /// Creates a Gossipsub [`Behaviour`] struct given a set of parameters specified via a /// [`Config`] and a custom subscription filter. @@ -397,11 +392,10 @@ where } } -impl Behaviour +impl Behaviour where D: DataTransform, F: TopicSubscriptionFilter + Default, - P: Partial + Default, { /// Creates a Gossipsub [`Behaviour`] struct given a set of parameters specified via a /// [`Config`] and a custom data transform. @@ -420,11 +414,10 @@ where } } -impl Behaviour +impl Behaviour where D: DataTransform, F: TopicSubscriptionFilter, - P: Partial + Default, { /// Creates a Gossipsub [`Behaviour`] struct given a set of parameters specified via a /// [`Config`] and a custom subscription filter and data transform. 
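A minimal sketch of how the two remaining type parameters compose after this change, assuming the crate's existing constructor surface (`Behaviour::new`, `Config::default`); illustrative only, not part of the patch:

use libp2p_gossipsub::{Behaviour, Config, MessageAuthenticity};
use libp2p_identity::Keypair;

// With the `P` parameter gone, `Behaviour` defaults to the identity data
// transform and the allow-all subscription filter, so plain construction
// is unchanged.
fn build(keypair: Keypair) -> Behaviour {
    Behaviour::new(MessageAuthenticity::Signed(keypair), Config::default())
        .expect("default gossipsub config is valid")
}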
@@ -468,7 +461,6 @@ where config, subscription_filter, data_transform, - partial_messages: Default::default(), failed_messages: Default::default(), gossip_promises: Default::default(), }) @@ -487,11 +479,10 @@ where } } -impl Behaviour +impl Behaviour where D: DataTransform + Send + 'static, F: TopicSubscriptionFilter + Send + 'static, - P: Partial + Send + 'static + Default, { /// Lists the hashes of the topics we are currently subscribed to. pub fn topics(&self) -> impl Iterator { @@ -804,22 +795,12 @@ where Ok(msg_id) } - pub fn publish_partial( + pub fn publish_partial( &mut self, topic: impl Into, partial_message: P, ) -> Result<(), PublishError> { let topic_id = topic.into(); - if let Some(existing) = self - .partial_messages - .get_mut(&topic_id) - .and_then(|p| p.get_mut(partial_message.group_id())) - { - // Return err if trying to publish the same partial message state we currently have. - if existing.available_parts() == partial_message.available_parts() { - return Err(PublishError::Duplicate); - } - } let available_parts = partial_message.available_parts().map(|p| p.to_vec()); let missing_parts = partial_message.missing_parts().map(|p| p.to_vec()); @@ -840,7 +821,7 @@ where let peer_partial = peer_partials.entry(group_id.clone()).or_default(); let Ok((message_data, rest_wanted)) = - partial_message.partial_message_bytes_from_metadata(&peer_partial.iwant) + partial_message.partial_message_bytes_from_metadata(&peer_partial.wanted) else { tracing::error!(peer = %peer_id, group_id = ?group_id, "Could not reconstruct message bytes for peer metadata"); @@ -848,13 +829,18 @@ where continue; }; - if let Some(r) = rest_wanted { - peer_partial.iwant = r; - } else { + match rest_wanted { + // No new data to send peer. + Some(r) if r == peer_partial.wanted => { + continue; + } + Some(r) => peer_partial.wanted = r, // Peer partial is now complete // remove it from the list - peer_partials.remove(&group_id); - } + None => { + peer_partials.remove(&group_id); + } + }; let rpc = PartialMessage { topic_id: topic_id.clone(), @@ -1677,21 +1663,17 @@ where .or_default(); // Noop if the received partial is the same we already have. - if partial_message.ihave.as_ref() == Some(&peer_partial.ihave) - && partial_message.iwant.as_ref() == Some(&peer_partial.iwant) - && partial_message.message.as_ref() == Some(&peer_partial.message) + if partial_message.ihave.as_ref() == Some(&peer_partial.has) + && partial_message.iwant.as_ref() == Some(&peer_partial.wanted) { return; } if let Some(ref iwant) = partial_message.iwant { - peer_partial.iwant = iwant.clone(); + peer_partial.wanted = iwant.clone(); } if let Some(ref ihave) = partial_message.ihave { - peer_partial.ihave = ihave.clone(); - } - if let Some(ref message) = partial_message.message { - peer_partial.message = message.clone(); + peer_partial.has = ihave.clone(); } self.events @@ -2695,7 +2677,7 @@ where } self.failed_messages.shrink_to_fit(); - // Flush stale IDONTWANTs. + // Flush stale IDONTWANTs and partial messages. 
for peer in self.connected_peers.values_mut() { while let Some((_front, instant)) = peer.dont_send.front() { if (*instant + IDONTWANT_TIMEOUT) >= Instant::now() { break; } else { peer.dont_send.pop_front(); } } + for topics in peer.partial_messages.values_mut() { + topics.retain(|_, partial| { + partial.ttl -= 1; + partial.ttl > 0 + }); + } } #[cfg(feature = "metrics")] @@ -3281,11 +3269,10 @@ fn get_ip_addr(addr: &Multiaddr) -> Option<IpAddr> { }) } -impl<C, F, P> NetworkBehaviour for Behaviour<C, F, P> +impl<C, F> NetworkBehaviour for Behaviour<C, F> where C: Send + 'static + DataTransform, F: Send + 'static + TopicSubscriptionFilter, - P: Send + 'static + Partial + Default, { type ConnectionHandler = Handler; type ToSwarm = Event; diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index a5667aaa143..931ee20dab4 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -32,11 +32,6 @@ use super::*; use crate::{ config::{ConfigBuilder, TopicMeshConfig}, protocol::GossipsubCodec, -<<<<<<< HEAD - rpc::Receiver, - rpc_proto::proto, -======= ->>>>>>> fabf27f2021ba582d9b0d1c15ca47babef453497 subscription_filter::WhitelistSubscriptionFilter, types::{ControlAction, Extensions, RpcIn, RpcOut}, IdentTopic as Topic, @@ -6745,7 +6740,7 @@ fn test_handle_extensions_message() { .unwrap(); let peer_id = PeerId::random(); - let sender = Sender::new(gs.config.connection_handler_queue_len()); + let messages = Queue::new(gs.config.connection_handler_queue_len()); // Add peer without extensions gs.connected_peers.insert( @@ -6755,7 +6750,7 @@ fn test_handle_extensions_message() { connections: vec![ConnectionId::new_unchecked(0)], outbound: false, topics: BTreeSet::new(), - sender, + messages, dont_send: LinkedHashMap::new(), extensions: None, partial_messages: Default::default(), diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs index e1b3862c82f..f8d9e49d510 100644 --- a/protocols/gossipsub/src/types.rs +++ b/protocols/gossipsub/src/types.rs @@ -108,11 +108,24 @@ pub(crate) struct PeerDetails { } /// The partial message data the peer has. -#[derive(Debug, Default)] +#[derive(Debug)] pub(crate) struct PartialData { - pub(crate) ihave: Vec<u8>, - pub(crate) iwant: Vec<u8>, - pub(crate) message: Vec<u8>, + /// The remaining metadata needed by the peer. + pub(crate) wanted: Vec<u8>, + /// The last partial IHAVE received from the peer. + pub(crate) has: Vec<u8>, + /// The remaining heartbeats for this message to be deleted. + pub(crate) ttl: usize, +} + +impl Default for PartialData { + fn default() -> Self { + Self { + wanted: Default::default(), + has: Default::default(), + ttl: 5, + } + } } /// Describes the types of peers that can exist in the gossipsub context. 
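The heartbeat above ages per-peer partial-message state with a `ttl` that defaults to five heartbeats. A standalone sketch of that bookkeeping (names are illustrative, not the crate's API); note that `retain` keeps the entries for which the closure returns true, so live entries must return `ttl > 0`:

use std::collections::HashMap;

struct PartialData {
    wanted: Vec<u8>, // metadata the peer still wants
    ttl: usize,      // heartbeats left before this entry is dropped
}

fn age_partials(partials: &mut HashMap<Vec<u8>, PartialData>) {
    partials.retain(|_group_id, partial| {
        partial.ttl = partial.ttl.saturating_sub(1);
        // Keep live entries; expired ones are removed from the map.
        partial.ttl > 0
    });
}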
From 909a68feff86bd417fccaad4bc18c39379abb0de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Sat, 20 Sep 2025 20:22:06 +0100 Subject: [PATCH 10/68] feature gate partial messages --- protocols/gossipsub/Cargo.toml | 1 + protocols/gossipsub/src/behaviour.rs | 50 ++++++++++------------------ protocols/gossipsub/src/config.rs | 44 ------------------------ protocols/gossipsub/src/lib.rs | 5 ++- protocols/gossipsub/src/types.rs | 8 +++-- 5 files changed, 29 insertions(+), 79 deletions(-) diff --git a/protocols/gossipsub/Cargo.toml b/protocols/gossipsub/Cargo.toml index 1e75a65c143..19ee9c4b66e 100644 --- a/protocols/gossipsub/Cargo.toml +++ b/protocols/gossipsub/Cargo.toml @@ -13,6 +13,7 @@ categories = ["network-programming", "asynchronous"] [features] wasm-bindgen = ["getrandom/js", "futures-timer/wasm-bindgen"] metrics = ["prometheus-client"] +partial_messages = [] [dependencies] async-channel = "2.3.1" diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 1cfe9d6354c..fba9099d9a0 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -69,12 +69,15 @@ use crate::{ transform::{DataTransform, IdentityTransform}, types::{ ControlAction, Extensions, Graft, IDontWant, IHave, IWant, Message, MessageAcceptance, - MessageId, PartialMessage, PeerDetails, PeerInfo, PeerKind, Prune, RawMessage, RpcOut, - Subscription, SubscriptionAction, + MessageId, PeerDetails, PeerInfo, PeerKind, Prune, RawMessage, RpcOut, Subscription, + SubscriptionAction, }, - FailedMessages, Partial, PublishError, SubscriptionError, TopicScoreParams, ValidationError, + FailedMessages, PublishError, SubscriptionError, TopicScoreParams, ValidationError, }; +#[cfg(feature = "partial_messages")] +use crate::{partial::Partial, types::PartialMessage}; + #[cfg(test)] mod tests; @@ -142,18 +145,8 @@ pub enum Event { message: Message, }, /// A new partial message has been received. - Partial { - /// The peer that forwarded us this message. - propagation_source: PeerId, - /// The group ID that identifies the complete logical message. - group_id: Vec, - /// The partial message data. - message: Option>, - /// The partial message iwant. - iwant: Option>, - /// The partial message ihave. - ihave: Option>, - }, + #[cfg(feature = "partial_messages")] + Partial(crate::types::PartialMessage), /// A remote subscribed to a topic. Subscribed { /// Remote that has subscribed. @@ -795,6 +788,7 @@ where Ok(msg_id) } + #[cfg(feature = "partial_messages")] pub fn publish_partial( &mut self, topic: impl Into, @@ -1638,6 +1632,7 @@ where } /// Handle incoming partial message from a peer + #[cfg(feature = "partial_messages")] fn handle_partial_message(&mut self, peer_id: &PeerId, partial_message: PartialMessage) { tracing::debug!( peer=%peer_id, @@ -1677,13 +1672,7 @@ where } self.events - .push_back(ToSwarm::GenerateEvent(Event::Partial { - propagation_source: *peer_id, - group_id: partial_message.group_id, - message: partial_message.message, - iwant: partial_message.iwant, - ihave: partial_message.ihave, - })); + .push_back(ToSwarm::GenerateEvent(Event::Partial(partial_message))); } /// Removes the specified peer from the mesh, returning true if it was present. 
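Application-side handling of the new event, sketched under the assumption that the `partial_messages` feature is enabled and that the event keeps the tuple shape introduced here (later patches in this series rework the carried fields):

#[cfg(feature = "partial_messages")]
fn on_gossipsub_event(event: libp2p_gossipsub::Event) {
    match event {
        libp2p_gossipsub::Event::Partial(partial) => {
            // Feed the received bytes and metadata into application-owned
            // reassembly state keyed by (topic, group ID).
            tracing::debug!(?partial, "received partial message");
        }
        other => tracing::trace!(?other, "gossipsub event"),
    }
}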
@@ -2686,6 +2675,7 @@ where peer.dont_send.pop_front(); } } + #[cfg(feature = "partial_messages")] for topics in peer.partial_messages.values_mut() { topics.retain(|_, partial| { partial.ttl -= 1; @@ -3297,6 +3287,7 @@ where topics: Default::default(), dont_send: LinkedHashMap::new(), extensions: None, + #[cfg(feature = "partial_messages")] partial_messages: Default::default(), }); // Add the new connection @@ -3309,7 +3300,7 @@ where peer_id, RpcOut::Extensions(Extensions { test_extension: Some(true), - partial_messages: if self.config.partial_messages_extension() { + partial_messages: if cfg!(feature = "partial_messages") { Some(true) } else { None @@ -3341,6 +3332,7 @@ where topics: Default::default(), dont_send: LinkedHashMap::new(), extensions: None, + #[cfg(feature = "partial_messages")] partial_messages: Default::default(), }); // Add the new connection @@ -3353,7 +3345,7 @@ where peer_id, RpcOut::Extensions(Extensions { test_extension: Some(true), - partial_messages: if self.config.partial_messages_extension() { + partial_messages: if cfg!(feature = "partial_messages") { Some(true) } else { None @@ -3554,15 +3546,9 @@ where tracing::debug!("Received Test Extension"); } + #[cfg(feature = "partial_messages")] if let Some(partial_message) = rpc.partial_message { - if self.config.partial_messages_extension() { - self.handle_partial_message(&propagation_source, partial_message); - } else { - tracing::debug!( - peer=%propagation_source, - "Ignoring partial message - extension disabled" - ); - } + self.handle_partial_message(&propagation_source, partial_message); } } } diff --git a/protocols/gossipsub/src/config.rs b/protocols/gossipsub/src/config.rs index 855fcb1f62a..fa685f3085a 100644 --- a/protocols/gossipsub/src/config.rs +++ b/protocols/gossipsub/src/config.rs @@ -119,7 +119,6 @@ pub struct Config { unsubscribe_backoff: Duration, backoff_slack: u32, flood_publish: bool, - partial_messages_extension: bool, graft_flood_threshold: Duration, opportunistic_graft_ticks: u64, opportunistic_graft_peers: usize, @@ -364,13 +363,6 @@ impl Config { self.flood_publish } - /// Whether to enable the partial messages extension. - /// When enabled, gossipsub can handle partial message reconstruction for large messages. - /// The default is false. - pub fn partial_messages_extension(&self) -> bool { - self.partial_messages_extension - } - /// If a GRAFT comes before `graft_flood_threshold` has elapsed since the last PRUNE, /// then there is an extra score penalty applied to the peer through P7. pub fn graft_flood_threshold(&self) -> Duration { @@ -540,7 +532,6 @@ impl Default for ConfigBuilder { unsubscribe_backoff: Duration::from_secs(10), backoff_slack: 1, flood_publish: true, - partial_messages_extension: false, graft_flood_threshold: Duration::from_secs(10), opportunistic_graft_ticks: 60, opportunistic_graft_peers: 2, @@ -900,14 +891,6 @@ impl ConfigBuilder { self } - /// Whether to enable the partial messages extension. - /// When enabled, gossipsub can handle partial message reconstruction for large messages. - /// The default is false. - pub fn partial_messages_extension(&mut self, partial_messages_extension: bool) -> &mut Self { - self.config.partial_messages_extension = partial_messages_extension; - self - } - /// If a GRAFT comes before `graft_flood_threshold` has elapsed since the last PRUNE, /// then there is an extra score penalty applied to the peer through P7. 
pub fn graft_flood_threshold(&mut self, graft_flood_threshold: Duration) -> &mut Self { @@ -1173,7 +1156,6 @@ impl std::fmt::Debug for Config { let _ = builder.field("prune_backoff", &self.prune_backoff); let _ = builder.field("backoff_slack", &self.backoff_slack); let _ = builder.field("flood_publish", &self.flood_publish); - let _ = builder.field("partial_messages_extension", &self.partial_messages_extension); let _ = builder.field("graft_flood_threshold", &self.graft_flood_threshold); let _ = builder.field( "mesh_outbound_min", @@ -1323,29 +1305,3 @@ mod test { MessageId::from(v) } } - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_partial_messages_extension_config() { - // Test default is false - let config = Config::default(); - assert!(!config.partial_messages_extension()); - - // Test builder can enable it - let config = ConfigBuilder::default() - .partial_messages_extension(true) - .build() - .unwrap(); - assert!(config.partial_messages_extension()); - - // Test builder can explicitly disable it - let config = ConfigBuilder::default() - .partial_messages_extension(false) - .build() - .unwrap(); - assert!(!config.partial_messages_extension()); - } -} diff --git a/protocols/gossipsub/src/lib.rs b/protocols/gossipsub/src/lib.rs index 4f8b92663f1..7adeb0e5a87 100644 --- a/protocols/gossipsub/src/lib.rs +++ b/protocols/gossipsub/src/lib.rs @@ -113,16 +113,19 @@ mod topic; mod transform; mod types; +#[cfg(feature = "partial_messages")] pub mod partial; #[cfg(feature = "metrics")] pub use metrics::Config as MetricsConfig; +#[cfg(feature = "partial_messages")] +pub use self::{error::PartialMessageError, partial::Partial, types::PartialMessage}; + pub use self::{ behaviour::{Behaviour, Event, MessageAuthenticity}, config::{Config, ConfigBuilder, ValidationMode, Version}, error::{ConfigBuilderError, PublishError, SubscriptionError, ValidationError}, - partial::Partial, peer_score::{ score_parameter_decay, score_parameter_decay_with_base, PeerScoreParams, PeerScoreThresholds, TopicScoreParams, diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs index f8d9e49d510..63d5acbf0d4 100644 --- a/protocols/gossipsub/src/types.rs +++ b/protocols/gossipsub/src/types.rs @@ -20,7 +20,7 @@ //! A collection of types using the Gossipsub system. use std::{ - collections::{BTreeSet, HashMap}, + collections::BTreeSet, fmt::{self, Debug}, }; @@ -102,12 +102,15 @@ pub(crate) struct PeerDetails { /// Don't send messages. pub(crate) dont_send: LinkedHashMap, /// Peer Partial messages. - pub(crate) partial_messages: HashMap, PartialData>>, + #[cfg(feature = "partial_messages")] + pub(crate) partial_messages: + std::collections::HashMap, PartialData>>, /// Message queue consumed by the connection handler. pub(crate) messages: Queue, } /// The partial message data the peer has. 
+#[cfg(feature = "partial_messages")] #[derive(Debug)] pub(crate) struct PartialData { /// The remaining metadata needed by the peer. @@ -118,6 +121,7 @@ pub(crate) struct PartialData { pub(crate) ttl: usize, } +#[cfg(feature = "partial_messages")] impl Default for PartialData { fn default() -> Self { Self { From 046ad47a09d9dfab6f963448ae54624cd7e4a7ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Wed, 24 Sep 2025 14:42:46 +0100 Subject: [PATCH 11/68] remove PartialIDONTWANT from proto to follow spec --- protocols/gossipsub/src/generated/gossipsub/pb.rs | 8 ++------ protocols/gossipsub/src/generated/rpc.proto | 3 +-- protocols/gossipsub/src/types.rs | 1 - 3 files changed, 3 insertions(+), 9 deletions(-) diff --git a/protocols/gossipsub/src/generated/gossipsub/pb.rs b/protocols/gossipsub/src/generated/gossipsub/pb.rs index 3aed06a6515..14c3ec8d6cd 100644 --- a/protocols/gossipsub/src/generated/gossipsub/pb.rs +++ b/protocols/gossipsub/src/generated/gossipsub/pb.rs @@ -669,7 +669,6 @@ pub struct PartialMessagesExtension { pub groupID: Option<Vec<u8>>, pub message: Option<PartialMessage>, pub iwant: Option<PartialIWANT>, - pub idontwant: Option<PartialIDONTWANT>, pub ihave: Option<PartialIHAVE>, } @@ -682,8 +681,7 @@ impl<'a> MessageRead<'a> for PartialMessagesExtension { Ok(18) => msg.groupID = Some(r.read_bytes(bytes)?.to_owned()), Ok(26) => msg.message = Some(r.read_message::<PartialMessage>(bytes)?), Ok(34) => msg.iwant = Some(r.read_message::<PartialIWANT>(bytes)?), - Ok(42) => msg.idontwant = Some(r.read_message::<PartialIDONTWANT>(bytes)?), - Ok(50) => msg.ihave = Some(r.read_message::<PartialIHAVE>(bytes)?), + Ok(42) => msg.ihave = Some(r.read_message::<PartialIHAVE>(bytes)?), Ok(t) => { r.read_unknown(bytes, t)?; } Err(e) => return Err(e), } @@ -699,7 +697,6 @@ impl MessageWrite for PartialMessagesExtension { + self.groupID.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + self.message.as_ref().map_or(0, |m| 1 + sizeof_len((m).get_size())) + self.iwant.as_ref().map_or(0, |m| 1 + sizeof_len((m).get_size())) - + self.idontwant.as_ref().map_or(0, |m| 1 + sizeof_len((m).get_size())) + self.ihave.as_ref().map_or(0, |m| 1 + sizeof_len((m).get_size())) } @@ -708,8 +705,7 @@ impl MessageWrite for PartialMessagesExtension { if let Some(ref s) = self.groupID { w.write_with_tag(18, |w| w.write_bytes(&**s))?; } if let Some(ref s) = self.message { w.write_with_tag(26, |w| w.write_message(s))?; } if let Some(ref s) = self.iwant { w.write_with_tag(34, |w| w.write_message(s))?; } - if let Some(ref s) = self.idontwant { w.write_with_tag(42, |w| w.write_message(s))?; } - if let Some(ref s) = self.ihave { w.write_with_tag(50, |w| w.write_message(s))?; } + if let Some(ref s) = self.ihave { w.write_with_tag(42, |w| w.write_message(s))?; } Ok(()) } } diff --git a/protocols/gossipsub/src/generated/rpc.proto b/protocols/gossipsub/src/generated/rpc.proto index 2951fe2b283..f571357b9dc 100644 --- a/protocols/gossipsub/src/generated/rpc.proto +++ b/protocols/gossipsub/src/generated/rpc.proto @@ -114,8 +114,7 @@ message PartialMessagesExtension { optional PartialMessage message = 3; optional PartialIWANT iwant = 4; - optional PartialIDONTWANT idontwant = 5; - optional PartialIHAVE ihave = 6; + optional PartialIHAVE ihave = 5; } message PartialMessage { diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs index 63d5acbf0d4..b6602ef6eb9 100644 --- a/protocols/gossipsub/src/types.rs +++ b/protocols/gossipsub/src/types.rs @@ -603,7 +603,6 @@ impl From<RpcOut> for proto::RPC { ihave: ihave.map(|metadata| proto::PartialIHAVE { metadata: Some(metadata), }), - idontwant: None, }), }, } From
69c2d9528cf995e39f07ec4dbbf0dd063fdfa0f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Thu, 25 Sep 2025 13:25:43 +0100 Subject: [PATCH 12/68] update trait return types and remove unused impl --- protocols/gossipsub/src/behaviour.rs | 13 +++++---- protocols/gossipsub/src/error.rs | 14 +++------- protocols/gossipsub/src/partial.rs | 41 ++++------------------------ protocols/gossipsub/src/types.rs | 5 ++-- 4 files changed, 20 insertions(+), 53 deletions(-) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index fba9099d9a0..05d1e908f1a 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -796,9 +796,11 @@ where ) -> Result<(), PublishError> { let topic_id = topic.into(); - let available_parts = partial_message.available_parts().map(|p| p.to_vec()); - let missing_parts = partial_message.missing_parts().map(|p| p.to_vec()); - let group_id = partial_message.group_id().to_vec(); + let available_parts = partial_message + .available_parts() + .map(|p| p.as_ref().to_vec()); + let missing_parts = partial_message.missing_parts().map(|p| p.as_ref().to_vec()); + let group_id = partial_message.group_id().as_ref().to_vec(); // TODO: should we construct a recipient list just for partials? let recipient_peers = self.get_publish_peers(&topic_id); @@ -814,8 +816,9 @@ where let peer_partials = peer.partial_messages.entry(topic_id.clone()).or_default(); let peer_partial = peer_partials.entry(group_id.clone()).or_default(); - let Ok((message_data, rest_wanted)) = - partial_message.partial_message_bytes_from_metadata(&peer_partial.wanted) + let Ok((message_data, rest_wanted)) = partial_message + .partial_message_bytes_from_metadata(&peer_partial.wanted) + .map(|(m, r)| (m.as_ref().to_vec(), r.map(|r| r.as_ref().to_vec()))) else { tracing::error!(peer = %peer_id, group_id = ?group_id, "Could not reconstruct message bytes for peer metadata"); diff --git a/protocols/gossipsub/src/error.rs b/protocols/gossipsub/src/error.rs index 18c8b84205f..fac93210e7d 100644 --- a/protocols/gossipsub/src/error.rs +++ b/protocols/gossipsub/src/error.rs @@ -180,10 +180,8 @@ pub enum PartialMessageError { /// The partial data doesn't belong to this message group. WrongGroup { - /// Expected minimum number of bytes. - expected: usize, - /// Actual number of bytes received. - received: usize, + /// Group ID of the received message. + received: Vec<u8>, }, /// The partial data is a duplicate of already received data. @@ -214,12 +212,8 @@ impl std::fmt::Display for PartialMessageError { Self::InvalidFormat => { write!(f, "Invalid data format") } - Self::WrongGroup { expected, received } => { - write!( - f, - "Wrong group ID: expected {:?}, got {:?}", - expected, received - ) + Self::WrongGroup { received } => { + write!(f, "Wrong group ID: got {:?}", received) } Self::DuplicateData(part_id) => { write!(f, "Duplicate data for part {:?}", part_id) diff --git a/protocols/gossipsub/src/partial.rs b/protocols/gossipsub/src/partial.rs index b8b4fb90b87..b6818eee3a6 100644 --- a/protocols/gossipsub/src/partial.rs +++ b/protocols/gossipsub/src/partial.rs @@ -38,7 +38,7 @@ pub trait Partial { /// All partial messages belonging to the same logical message should return /// the same group ID. This is used to associate partial messages together /// during reconstruction. - fn group_id(&self) -> &[u8]; + fn group_id(&self) -> impl AsRef<[u8]>; /// Returns metadata describing which parts of the message are missing. 
/// @@ -48,7 +48,7 @@ pub trait Partial { /// /// The returned bytes will be sent in PartialIWANT messages to request /// missing parts from peers. - fn missing_parts(&self) -> Option<&[u8]>; + fn missing_parts(&self) -> Option>; /// Returns metadata describing which parts of the message are available. /// @@ -58,7 +58,7 @@ pub trait Partial { /// /// The returned bytes will be sent in PartialIHAVE messages to advertise /// available parts to peers. - fn available_parts(&self) -> Option<&[u8]>; + fn available_parts(&self) -> Option>; /// Generates partial message bytes from the given metadata. /// @@ -71,8 +71,8 @@ pub trait Partial { /// - Optional remaining metadata if more parts are still available after this one fn partial_message_bytes_from_metadata( &self, - metadata: &[u8], - ) -> Result<(Vec, Option>), PartialMessageError>; + metadata: impl AsRef<[u8]>, + ) -> Result<(impl AsRef<[u8]>, Option>), PartialMessageError>; /// Extends this message with received partial message data. /// @@ -87,34 +87,3 @@ pub trait Partial { data: &[u8], ) -> Result<(), PartialMessageError>; } - -/// Default implementation that disables partial messages. -impl Partial for () { - fn group_id(&self) -> &[u8] { - &[] - } - - fn missing_parts(&self) -> Option<&[u8]> { - None - } - - fn available_parts(&self) -> Option<&[u8]> { - None - } - - fn partial_message_bytes_from_metadata( - &self, - _metadata: &[u8], - ) -> Result<(Vec, Option>), PartialMessageError> { - Ok((vec![], None)) - } - - fn extend_from_encoded_partial_message( - &mut self, - _data: &[u8], - ) -> Result<(), PartialMessageError> { - // This should never be called since we never advertise having or wanting parts, - // but if it is called, just ignore the data silently - Ok(()) - } -} diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs index b6602ef6eb9..fffe5a7802c 100644 --- a/protocols/gossipsub/src/types.rs +++ b/protocols/gossipsub/src/types.rs @@ -31,6 +31,8 @@ use libp2p_swarm::ConnectionId; use quick_protobuf::MessageWrite; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; +#[cfg(feature = "partial_messages")] +use std::collections::HashMap; use web_time::Instant; use crate::{queue::Queue, rpc_proto::proto, TopicHash}; @@ -103,8 +105,7 @@ pub(crate) struct PeerDetails { pub(crate) dont_send: LinkedHashMap, /// Peer Partial messages. #[cfg(feature = "partial_messages")] - pub(crate) partial_messages: - std::collections::HashMap, PartialData>>, + pub(crate) partial_messages: HashMap, PartialData>>, /// Message queue consumed by the connection handler. pub(crate) messages: Queue, } From 45c2bc1d83e685a5972f4e6b862c5e743357e66c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Fri, 26 Sep 2025 15:29:41 +0100 Subject: [PATCH 13/68] fixup! feature gate partial messages --- protocols/gossipsub/src/behaviour.rs | 23 +++++++++++++++++++++-- protocols/gossipsub/src/lib.rs | 2 +- 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 05d1e908f1a..fc82ec10341 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -146,7 +146,19 @@ pub enum Event { }, /// A new partial message has been received. #[cfg(feature = "partial_messages")] - Partial(crate::types::PartialMessage), + Partial { + topic_id: TopicHash, + /// The peer that forwarded us this message. + propagation_source: PeerId, + /// The group ID that identifies the complete logical message. 
+ group_id: Vec, + /// The partial message data. + message: Option>, + /// The partial message iwant. + iwant: Option>, + /// The partial message ihave. + ihave: Option>, + }, /// A remote subscribed to a topic. Subscribed { /// Remote that has subscribed. @@ -1675,7 +1687,14 @@ where } self.events - .push_back(ToSwarm::GenerateEvent(Event::Partial(partial_message))); + .push_back(ToSwarm::GenerateEvent(Event::Partial { + topic_id: partial_message.topic_id, + propagation_source: *peer_id, + group_id: partial_message.group_id, + message: partial_message.message, + iwant: partial_message.iwant, + ihave: partial_message.ihave, + })); } /// Removes the specified peer from the mesh, returning true if it was present. diff --git a/protocols/gossipsub/src/lib.rs b/protocols/gossipsub/src/lib.rs index 7adeb0e5a87..a0c2d007b5b 100644 --- a/protocols/gossipsub/src/lib.rs +++ b/protocols/gossipsub/src/lib.rs @@ -120,7 +120,7 @@ pub mod partial; pub use metrics::Config as MetricsConfig; #[cfg(feature = "partial_messages")] -pub use self::{error::PartialMessageError, partial::Partial, types::PartialMessage}; +pub use self::{error::PartialMessageError, partial::Partial}; pub use self::{ behaviour::{Behaviour, Event, MessageAuthenticity}, From 85b92f3550c3428df30705ebd60b5d5b83b5ad37 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Mon, 29 Sep 2025 19:46:57 +0100 Subject: [PATCH 14/68] update to spec changes --- protocols/gossipsub/src/behaviour.rs | 55 +++----- .../gossipsub/src/generated/gossipsub/pb.rs | 129 ++---------------- protocols/gossipsub/src/generated/rpc.proto | 20 +-- protocols/gossipsub/src/partial.rs | 20 +-- protocols/gossipsub/src/protocol.rs | 14 +- protocols/gossipsub/src/queue.rs | 2 +- protocols/gossipsub/src/types.rs | 47 +++---- 7 files changed, 68 insertions(+), 219 deletions(-) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index fc82ec10341..0b6200a59ed 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -155,9 +155,7 @@ pub enum Event { /// The partial message data. message: Option>, /// The partial message iwant. - iwant: Option>, - /// The partial message ihave. - ihave: Option>, + metadata: Option>, }, /// A remote subscribed to a topic. Subscribed { @@ -808,15 +806,12 @@ where ) -> Result<(), PublishError> { let topic_id = topic.into(); - let available_parts = partial_message - .available_parts() - .map(|p| p.as_ref().to_vec()); - let missing_parts = partial_message.missing_parts().map(|p| p.as_ref().to_vec()); + let metadata = partial_message.parts_metadata().as_ref().to_vec(); + let group_id = partial_message.group_id().as_ref().to_vec(); // TODO: should we construct a recipient list just for partials? let recipient_peers = self.get_publish_peers(&topic_id); - let mut publish_failed = true; for peer_id in recipient_peers.iter() { // TODO: this can be optimized, we are going to get the peer again on `send_message` let Some(peer) = &mut self.connected_peers.get_mut(peer_id) else { @@ -829,7 +824,7 @@ where let peer_partial = peer_partials.entry(group_id.clone()).or_default(); let Ok((message_data, rest_wanted)) = partial_message - .partial_message_bytes_from_metadata(&peer_partial.wanted) + .partial_message_bytes_from_metadata(&peer_partial.metadata) .map(|(m, r)| (m.as_ref().to_vec(), r.map(|r| r.as_ref().to_vec()))) else { tracing::error!(peer = %peer_id, group_id = ?group_id, @@ -840,10 +835,10 @@ where match rest_wanted { // No new data to send peer. 
- Some(r) if r == peer_partial.wanted => { + Some(r) if r == peer_partial.metadata => { continue; } - Some(r) => peer_partial.wanted = r, + Some(r) => peer_partial.metadata = r, // Peer partial is now complete // remove it from the list None => { @@ -851,27 +846,21 @@ where } }; - let rpc = PartialMessage { - topic_id: topic_id.clone(), - group_id: group_id.clone(), - iwant: missing_parts.clone(), - ihave: available_parts.clone(), - message: Some(message_data), - }; - - if self.send_message(*peer_id, RpcOut::PartialMessage(rpc.clone())) { - publish_failed = false; - } + self.send_message( + *peer_id, + RpcOut::PartialMessage { + message: message_data, + metadata: metadata.clone(), + group_id: group_id.clone(), + topic_id: topic_id.clone(), + }, + ); } if recipient_peers.is_empty() { return Err(PublishError::NoPeersSubscribedToTopic); } - if publish_failed { - return Err(PublishError::AllQueuesFull(recipient_peers.len())); - } - Ok(()) } @@ -1673,17 +1662,12 @@ where .or_default(); // Noop if the received partial is the same we already have. - if partial_message.ihave.as_ref() == Some(&peer_partial.has) - && partial_message.iwant.as_ref() == Some(&peer_partial.wanted) - { + if partial_message.metadata.as_ref() == Some(&peer_partial.metadata) { return; } - if let Some(ref iwant) = partial_message.iwant { - peer_partial.wanted = iwant.clone(); - } - if let Some(ref ihave) = partial_message.ihave { - peer_partial.has = ihave.clone(); + if let Some(ref metadata) = partial_message.metadata { + peer_partial.metadata = metadata.clone(); } self.events @@ -1692,8 +1676,7 @@ where propagation_source: *peer_id, group_id: partial_message.group_id, message: partial_message.message, - iwant: partial_message.iwant, - ihave: partial_message.ihave, + metadata: partial_message.metadata, })); } diff --git a/protocols/gossipsub/src/generated/gossipsub/pb.rs b/protocols/gossipsub/src/generated/gossipsub/pb.rs index 14c3ec8d6cd..2636d9c7bba 100644 --- a/protocols/gossipsub/src/generated/gossipsub/pb.rs +++ b/protocols/gossipsub/src/generated/gossipsub/pb.rs @@ -667,9 +667,8 @@ impl<'a> From<&'a str> for EncMode { pub struct PartialMessagesExtension { pub topicID: Option>, pub groupID: Option>, - pub message: Option, - pub iwant: Option, - pub ihave: Option, + pub partialMessage: Option>, + pub partsMetadata: Option>, } impl<'a> MessageRead<'a> for PartialMessagesExtension { @@ -679,9 +678,8 @@ impl<'a> MessageRead<'a> for PartialMessagesExtension { match r.next_tag(bytes) { Ok(10) => msg.topicID = Some(r.read_bytes(bytes)?.to_owned()), Ok(18) => msg.groupID = Some(r.read_bytes(bytes)?.to_owned()), - Ok(26) => msg.message = Some(r.read_message::(bytes)?), - Ok(34) => msg.iwant = Some(r.read_message::(bytes)?), - Ok(42) => msg.ihave = Some(r.read_message::(bytes)?), + Ok(26) => msg.partialMessage = Some(r.read_bytes(bytes)?.to_owned()), + Ok(34) => msg.partsMetadata = Some(r.read_bytes(bytes)?.to_owned()), Ok(t) => { r.read_unknown(bytes, t)?; } Err(e) => return Err(e), } @@ -695,126 +693,15 @@ impl MessageWrite for PartialMessagesExtension { 0 + self.topicID.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + self.groupID.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) - + self.message.as_ref().map_or(0, |m| 1 + sizeof_len((m).get_size())) - + self.iwant.as_ref().map_or(0, |m| 1 + sizeof_len((m).get_size())) - + self.ihave.as_ref().map_or(0, |m| 1 + sizeof_len((m).get_size())) + + self.partialMessage.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + + self.partsMetadata.as_ref().map_or(0, |m| 1 + 
sizeof_len((m).len())) } fn write_message(&self, w: &mut Writer) -> Result<()> { if let Some(ref s) = self.topicID { w.write_with_tag(10, |w| w.write_bytes(&**s))?; } if let Some(ref s) = self.groupID { w.write_with_tag(18, |w| w.write_bytes(&**s))?; } - if let Some(ref s) = self.message { w.write_with_tag(26, |w| w.write_message(s))?; } - if let Some(ref s) = self.iwant { w.write_with_tag(34, |w| w.write_message(s))?; } - if let Some(ref s) = self.ihave { w.write_with_tag(42, |w| w.write_message(s))?; } - Ok(()) - } -} - -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Debug, Default, PartialEq, Clone)] -pub struct PartialMessage { - pub data: Option>, -} - -impl<'a> MessageRead<'a> for PartialMessage { - fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { - let mut msg = Self::default(); - while !r.is_eof() { - match r.next_tag(bytes) { - Ok(10) => msg.data = Some(r.read_bytes(bytes)?.to_owned()), - Ok(t) => { r.read_unknown(bytes, t)?; } - Err(e) => return Err(e), - } - } - Ok(msg) - } -} - -impl MessageWrite for PartialMessage { - fn get_size(&self) -> usize { - 0 - + self.data.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) - } - - fn write_message(&self, w: &mut Writer) -> Result<()> { - if let Some(ref s) = self.data { w.write_with_tag(10, |w| w.write_bytes(&**s))?; } - Ok(()) - } -} - -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Debug, Default, PartialEq, Clone)] -pub struct PartialIWANT { - pub metadata: Option>, -} - -impl<'a> MessageRead<'a> for PartialIWANT { - fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { - let mut msg = Self::default(); - while !r.is_eof() { - match r.next_tag(bytes) { - Ok(10) => msg.metadata = Some(r.read_bytes(bytes)?.to_owned()), - Ok(t) => { r.read_unknown(bytes, t)?; } - Err(e) => return Err(e), - } - } - Ok(msg) - } -} - -impl MessageWrite for PartialIWANT { - fn get_size(&self) -> usize { - 0 - + self.metadata.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) - } - - fn write_message(&self, w: &mut Writer) -> Result<()> { - if let Some(ref s) = self.metadata { w.write_with_tag(10, |w| w.write_bytes(&**s))?; } - Ok(()) - } -} - -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Debug, Default, PartialEq, Clone)] -pub struct PartialIDONTWANT { } - -impl<'a> MessageRead<'a> for PartialIDONTWANT { - fn from_reader(r: &mut BytesReader, _: &[u8]) -> Result { - r.read_to_end(); - Ok(Self::default()) - } -} - -impl MessageWrite for PartialIDONTWANT { } - -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Debug, Default, PartialEq, Clone)] -pub struct PartialIHAVE { - pub metadata: Option>, -} - -impl<'a> MessageRead<'a> for PartialIHAVE { - fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { - let mut msg = Self::default(); - while !r.is_eof() { - match r.next_tag(bytes) { - Ok(10) => msg.metadata = Some(r.read_bytes(bytes)?.to_owned()), - Ok(t) => { r.read_unknown(bytes, t)?; } - Err(e) => return Err(e), - } - } - Ok(msg) - } -} - -impl MessageWrite for PartialIHAVE { - fn get_size(&self) -> usize { - 0 - + self.metadata.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) - } - - fn write_message(&self, w: &mut Writer) -> Result<()> { - if let Some(ref s) = self.metadata { w.write_with_tag(10, |w| w.write_bytes(&**s))?; } + if let Some(ref s) = self.partialMessage { w.write_with_tag(26, |w| w.write_bytes(&**s))?; } + if let Some(ref s) = self.partsMetadata { w.write_with_tag(34, |w| w.write_bytes(&**s))?; } Ok(()) } } diff --git a/protocols/gossipsub/src/generated/rpc.proto 
b/protocols/gossipsub/src/generated/rpc.proto index f571357b9dc..d7b309ffa5d 100644 --- a/protocols/gossipsub/src/generated/rpc.proto +++ b/protocols/gossipsub/src/generated/rpc.proto @@ -112,21 +112,9 @@ message PartialMessagesExtension { optional bytes topicID = 1; optional bytes groupID = 2; - optional PartialMessage message = 3; - optional PartialIWANT iwant = 4; - optional PartialIHAVE ihave = 5; -} - -message PartialMessage { - optional bytes data = 1; -} - -message PartialIWANT { - optional bytes metadata = 1; -} - -message PartialIDONTWANT {} + // An encoded partial message. + optional bytes partialMessage = 3; -message PartialIHAVE { - optional bytes metadata = 1; + // An encoded representation of the parts a peer has and wants. + optional bytes partsMetadata = 4; } diff --git a/protocols/gossipsub/src/partial.rs b/protocols/gossipsub/src/partial.rs index b6818eee3a6..61d3b352a7e 100644 --- a/protocols/gossipsub/src/partial.rs +++ b/protocols/gossipsub/src/partial.rs @@ -40,25 +40,14 @@ pub trait Partial { /// during reconstruction. fn group_id(&self) -> impl AsRef<[u8]>; - /// Returns metadata describing which parts of the message are missing. + /// Returns metadata describing which parts of the message are available and which are wanted. /// /// This metadata is application-defined and should encode information about - /// what parts need to be requested from other peers. Returns `None` if the - /// message is complete or if no specific parts can be identified as missing. + /// which parts this peer already has and which parts it still wants. /// - /// The returned bytes will be sent in PartialIWANT messages to request - /// missing parts from peers. - fn missing_parts(&self) -> Option<impl AsRef<[u8]>>; - - /// Returns metadata describing which parts of the message are available. - /// - /// This metadata is application-defined and should encode information about - /// what parts this peer can provide to others. Returns `None` if no parts - /// are available. - /// - /// The returned bytes will be sent in PartialIHAVE messages to advertise - /// available parts to peers. - fn available_parts(&self) -> Option<impl AsRef<[u8]>>; + /// The returned bytes will be sent in the partsMetadata field to advertise + /// available and wanted parts to peers. + fn parts_metadata(&self) -> impl AsRef<[u8]>; /// Generates partial message bytes from the given metadata.
/// diff --git a/protocols/gossipsub/src/protocol.rs b/protocols/gossipsub/src/protocol.rs index 82326424cc3..69836b63c73 100644 --- a/protocols/gossipsub/src/protocol.rs +++ b/protocols/gossipsub/src/protocol.rs @@ -29,15 +29,16 @@ use libp2p_identity::{PeerId, PublicKey}; use libp2p_swarm::StreamProtocol; use quick_protobuf::{MessageWrite, Writer}; +#[cfg(feature = "partial_messages")] +use crate::types::PartialMessage; use crate::{ config::ValidationMode, handler::HandlerEvent, rpc_proto::proto, topic::TopicHash, types::{ - ControlAction, Extensions, Graft, IDontWant, IHave, IWant, MessageId, PartialMessage, - PeerInfo, PeerKind, Prune, RawMessage, RpcIn, Subscription, SubscriptionAction, - TestExtension, + ControlAction, Extensions, Graft, IDontWant, IHave, IWant, MessageId, PeerInfo, PeerKind, + Prune, RawMessage, RpcIn, Subscription, SubscriptionAction, TestExtension, }, ValidationError, }; @@ -576,6 +577,7 @@ impl Decoder for GossipsubCodec { control_msgs.push(ControlAction::Extensions(extensions_msg)); } + #[cfg(feature = "partial_messages")] let partial_message = rpc.partial.and_then(|partial_proto| { // Extract topic and group context let Some(topic_id_bytes) = partial_proto.topicID else { @@ -592,9 +594,8 @@ impl Decoder for GossipsubCodec { Some(PartialMessage { topic_id, group_id, - iwant: partial_proto.iwant.and_then(|iwant| iwant.metadata), - ihave: partial_proto.ihave.and_then(|ihave| ihave.metadata), - message: partial_proto.message.and_then(|message| message.data), + metadata: partial_proto.partsMetadata, + message: partial_proto.partialMessage, }) }); @@ -615,6 +616,7 @@ impl Decoder for GossipsubCodec { .collect(), control_msgs, test_extension: rpc.testExtension.map(|_test_extension| TestExtension {}), + #[cfg(feature = "partial_messages")] partial_message, }, invalid_messages, diff --git a/protocols/gossipsub/src/queue.rs b/protocols/gossipsub/src/queue.rs index d95e720357e..0c00808c53b 100644 --- a/protocols/gossipsub/src/queue.rs +++ b/protocols/gossipsub/src/queue.rs @@ -75,7 +75,7 @@ impl Queue { | RpcOut::Forward { .. } | RpcOut::IHave(_) | RpcOut::Extensions(_) - | RpcOut::PartialMessage(_) + | RpcOut::PartialMessage { .. } | RpcOut::TestExtension | RpcOut::IWant(_) => self.non_priority.try_push(message), } diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs index fffe5a7802c..140090ac1d7 100644 --- a/protocols/gossipsub/src/types.rs +++ b/protocols/gossipsub/src/types.rs @@ -114,10 +114,8 @@ pub(crate) struct PeerDetails { #[cfg(feature = "partial_messages")] #[derive(Debug)] pub(crate) struct PartialData { - /// The remaining metada needed by the peer - pub(crate) wanted: Vec, - /// The last partial IHAVE received from the peer. - pub(crate) has: Vec, + /// The current peer partial metadata. + pub(crate) metadata: Vec, /// The remaining heartbeats for this message to be deleted. pub(crate) ttl: usize, } @@ -126,8 +124,7 @@ pub(crate) struct PartialData { impl Default for PartialData { fn default() -> Self { Self { - wanted: Default::default(), - has: Default::default(), + metadata: Default::default(), ttl: 5, } } @@ -335,17 +332,16 @@ pub struct IDontWant { } /// A received partial message. +#[cfg(feature = "partial_messages")] #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct PartialMessage { /// The topic ID this partial message belongs to. pub topic_id: TopicHash, /// The group ID that identifies the complete logical message. pub group_id: Vec, - /// The partial parts we want. 
- pub iwant: Option>, - /// The partial parts we have. - pub ihave: Option>, - /// The partial message itself + /// The partial metadata we have and we want. + pub metadata: Option>, + /// The partial message itself. pub message: Option>, } @@ -396,7 +392,16 @@ pub enum RpcOut { /// Send a test extension message. TestExtension, /// Send a partial messages extension. - PartialMessage(PartialMessage), + PartialMessage { + /// The group ID that identifies the complete logical message. + group_id: Vec, + /// The topic ID this partial message belongs to. + topic_id: TopicHash, + /// The partial message itself. + message: Vec, + /// The partial metadata we have and want. + metadata: Vec, + }, } impl RpcOut { @@ -583,13 +588,12 @@ impl From for proto::RPC { testExtension: Some(proto::TestExtension {}), partial: None, }, - RpcOut::PartialMessage(PartialMessage { + RpcOut::PartialMessage { topic_id, group_id, - iwant, - ihave, + metadata, message, - }) => proto::RPC { + } => proto::RPC { subscriptions: vec![], publish: vec![], control: None, @@ -597,13 +601,8 @@ impl From for proto::RPC { partial: Some(proto::PartialMessagesExtension { topicID: Some(topic_id.as_str().as_bytes().to_vec()), groupID: Some(group_id), - message: message.map(|data| proto::PartialMessage { data: Some(data) }), - iwant: iwant.map(|metadata| proto::PartialIWANT { - metadata: Some(metadata), - }), - ihave: ihave.map(|metadata| proto::PartialIHAVE { - metadata: Some(metadata), - }), + partialMessage: Some(message), + partsMetadata: Some(metadata), }), }, } @@ -622,6 +621,7 @@ pub struct RpcIn { /// Gossipsub test extension. pub test_extension: Option, /// Partial messages extension. + #[cfg(feature = "partial_messages")] pub partial_message: Option, } @@ -637,6 +637,7 @@ impl fmt::Debug for RpcIn { if !self.control_msgs.is_empty() { b.field("control_msgs", &self.control_msgs); } + #[cfg(feature = "partial_messages")] b.field("partial_messages", &self.partial_message); b.finish() From b6478d5ec86a7fe31645aecfb0d92d8f65a919c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Mon, 29 Sep 2025 22:09:49 +0100 Subject: [PATCH 15/68] update extension message priority to highest --- protocols/gossipsub/src/queue.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/protocols/gossipsub/src/queue.rs b/protocols/gossipsub/src/queue.rs index 0c00808c53b..e067d74c662 100644 --- a/protocols/gossipsub/src/queue.rs +++ b/protocols/gossipsub/src/queue.rs @@ -62,7 +62,7 @@ impl Queue { /// which will only happen for control and non priority messages. pub(crate) fn try_push(&mut self, message: RpcOut) -> Result<(), Box> { match message { - RpcOut::Subscribe(_) | RpcOut::Unsubscribe(_) => { + RpcOut::Extensions(_) | RpcOut::Subscribe(_) | RpcOut::Unsubscribe(_) => { self.priority .try_push(message) .expect("Shared is unbounded"); @@ -74,7 +74,6 @@ impl Queue { RpcOut::Publish { .. } | RpcOut::Forward { .. } | RpcOut::IHave(_) - | RpcOut::Extensions(_) | RpcOut::PartialMessage { .. 
} | RpcOut::TestExtension | RpcOut::IWant(_) => self.non_priority.try_push(message), From 5e98f1a1689eb74d119fe816b7ed0b4a3ee2d655 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Mon, 29 Sep 2025 22:13:36 +0100 Subject: [PATCH 16/68] add missing proto field SubOpts partial --- protocols/gossipsub/src/generated/gossipsub/pb.rs | 4 ++++ protocols/gossipsub/src/generated/rpc.proto | 4 ++++ protocols/gossipsub/src/types.rs | 2 ++ 3 files changed, 10 insertions(+) diff --git a/protocols/gossipsub/src/generated/gossipsub/pb.rs b/protocols/gossipsub/src/generated/gossipsub/pb.rs index 2636d9c7bba..72c674f9fdf 100644 --- a/protocols/gossipsub/src/generated/gossipsub/pb.rs +++ b/protocols/gossipsub/src/generated/gossipsub/pb.rs @@ -70,6 +70,7 @@ use super::*; pub struct SubOpts { pub subscribe: Option, pub topic_id: Option, + pub partial: Option, } impl<'a> MessageRead<'a> for SubOpts { @@ -79,6 +80,7 @@ impl<'a> MessageRead<'a> for SubOpts { match r.next_tag(bytes) { Ok(8) => msg.subscribe = Some(r.read_bool(bytes)?), Ok(18) => msg.topic_id = Some(r.read_string(bytes)?.to_owned()), + Ok(24) => msg.partial = Some(r.read_bool(bytes)?), Ok(t) => { r.read_unknown(bytes, t)?; } Err(e) => return Err(e), } @@ -92,11 +94,13 @@ impl MessageWrite for SubOpts { 0 + self.subscribe.as_ref().map_or(0, |m| 1 + sizeof_varint(*(m) as u64)) + self.topic_id.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + + self.partial.as_ref().map_or(0, |m| 1 + sizeof_varint(*(m) as u64)) } fn write_message(&self, w: &mut Writer) -> Result<()> { if let Some(ref s) = self.subscribe { w.write_with_tag(8, |w| w.write_bool(*s))?; } if let Some(ref s) = self.topic_id { w.write_with_tag(18, |w| w.write_string(&**s))?; } + if let Some(ref s) = self.partial { w.write_with_tag(24, |w| w.write_bool(*s))?; } Ok(()) } } diff --git a/protocols/gossipsub/src/generated/rpc.proto b/protocols/gossipsub/src/generated/rpc.proto index d7b309ffa5d..bb908db2756 100644 --- a/protocols/gossipsub/src/generated/rpc.proto +++ b/protocols/gossipsub/src/generated/rpc.proto @@ -9,6 +9,10 @@ message RPC { message SubOpts { optional bool subscribe = 1; // subscribe or unsubscribe optional string topic_id = 2; + // Used with Partial Messages extension. + // If set, the receiver of this message MUST send partial messages to the + // sender instead of full messages. 
+ optional bool partial = 3; } optional ControlMessage control = 3; diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs index 140090ac1d7..0758b648c5d 100644 --- a/protocols/gossipsub/src/types.rs +++ b/protocols/gossipsub/src/types.rs @@ -447,6 +447,7 @@ impl From for proto::RPC { subscriptions: vec![proto::SubOpts { subscribe: Some(true), topic_id: Some(topic.into_string()), + partial: None, }], control: None, testExtension: None, @@ -457,6 +458,7 @@ impl From for proto::RPC { subscriptions: vec![proto::SubOpts { subscribe: Some(false), topic_id: Some(topic.into_string()), + partial: None, }], control: None, testExtension: None, From f77490cc0a580f133bc9fbeb94c83faefd57369a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Tue, 30 Sep 2025 16:04:42 +0100 Subject: [PATCH 17/68] implement subscription options partial --- protocols/gossipsub/src/behaviour.rs | 52 +++-- protocols/gossipsub/src/behaviour/tests.rs | 215 ++++++++++++++++-- protocols/gossipsub/src/protocol.rs | 2 + .../gossipsub/src/subscription_filter.rs | 44 ++++ protocols/gossipsub/src/topic.rs | 18 +- protocols/gossipsub/src/types.rs | 7 +- 6 files changed, 292 insertions(+), 46 deletions(-) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 0b6200a59ed..37d3974c216 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -65,7 +65,7 @@ use crate::{ rpc_proto::proto, subscription_filter::{AllowAllSubscriptionFilter, TopicSubscriptionFilter}, time_cache::DuplicateCache, - topic::{Hasher, Topic, TopicHash}, + topic::{Hasher, SubscribedTopic, Topic, TopicHash}, transform::{DataTransform, IdentityTransform}, types::{ ControlAction, Extensions, Graft, IDontWant, IHave, IWant, Message, MessageAcceptance, @@ -509,7 +509,7 @@ where pub fn all_peers(&self) -> impl Iterator)> { self.connected_peers .iter() - .map(|(peer_id, peer)| (peer_id, peer.topics.iter().collect())) + .map(|(peer_id, peer)| (peer_id, peer.topics.iter().map(|s| &s.topic).collect())) } /// Lists all known peers and their associated protocol. @@ -1458,7 +1458,9 @@ where // For each topic, if a peer has grafted us, then we necessarily must be in their mesh // and they must be subscribed to the topic. Ensure we have recorded the mapping. for topic in &topics { - if connected_peer.topics.insert(topic.clone()) { + let mut subscribed = SubscribedTopic::default(); + subscribed.topic = topic.clone(); + if connected_peer.topics.insert(subscribed) { #[cfg(feature = "metrics")] if let Some(m) = self.metrics.as_mut() { m.inc_topic_peers(topic); @@ -2074,10 +2076,10 @@ where // Notify the application about the subscription, after the grafts are sent. 
let mut application_event = Vec::new(); - let filtered_topics = match self - .subscription_filter - .filter_incoming_subscriptions(subscriptions, &peer.topics) - { + let filtered_topics = match self.subscription_filter.filter_incoming_subscriptions( + subscriptions, + &peer.topics.iter().map(|s| s.topic.clone()).collect(), + ) { Ok(topics) => topics, Err(s) => { tracing::error!( @@ -2095,7 +2097,13 @@ where match subscription.action { SubscriptionAction::Subscribe => { - if peer.topics.insert(topic_hash.clone()) { + let mut subscribed_topic = SubscribedTopic::default(); + subscribed_topic.topic = topic_hash.clone(); + #[cfg(feature = "partial_messages")] + { + subscribed_topic.partial = subscription.partial; + } + if peer.topics.insert(subscribed_topic) { tracing::debug!( peer=%propagation_source, topic=%topic_hash, @@ -3151,8 +3159,8 @@ where // If there are more connections and this peer is in a mesh, inform the first // connection handler. if !peer.connections.is_empty() { - for topic in &peer.topics { - if let Some(mesh_peers) = self.mesh.get(topic) { + for subscribed_topic in &peer.topics { + if let Some(mesh_peers) = self.mesh.get(&subscribed_topic.topic) { if mesh_peers.contains(&peer_id) { self.events.push_back(ToSwarm::NotifyHandler { peer_id, @@ -3174,27 +3182,27 @@ where }; // remove peer from all mappings - for topic in &connected_peer.topics { + for subscribed_topic in &connected_peer.topics { // check the mesh for the topic - if let Some(mesh_peers) = self.mesh.get_mut(topic) { + if let Some(mesh_peers) = self.mesh.get_mut(&subscribed_topic.topic) { // check if the peer is in the mesh and remove it if mesh_peers.remove(&peer_id) { #[cfg(feature = "metrics")] if let Some(m) = self.metrics.as_mut() { - m.peers_removed(topic, Churn::Dc, 1); - m.set_mesh_peers(topic, mesh_peers.len()); + m.peers_removed(&subscribed_topic.topic, Churn::Dc, 1); + m.set_mesh_peers(&subscribed_topic.topic, mesh_peers.len()); } }; } #[cfg(feature = "metrics")] if let Some(m) = self.metrics.as_mut() { - m.dec_topic_peers(topic); + m.dec_topic_peers(&subscribed_topic.topic); } // remove from fanout self.fanout - .get_mut(topic) + .get_mut(&subscribed_topic.topic) .map(|peers| peers.remove(&peer_id)); } @@ -3623,9 +3631,9 @@ fn peer_added_to_mesh( }; if let Some(peer) = connections.get(&peer_id) { - for topic in &peer.topics { - if !new_topics.contains(&topic) { - if let Some(mesh_peers) = mesh.get(topic) { + for subscribed_topic in &peer.topics { + if !new_topics.contains(&&subscribed_topic.topic) { + if let Some(mesh_peers) = mesh.get(&subscribed_topic.topic) { if mesh_peers.contains(&peer_id) { // the peer is already in a mesh for another topic return; @@ -3665,9 +3673,9 @@ fn peer_removed_from_mesh( }; if let Some(peer) = connections.get(&peer_id) { - for topic in &peer.topics { - if topic != old_topic { - if let Some(mesh_peers) = mesh.get(topic) { + for subscribed_topic in &peer.topics { + if &subscribed_topic.topic != old_topic { + if let Some(mesh_peers) = mesh.get(&subscribed_topic.topic) { if mesh_peers.contains(&peer_id) { // the peer exists in another mesh still return; diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index 931ee20dab4..6d74e56e3ad 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -33,6 +33,7 @@ use crate::{ config::{ConfigBuilder, TopicMeshConfig}, protocol::GossipsubCodec, subscription_filter::WhitelistSubscriptionFilter, + topic::SubscribedTopic, 
types::{ControlAction, Extensions, RpcIn, RpcOut}, IdentTopic as Topic, }; @@ -173,8 +174,6 @@ fn inject_nodes1() -> InjectNodes InjectNodes::::default() } -// helper functions for testing - fn add_peer( gs: &mut Behaviour, topic_hashes: &[TopicHash], @@ -248,6 +247,7 @@ where messages: queue, dont_send: LinkedHashMap::new(), extensions: None, + #[cfg(feature = "partial_messages")] partial_messages: Default::default(), }, ); @@ -277,6 +277,8 @@ where .map(|t| Subscription { action: SubscriptionAction::Subscribe, topic_hash: t, + #[cfg(feature = "partial_messages")] + partial: false, }) .collect::>(), &peer, @@ -416,10 +418,13 @@ fn proto_to_message(rpc: &proto::RPC) -> RpcIn { SubscriptionAction::Unsubscribe }, topic_hash: TopicHash::from_raw(sub.topic_id.unwrap_or_default()), + #[cfg(feature = "partial_messages")] + partial: false, }) .collect(), control_msgs, test_extension: None, + #[cfg(feature = "partial_messages")] partial_message: None, } } @@ -638,6 +643,7 @@ fn test_join() { messages: queue, dont_send: LinkedHashMap::new(), extensions: None, + #[cfg(feature = "partial_messages")] partial_messages: Default::default(), }, ); @@ -886,7 +892,19 @@ fn test_inject_connected() { for peer in peers { let peer = gs.connected_peers.get(&peer).unwrap(); assert!( - peer.topics == topic_hashes.iter().cloned().collect(), + peer.topics + == topic_hashes + .iter() + .map(|t| { + let mut st = SubscribedTopic::default(); + st.topic = t.clone(); + #[cfg(feature = "partial_messages")] + { + st.partial = false; + } + st + }) + .collect(), "The topics for each node should all topics" ); } @@ -917,12 +935,16 @@ fn test_handle_received_subscriptions() { .map(|topic_hash| Subscription { action: SubscriptionAction::Subscribe, topic_hash: topic_hash.clone(), + #[cfg(feature = "partial_messages")] + partial: false, }) .collect::>(); subscriptions.push(Subscription { action: SubscriptionAction::Unsubscribe, topic_hash: topic_hashes[topic_hashes.len() - 1].clone(), + #[cfg(feature = "partial_messages")] + partial: false, }); let unknown_peer = PeerId::random(); @@ -941,7 +963,15 @@ fn test_handle_received_subscriptions() { == topic_hashes .iter() .take(3) - .cloned() + .map(|t| { + let mut st = SubscribedTopic::default(); + st.topic = t.clone(); + #[cfg(feature = "partial_messages")] + { + st.partial = false; + } + st + }) .collect::>(), "First peer should be subscribed to three topics" ); @@ -951,7 +981,15 @@ fn test_handle_received_subscriptions() { == topic_hashes .iter() .take(3) - .cloned() + .map(|t| { + let mut st = SubscribedTopic::default(); + st.topic = t.clone(); + #[cfg(feature = "partial_messages")] + { + st.partial = false; + } + st + }) .collect::>(), "Second peer should be subscribed to three topics" ); @@ -965,7 +1003,7 @@ fn test_handle_received_subscriptions() { let topic_peers = gs .connected_peers .iter() - .filter(|(_, p)| p.topics.contains(topic_hash)) + .filter(|(_, p)| p.topics.iter().any(|st| &st.topic == topic_hash)) .map(|(peer_id, _)| *peer_id) .collect::>(); assert!( @@ -980,13 +1018,27 @@ fn test_handle_received_subscriptions() { &[Subscription { action: SubscriptionAction::Unsubscribe, topic_hash: topic_hashes[0].clone(), + #[cfg(feature = "partial_messages")] + partial: false, }], &peers[0], ); let peer = gs.connected_peers.get(&peers[0]).unwrap(); assert!( - peer.topics == topic_hashes[1..3].iter().cloned().collect::>(), + peer.topics + == topic_hashes[1..3] + .iter() + .map(|t| { + let mut st = SubscribedTopic::default(); + st.topic = t.clone(); + #[cfg(feature = 
"partial_messages")] + { + st.partial = false; + } + st + }) + .collect::>(), "Peer should be subscribed to two topics" ); @@ -994,7 +1046,7 @@ fn test_handle_received_subscriptions() { let topic_peers = gs .connected_peers .iter() - .filter(|(_, p)| p.topics.contains(&topic_hashes[0])) + .filter(|(_, p)| p.topics.iter().any(|st| st.topic == topic_hashes[0])) .map(|(peer_id, _)| *peer_id) .collect::>(); @@ -1030,10 +1082,19 @@ fn test_get_random_peers() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics.clone(), + topics: topics.iter().map(|t| { + let mut st = SubscribedTopic::default(); + st.topic = t.clone(); + #[cfg(feature = "partial_messages")] + { + st.partial = false; + } + st + }).collect(), messages: Queue::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), extensions: None, + #[cfg(feature = "partial_messages")] partial_messages: Default::default(), }, ); @@ -1238,6 +1299,7 @@ fn test_handle_iwant_msg_but_already_sent_idontwant() { message_ids: vec![msg_id.clone()], })], test_extension: None, + #[cfg(feature = "partial_messages")] partial_message: None, }; gs.on_connection_handler_event( @@ -1711,6 +1773,8 @@ fn explicit_peers_not_added_to_mesh_on_subscribe() { &[Subscription { action: SubscriptionAction::Subscribe, topic_hash: topic_hash.clone(), + #[cfg(feature = "partial_messages")] + partial: false, }], peer, ); @@ -1759,6 +1823,8 @@ fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() { &[Subscription { action: SubscriptionAction::Subscribe, topic_hash: topic_hash.clone(), + #[cfg(feature = "partial_messages")] + partial: false, }], peer, ); @@ -3083,6 +3149,8 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() { let subscription = Subscription { action: SubscriptionAction::Subscribe, topic_hash: topics[0].clone(), + #[cfg(feature = "partial_messages")] + partial: false, }; let control_action = ControlAction::IHave(IHave { @@ -3103,6 +3171,7 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() { subscriptions: vec![subscription.clone()], control_msgs: vec![control_action], test_extension: None, + #[cfg(feature = "partial_messages")] partial_message: None, }, invalid_messages: Vec::new(), @@ -3131,6 +3200,7 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() { subscriptions: vec![subscription], control_msgs: vec![control_action], test_extension: None, + #[cfg(feature = "partial_messages")] partial_message: None, }, invalid_messages: Vec::new(), @@ -3743,6 +3813,7 @@ fn test_scoring_p4_invalid_signature() { subscriptions: vec![], control_msgs: vec![], test_extension: None, + #[cfg(feature = "partial_messages")] partial_message: None, }, invalid_messages: vec![(m, ValidationError::InvalidSignature)], @@ -5499,6 +5570,7 @@ fn parses_idontwant() { message_ids: vec![message_id.clone()], })], test_extension: None, + #[cfg(feature = "partial_messages")] partial_message: None, }; gs.on_connection_handler_event( @@ -5556,10 +5628,19 @@ fn test_all_queues_full() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics.clone(), + topics: topics.iter().map(|t| { + let mut st = SubscribedTopic::default(); + st.topic = t.clone(); + #[cfg(feature = "partial_messages")] + { + st.partial = false; + } + st + }).collect(), messages: Queue::new(1), dont_send: LinkedHashMap::new(), extensions: None, + #[cfg(feature = "partial_messages")] partial_messages: Default::default(), }, ); @@ -5595,10 +5676,19 @@ fn 
test_slow_peer_returns_failed_publish() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics.clone(), + topics: topics.iter().map(|t| { + let mut st = SubscribedTopic::default(); + st.topic = t.clone(); + #[cfg(feature = "partial_messages")] + { + st.partial = false; + } + st + }).collect(), messages: Queue::new(1), dont_send: LinkedHashMap::new(), extensions: None, + #[cfg(feature = "partial_messages")] partial_messages: Default::default(), }, ); @@ -5610,10 +5700,19 @@ fn test_slow_peer_returns_failed_publish() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics.clone(), + topics: topics.iter().map(|t| { + let mut st = SubscribedTopic::default(); + st.topic = t.clone(); + #[cfg(feature = "partial_messages")] + { + st.partial = false; + } + st + }).collect(), messages: Queue::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), extensions: None, + #[cfg(feature = "partial_messages")] partial_messages: Default::default(), }, ); @@ -5664,10 +5763,19 @@ fn test_slow_peer_returns_failed_ihave_handling() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics.clone(), + topics: topics.iter().map(|t| { + let mut st = SubscribedTopic::default(); + st.topic = t.clone(); + #[cfg(feature = "partial_messages")] + { + st.partial = false; + } + st + }).collect(), messages: Queue::new(1), dont_send: LinkedHashMap::new(), extensions: None, + #[cfg(feature = "partial_messages")] partial_messages: Default::default(), }, ); @@ -5683,10 +5791,19 @@ fn test_slow_peer_returns_failed_ihave_handling() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics.clone(), + topics: topics.iter().map(|t| { + let mut st = SubscribedTopic::default(); + st.topic = t.clone(); + #[cfg(feature = "partial_messages")] + { + st.partial = false; + } + st + }).collect(), messages: Queue::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), extensions: None, + #[cfg(feature = "partial_messages")] partial_messages: Default::default(), }, ); @@ -5773,10 +5890,19 @@ fn test_slow_peer_returns_failed_iwant_handling() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics.clone(), + topics: topics.iter().map(|t| { + let mut st = SubscribedTopic::default(); + st.topic = t.clone(); + #[cfg(feature = "partial_messages")] + { + st.partial = false; + } + st + }).collect(), messages: Queue::new(1), dont_send: LinkedHashMap::new(), extensions: None, + #[cfg(feature = "partial_messages")] partial_messages: Default::default(), }, ); @@ -5792,10 +5918,19 @@ fn test_slow_peer_returns_failed_iwant_handling() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics.clone(), + topics: topics.iter().map(|t| { + let mut st = SubscribedTopic::default(); + st.topic = t.clone(); + #[cfg(feature = "partial_messages")] + { + st.partial = false; + } + st + }).collect(), messages: Queue::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), extensions: None, + #[cfg(feature = "partial_messages")] partial_messages: Default::default(), }, ); @@ -5862,10 +5997,19 @@ fn test_slow_peer_returns_failed_forward() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - 
topics: topics.clone(), + topics: topics.iter().map(|t| { + let mut st = SubscribedTopic::default(); + st.topic = t.clone(); + #[cfg(feature = "partial_messages")] + { + st.partial = false; + } + st + }).collect(), messages: Queue::new(1), dont_send: LinkedHashMap::new(), extensions: None, + #[cfg(feature = "partial_messages")] partial_messages: Default::default(), }, ); @@ -5881,10 +6025,19 @@ fn test_slow_peer_returns_failed_forward() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics.clone(), + topics: topics.iter().map(|t| { + let mut st = SubscribedTopic::default(); + st.topic = t.clone(); + #[cfg(feature = "partial_messages")] + { + st.partial = false; + } + st + }).collect(), messages: Queue::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), extensions: None, + #[cfg(feature = "partial_messages")] partial_messages: Default::default(), }, ); @@ -5956,10 +6109,19 @@ fn test_slow_peer_is_downscored_on_publish() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics.clone(), + topics: topics.iter().map(|t| { + let mut st = SubscribedTopic::default(); + st.topic = t.clone(); + #[cfg(feature = "partial_messages")] + { + st.partial = false; + } + st + }).collect(), messages: Queue::new(1), dont_send: LinkedHashMap::new(), extensions: None, + #[cfg(feature = "partial_messages")] partial_messages: Default::default(), }, ); @@ -5972,10 +6134,19 @@ fn test_slow_peer_is_downscored_on_publish() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics.clone(), + topics: topics.iter().map(|t| { + let mut st = SubscribedTopic::default(); + st.topic = t.clone(); + #[cfg(feature = "partial_messages")] + { + st.partial = false; + } + st + }).collect(), messages: Queue::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), extensions: None, + #[cfg(feature = "partial_messages")] partial_messages: Default::default(), }, ); @@ -6536,6 +6707,7 @@ fn test_validation_error_message_size_too_large_topic_specific() { subscriptions: vec![], control_msgs: vec![], test_extension: None, + #[cfg(feature = "partial_messages")] partial_message: None, }, invalid_messages: vec![], @@ -6644,6 +6816,7 @@ fn test_validation_message_size_within_topic_specific() { subscriptions: vec![], control_msgs: vec![], test_extension: None, + #[cfg(feature = "partial_messages")] partial_message: None, }, invalid_messages: vec![], @@ -6753,6 +6926,7 @@ fn test_handle_extensions_message() { messages, dont_send: LinkedHashMap::new(), extensions: None, + #[cfg(feature = "partial_messages")] partial_messages: Default::default(), }, ); @@ -6777,6 +6951,7 @@ fn test_handle_extensions_message() { partial_messages: None, }))], test_extension: None, + #[cfg(feature = "partial_messages")] partial_message: None, }; diff --git a/protocols/gossipsub/src/protocol.rs b/protocols/gossipsub/src/protocol.rs index 69836b63c73..09df77deb04 100644 --- a/protocols/gossipsub/src/protocol.rs +++ b/protocols/gossipsub/src/protocol.rs @@ -612,6 +612,8 @@ impl Decoder for GossipsubCodec { SubscriptionAction::Unsubscribe }, topic_hash: TopicHash::from_raw(sub.topic_id.unwrap_or_default()), + #[cfg(feature = "partial_messages")] + partial: sub.partial.unwrap_or_default(), }) .collect(), control_msgs, diff --git a/protocols/gossipsub/src/subscription_filter.rs b/protocols/gossipsub/src/subscription_filter.rs index 
c051b6c333b..893c5eefe21 100644 --- a/protocols/gossipsub/src/subscription_filter.rs +++ b/protocols/gossipsub/src/subscription_filter.rs @@ -225,22 +225,32 @@ mod test { Subscription { action: Unsubscribe, topic_hash: t1.clone(), + #[cfg(feature = "partial_messages")] + partial: false, }, Subscription { action: Unsubscribe, topic_hash: t2.clone(), + #[cfg(feature = "partial_messages")] + partial: false, }, Subscription { action: Subscribe, topic_hash: t2, + #[cfg(feature = "partial_messages")] + partial: false, }, Subscription { action: Subscribe, topic_hash: t1.clone(), + #[cfg(feature = "partial_messages")] + partial: false, }, Subscription { action: Unsubscribe, topic_hash: t1, + #[cfg(feature = "partial_messages")] + partial: false, }, ]; @@ -262,10 +272,14 @@ mod test { Subscription { action: Subscribe, topic_hash: t1, + #[cfg(feature = "partial_messages")] + partial: false, }, Subscription { action: Subscribe, topic_hash: t2, + #[cfg(feature = "partial_messages")] + partial: false, }, ]; @@ -291,14 +305,20 @@ mod test { Subscription { action: Subscribe, topic_hash: t1.clone(), + #[cfg(feature = "partial_messages")] + partial: false, }, Subscription { action: Unsubscribe, topic_hash: t1.clone(), + #[cfg(feature = "partial_messages")] + partial: false, }, Subscription { action: Subscribe, topic_hash: t1, + #[cfg(feature = "partial_messages")] + partial: false, }, ]; @@ -324,10 +344,14 @@ mod test { Subscription { action: Subscribe, topic_hash: t[2].clone(), + #[cfg(feature = "partial_messages")] + partial: false, }, Subscription { action: Subscribe, topic_hash: t[3].clone(), + #[cfg(feature = "partial_messages")] + partial: false, }, ]; @@ -353,22 +377,32 @@ mod test { Subscription { action: Subscribe, topic_hash: t[4].clone(), + #[cfg(feature = "partial_messages")] + partial: false, }, Subscription { action: Subscribe, topic_hash: t[2].clone(), + #[cfg(feature = "partial_messages")] + partial: false, }, Subscription { action: Subscribe, topic_hash: t[3].clone(), + #[cfg(feature = "partial_messages")] + partial: false, }, Subscription { action: Unsubscribe, topic_hash: t[0].clone(), + #[cfg(feature = "partial_messages")] + partial: false, }, Subscription { action: Unsubscribe, topic_hash: t[1].clone(), + #[cfg(feature = "partial_messages")] + partial: false, }, ]; @@ -390,10 +424,14 @@ mod test { Subscription { action: Subscribe, topic_hash: t1, + #[cfg(feature = "partial_messages")] + partial: false, }, Subscription { action: Subscribe, topic_hash: t2, + #[cfg(feature = "partial_messages")] + partial: false, }, ]; @@ -416,14 +454,20 @@ mod test { Subscription { action: Subscribe, topic_hash: t1, + #[cfg(feature = "partial_messages")] + partial: false, }, Subscription { action: Subscribe, topic_hash: t2, + #[cfg(feature = "partial_messages")] + partial: false, }, Subscription { action: Subscribe, topic_hash: t3, + #[cfg(feature = "partial_messages")] + partial: false, }, ]; diff --git a/protocols/gossipsub/src/topic.rs b/protocols/gossipsub/src/topic.rs index 53e9fe2c172..2c7d851574e 100644 --- a/protocols/gossipsub/src/topic.rs +++ b/protocols/gossipsub/src/topic.rs @@ -18,7 +18,7 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use std::fmt; +use std::{borrow::Borrow, fmt}; use base64::prelude::*; use quick_protobuf::Writer; @@ -65,7 +65,21 @@ impl Hasher for Sha256Hash { } } -#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +/// Topic subscribed by a peer. 
+#[derive(Debug, Default, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub(crate) struct SubscribedTopic { + pub(crate) topic: TopicHash, + #[cfg(feature = "partial_messages")] + pub(crate) partial: bool, +} + +impl Borrow for SubscribedTopic { + fn borrow(&self) -> &TopicHash { + &self.topic + } +} + +#[derive(Debug, Default, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] #[cfg_attr( feature = "metrics", derive(prometheus_client::encoding::EncodeLabelSet) diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs index 0758b648c5d..4a9357037e9 100644 --- a/protocols/gossipsub/src/types.rs +++ b/protocols/gossipsub/src/types.rs @@ -35,7 +35,7 @@ use serde::{Deserialize, Serialize}; use std::collections::HashMap; use web_time::Instant; -use crate::{queue::Queue, rpc_proto::proto, TopicHash}; +use crate::{queue::Queue, rpc_proto::proto, topic::SubscribedTopic, TopicHash}; /// Messages that have expired while attempting to be sent to a peer. #[derive(Clone, Debug, Default, PartialEq, Eq)] @@ -100,7 +100,7 @@ pub(crate) struct PeerDetails { /// Its current connections. pub(crate) connections: Vec, /// Subscribed topics. - pub(crate) topics: BTreeSet, + pub(crate) topics: BTreeSet, /// Don't send messages. pub(crate) dont_send: LinkedHashMap, /// Peer Partial messages. @@ -252,6 +252,9 @@ pub struct Subscription { pub action: SubscriptionAction, /// The topic from which to subscribe or unsubscribe. pub topic_hash: TopicHash, + /// Peer only wants to receive partial messages instead of full messages. + #[cfg(feature = "partial_messages")] + pub partial: bool, } /// Action that a subscription wants to perform. From 1bfbc55821cc78093b63c3696fd16a91379c8049 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Tue, 30 Sep 2025 18:12:35 +0100 Subject: [PATCH 18/68] publish only full messages to partial false peers --- protocols/gossipsub/src/behaviour.rs | 99 +++++--- protocols/gossipsub/src/behaviour/tests.rs | 264 ++++++++++++--------- 2 files changed, 222 insertions(+), 141 deletions(-) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 37d3974c216..1de74065c07 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -582,14 +582,23 @@ where } // Get Peers from the mesh or fanout to publish a message to. - fn get_publish_peers(&mut self, topic_hash: &TopicHash) -> HashSet { + // If partial set, filter out peers who only want partial messages for the topic. 
+ fn get_publish_peers(&mut self, topic_hash: &TopicHash, partial: bool) -> HashSet { let mesh_n = self.config.mesh_n_for_topic(topic_hash); let peers_on_topic = self .connected_peers .iter() - .filter(|(_, p)| p.topics.contains(topic_hash)) - .map(|(peer_id, _)| peer_id) + .filter_map(|(peer_id, peer)| { + let _subscribed_topic = peer.topics.get(topic_hash)?; + #[cfg(feature = "partial_messages")] + { + if partial && _subscribed_topic.partial { + return None; + } + } + Some(peer_id) + }) .peekable(); let mut recipient_peers = HashSet::new(); @@ -619,6 +628,7 @@ where &self.connected_peers, topic_hash, needed_extra_peers, + partial, |peer| { !mesh_peers.contains(peer) && !self.explicit_peers.contains(peer) @@ -648,16 +658,19 @@ where } } else { // We have no fanout peers, select mesh_n of them and add them to the fanout - let new_peers = - get_random_peers(&self.connected_peers, topic_hash, mesh_n, { - |p| { - !self.explicit_peers.contains(p) - && !self - .peer_score - .below_threshold(p, |ts| ts.publish_threshold) - .0 - } - }); + let new_peers = get_random_peers( + &self.connected_peers, + topic_hash, + mesh_n, + partial, + |p| { + !self.explicit_peers.contains(p) + && !self + .peer_score + .below_threshold(p, |ts| ts.publish_threshold) + .0 + }, + ); // Add the new peers to the fanout and recipient peers self.fanout.insert(topic_hash.clone(), new_peers.clone()); for peer in new_peers { @@ -741,7 +754,7 @@ where let topic_hash = raw_message.topic.clone(); - let recipient_peers = self.get_publish_peers(&topic_hash); + let recipient_peers = self.get_publish_peers(&topic_hash, true); // If the message isn't a duplicate and we have sent it to some peers add it to the // duplicate cache and memcache. @@ -811,7 +824,7 @@ where let group_id = partial_message.group_id().as_ref().to_vec(); // TODO: should we construct a recipient list just for partials? 
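The publish-side rule this patch introduces is compact enough to illustrate on its own. Below is a minimal, self-contained sketch of the idea, not the crate's API: `Peer`, `publish_candidates`, and `full_only` are invented stand-ins for `PeerDetails`, `get_publish_peers`, and its `partial` flag, and the linear `find` stands in for the `Borrow`-based set lookup the real code uses.

```rust
use std::collections::{BTreeSet, HashMap, HashSet};

type TopicHash = String; // stand-in for the real TopicHash
type PeerId = u32; // stand-in for libp2p_identity::PeerId

// Stand-in for the SubscribedTopic added by this patch.
#[derive(PartialEq, Eq, PartialOrd, Ord)]
struct SubscribedTopic {
    topic: TopicHash,
    partial: bool,
}

struct Peer {
    topics: BTreeSet<SubscribedTopic>,
}

// When `full_only` is set (a full message is being published), peers
// whose subscription to the topic is partial-only are skipped.
fn publish_candidates(
    peers: &HashMap<PeerId, Peer>,
    topic: &TopicHash,
    full_only: bool,
) -> HashSet<PeerId> {
    peers
        .iter()
        .filter_map(|(id, p)| {
            // Peers not subscribed to the topic are never candidates.
            let sub = p.topics.iter().find(|s| &s.topic == topic)?;
            if full_only && sub.partial {
                return None;
            }
            Some(*id)
        })
        .collect()
}

fn main() {
    let topic = "blocks".to_string();
    let full = SubscribedTopic { topic: topic.clone(), partial: false };
    let partial_only = SubscribedTopic { topic: topic.clone(), partial: true };
    let peers = HashMap::from([
        (1, Peer { topics: BTreeSet::from([full]) }),
        (2, Peer { topics: BTreeSet::from([partial_only]) }),
    ]);

    // Full message: the partial-only subscriber (peer 2) is excluded.
    assert_eq!(publish_candidates(&peers, &topic, true), HashSet::from([1]));
    // Partial message: both subscribers are candidates.
    assert_eq!(publish_candidates(&peers, &topic, false), HashSet::from([1, 2]));
}
```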
- let recipient_peers = self.get_publish_peers(&topic_id); + let recipient_peers = self.get_publish_peers(&topic_id, false); for peer_id in recipient_peers.iter() { // TODO: this can be optimized, we are going to get the peer again on `send_message` let Some(peer) = &mut self.connected_peers.get_mut(peer_id) else { @@ -1106,6 +1119,7 @@ where &self.connected_peers, topic_hash, mesh_n - added_peers.len(), + true, |peer| { !added_peers.contains(peer) && !self.explicit_peers.contains(peer) @@ -1200,6 +1214,7 @@ where &self.connected_peers, topic_hash, self.config.prune_peers(), + true, |p| p != peer && !self.peer_score.below_threshold(p, |_| 0.0).0, ) .into_iter() @@ -2352,13 +2367,18 @@ where ); // not enough peers - get mesh_n - current_length more let desired_peers = mesh_n - peers.len(); - let peer_list = - get_random_peers(&self.connected_peers, topic_hash, desired_peers, |peer| { + let peer_list = get_random_peers( + &self.connected_peers, + topic_hash, + desired_peers, + true, + |peer| { !peers.contains(peer) && !explicit_peers.contains(peer) && !backoffs.is_backoff_with_slack(topic_hash, peer) && scores.get(peer).map(|r| r.score).unwrap_or_default() >= 0.0 - }); + }, + ); for peer in &peer_list { let current_topic = to_graft.entry(*peer).or_insert_with(Vec::new); current_topic.push(topic_hash.clone()); @@ -2453,8 +2473,12 @@ where // if we have not enough outbound peers, graft to some new outbound peers if outbound < mesh_outbound_min { let needed = mesh_outbound_min - outbound; - let peer_list = - get_random_peers(&self.connected_peers, topic_hash, needed, |peer_id| { + let peer_list = get_random_peers( + &self.connected_peers, + topic_hash, + needed, + false, + |peer_id| { !peers.contains(peer_id) && !explicit_peers.contains(peer_id) && !backoffs.is_backoff_with_slack(topic_hash, peer_id) @@ -2463,7 +2487,8 @@ where .connected_peers .get(peer_id) .is_some_and(|peer| peer.outbound) - }); + }, + ); for peer in &peer_list { let current_topic = to_graft.entry(*peer).or_insert_with(Vec::new); @@ -2529,6 +2554,7 @@ where &self.connected_peers, topic_hash, self.config.opportunistic_graft_peers(), + false, |peer_id| { !peers.contains(peer_id) && !explicit_peers.contains(peer_id) @@ -2621,15 +2647,20 @@ where ); let needed_peers = mesh_n - peers.len(); let explicit_peers = &self.explicit_peers; - let new_peers = - get_random_peers(&self.connected_peers, topic_hash, needed_peers, |peer_id| { + let new_peers = get_random_peers( + &self.connected_peers, + topic_hash, + needed_peers, + false, + |peer_id| { !peers.contains(peer_id) && !explicit_peers.contains(peer_id) && !self .peer_score .below_threshold(peer_id, |ts| ts.publish_threshold) .0 - }); + }, + ); peers.extend(new_peers); } } @@ -2736,7 +2767,7 @@ where }; // get gossip_lazy random peers let to_msg_peers = - get_random_peers_dynamic(&self.connected_peers, topic_hash, n_map, |peer| { + get_random_peers_dynamic(&self.connected_peers, topic_hash, false, n_map, |peer| { !peers.contains(peer) && !self.explicit_peers.contains(peer) && !self @@ -3695,16 +3726,28 @@ fn peer_removed_from_mesh( /// Helper function to get a subset of random gossipsub peers for a `topic_hash` /// filtered by the function `f`. The number of peers to get equals the output of `n_map` /// that gets as input the number of filtered peers. +#[allow(unused, reason = "partial is used with partial_messages feature")] fn get_random_peers_dynamic( connected_peers: &HashMap, topic_hash: &TopicHash, + // If we want to filter for partial only peers. 
+ partial: bool, // maps the number of total peers to the number of selected peers n_map: impl Fn(usize) -> usize, mut f: impl FnMut(&PeerId) -> bool, ) -> BTreeSet { let mut gossip_peers = connected_peers .iter() - .filter(|(_, p)| p.topics.contains(topic_hash)) + .filter_map(|(peer_id, peer)| { + let subscribed_topic = peer.topics.get(topic_hash)?; + #[cfg(feature = "partial_messages")] + { + if partial && subscribed_topic.partial { + return None; + } + } + Some((peer_id, peer)) + }) .filter(|(peer_id, _)| f(peer_id)) .filter(|(_, p)| p.kind.is_gossipsub()) .map(|(peer_id, _)| *peer_id) @@ -3728,13 +3771,15 @@ fn get_random_peers_dynamic( /// Helper function to get a set of `n` random gossipsub peers for a `topic_hash` /// filtered by the function `f`. +#[allow(unused, reason = "partial is used with partial_messages feature")] fn get_random_peers( connected_peers: &HashMap, topic_hash: &TopicHash, n: usize, + partial: bool, f: impl FnMut(&PeerId) -> bool, ) -> BTreeSet { - get_random_peers_dynamic(connected_peers, topic_hash, |_| n, f) + get_random_peers_dynamic(connected_peers, topic_hash, partial, |_| n, f) } /// Validates the combination of signing, privacy and message validation to ensure the diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index 6d74e56e3ad..60f45b5d9d5 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -1082,15 +1082,18 @@ fn test_get_random_peers() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics.iter().map(|t| { - let mut st = SubscribedTopic::default(); - st.topic = t.clone(); - #[cfg(feature = "partial_messages")] - { - st.partial = false; - } - st - }).collect(), + topics: topics + .iter() + .map(|t| { + let mut st = SubscribedTopic::default(); + st.topic = t.clone(); + #[cfg(feature = "partial_messages")] + { + st.partial = false; + } + st + }) + .collect(), messages: Queue::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), extensions: None, @@ -1100,26 +1103,26 @@ fn test_get_random_peers() { ); } - let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 5, |_| true); + let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 5, false, |_| true); assert_eq!(random_peers.len(), 5, "Expected 5 peers to be returned"); - let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 30, |_| true); + let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 30, false, |_| true); assert!(random_peers.len() == 20, "Expected 20 peers to be returned"); assert!( random_peers == peers.iter().cloned().collect(), "Expected no shuffling" ); - let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 20, |_| true); + let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 20, false, |_| true); assert!(random_peers.len() == 20, "Expected 20 peers to be returned"); assert!( random_peers == peers.iter().cloned().collect(), "Expected no shuffling" ); - let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 0, |_| true); + let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 0, false, |_| true); assert!(random_peers.is_empty(), "Expected 0 peers to be returned"); // test the filter - let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 5, |_| false); + let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 5, false, |_| false); 
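The lookups above (`peer.topics.get(topic_hash)`) only type-check because this patch gives `SubscribedTopic` a `Borrow<TopicHash>` impl. Here is a toy sketch of that standard-library mechanism, using `String` in place of `TopicHash`; the usual `Borrow` caveat applies, namely that ordering seen through the borrowed key must agree with the full key's ordering, which holds as long as each topic appears at most once per set.

```rust
use std::borrow::Borrow;
use std::collections::BTreeSet;

#[derive(PartialEq, Eq, PartialOrd, Ord)]
struct SubscribedTopic {
    topic: String, // stand-in for TopicHash
    partial: bool,
}

// Allows a BTreeSet<SubscribedTopic> to be queried by topic alone.
impl Borrow<str> for SubscribedTopic {
    fn borrow(&self) -> &str {
        &self.topic
    }
}

fn main() {
    let mut topics = BTreeSet::new();
    topics.insert(SubscribedTopic { topic: "blocks".into(), partial: true });

    // No full SubscribedTopic value is needed for the lookup.
    let sub = topics.get("blocks").expect("peer subscribed to the topic");
    assert!(sub.partial);
    assert!(topics.get("txs").is_none());
}
```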
assert!(random_peers.is_empty(), "Expected 0 peers to be returned"); - let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 10, { + let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 10, false, { |peer| peers.contains(peer) }); assert!(random_peers.len() == 10, "Expected 10 peers to be returned"); @@ -5628,15 +5631,18 @@ fn test_all_queues_full() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics.iter().map(|t| { - let mut st = SubscribedTopic::default(); - st.topic = t.clone(); - #[cfg(feature = "partial_messages")] - { - st.partial = false; - } - st - }).collect(), + topics: topics + .iter() + .map(|t| { + let mut st = SubscribedTopic::default(); + st.topic = t.clone(); + #[cfg(feature = "partial_messages")] + { + st.partial = false; + } + st + }) + .collect(), messages: Queue::new(1), dont_send: LinkedHashMap::new(), extensions: None, @@ -5676,15 +5682,18 @@ fn test_slow_peer_returns_failed_publish() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics.iter().map(|t| { - let mut st = SubscribedTopic::default(); - st.topic = t.clone(); - #[cfg(feature = "partial_messages")] - { - st.partial = false; - } - st - }).collect(), + topics: topics + .iter() + .map(|t| { + let mut st = SubscribedTopic::default(); + st.topic = t.clone(); + #[cfg(feature = "partial_messages")] + { + st.partial = false; + } + st + }) + .collect(), messages: Queue::new(1), dont_send: LinkedHashMap::new(), extensions: None, @@ -5700,15 +5709,18 @@ fn test_slow_peer_returns_failed_publish() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics.iter().map(|t| { - let mut st = SubscribedTopic::default(); - st.topic = t.clone(); - #[cfg(feature = "partial_messages")] - { - st.partial = false; - } - st - }).collect(), + topics: topics + .iter() + .map(|t| { + let mut st = SubscribedTopic::default(); + st.topic = t.clone(); + #[cfg(feature = "partial_messages")] + { + st.partial = false; + } + st + }) + .collect(), messages: Queue::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), extensions: None, @@ -5763,15 +5775,18 @@ fn test_slow_peer_returns_failed_ihave_handling() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics.iter().map(|t| { - let mut st = SubscribedTopic::default(); - st.topic = t.clone(); - #[cfg(feature = "partial_messages")] - { - st.partial = false; - } - st - }).collect(), + topics: topics + .iter() + .map(|t| { + let mut st = SubscribedTopic::default(); + st.topic = t.clone(); + #[cfg(feature = "partial_messages")] + { + st.partial = false; + } + st + }) + .collect(), messages: Queue::new(1), dont_send: LinkedHashMap::new(), extensions: None, @@ -5791,15 +5806,18 @@ fn test_slow_peer_returns_failed_ihave_handling() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics.iter().map(|t| { - let mut st = SubscribedTopic::default(); - st.topic = t.clone(); - #[cfg(feature = "partial_messages")] - { - st.partial = false; - } - st - }).collect(), + topics: topics + .iter() + .map(|t| { + let mut st = SubscribedTopic::default(); + st.topic = t.clone(); + #[cfg(feature = "partial_messages")] + { + st.partial = false; + } + st + }) + .collect(), messages: Queue::new(gs.config.connection_handler_queue_len()), dont_send: 
LinkedHashMap::new(), extensions: None, @@ -5890,15 +5908,18 @@ fn test_slow_peer_returns_failed_iwant_handling() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics.iter().map(|t| { - let mut st = SubscribedTopic::default(); - st.topic = t.clone(); - #[cfg(feature = "partial_messages")] - { - st.partial = false; - } - st - }).collect(), + topics: topics + .iter() + .map(|t| { + let mut st = SubscribedTopic::default(); + st.topic = t.clone(); + #[cfg(feature = "partial_messages")] + { + st.partial = false; + } + st + }) + .collect(), messages: Queue::new(1), dont_send: LinkedHashMap::new(), extensions: None, @@ -5918,15 +5939,18 @@ fn test_slow_peer_returns_failed_iwant_handling() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics.iter().map(|t| { - let mut st = SubscribedTopic::default(); - st.topic = t.clone(); - #[cfg(feature = "partial_messages")] - { - st.partial = false; - } - st - }).collect(), + topics: topics + .iter() + .map(|t| { + let mut st = SubscribedTopic::default(); + st.topic = t.clone(); + #[cfg(feature = "partial_messages")] + { + st.partial = false; + } + st + }) + .collect(), messages: Queue::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), extensions: None, @@ -5997,15 +6021,18 @@ fn test_slow_peer_returns_failed_forward() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics.iter().map(|t| { - let mut st = SubscribedTopic::default(); - st.topic = t.clone(); - #[cfg(feature = "partial_messages")] - { - st.partial = false; - } - st - }).collect(), + topics: topics + .iter() + .map(|t| { + let mut st = SubscribedTopic::default(); + st.topic = t.clone(); + #[cfg(feature = "partial_messages")] + { + st.partial = false; + } + st + }) + .collect(), messages: Queue::new(1), dont_send: LinkedHashMap::new(), extensions: None, @@ -6025,15 +6052,18 @@ fn test_slow_peer_returns_failed_forward() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics.iter().map(|t| { - let mut st = SubscribedTopic::default(); - st.topic = t.clone(); - #[cfg(feature = "partial_messages")] - { - st.partial = false; - } - st - }).collect(), + topics: topics + .iter() + .map(|t| { + let mut st = SubscribedTopic::default(); + st.topic = t.clone(); + #[cfg(feature = "partial_messages")] + { + st.partial = false; + } + st + }) + .collect(), messages: Queue::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), extensions: None, @@ -6109,15 +6139,18 @@ fn test_slow_peer_is_downscored_on_publish() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics.iter().map(|t| { - let mut st = SubscribedTopic::default(); - st.topic = t.clone(); - #[cfg(feature = "partial_messages")] - { - st.partial = false; - } - st - }).collect(), + topics: topics + .iter() + .map(|t| { + let mut st = SubscribedTopic::default(); + st.topic = t.clone(); + #[cfg(feature = "partial_messages")] + { + st.partial = false; + } + st + }) + .collect(), messages: Queue::new(1), dont_send: LinkedHashMap::new(), extensions: None, @@ -6134,15 +6167,18 @@ fn test_slow_peer_is_downscored_on_publish() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics.iter().map(|t| { - let mut st = 
SubscribedTopic::default(); - st.topic = t.clone(); - #[cfg(feature = "partial_messages")] - { - st.partial = false; - } - st - }).collect(), + topics: topics + .iter() + .map(|t| { + let mut st = SubscribedTopic::default(); + st.topic = t.clone(); + #[cfg(feature = "partial_messages")] + { + st.partial = false; + } + st + }) + .collect(), messages: Queue::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), extensions: None, From 33783bd6c8b2bee0b91cda794d33b54855788e47 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Wed, 1 Oct 2025 12:43:00 +0100 Subject: [PATCH 19/68] revert to only using TopicHash, and create a parallel list for the partial_only topics. --- protocols/gossipsub/src/behaviour.rs | 64 +++--- protocols/gossipsub/src/behaviour/tests.rs | 239 +++++----------------- protocols/gossipsub/src/topic.rs | 16 +- protocols/gossipsub/src/types.rs | 7 +- 4 files changed, 86 insertions(+), 240 deletions(-) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 1de74065c07..894960627f4 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -65,7 +65,7 @@ use crate::{ rpc_proto::proto, subscription_filter::{AllowAllSubscriptionFilter, TopicSubscriptionFilter}, time_cache::DuplicateCache, - topic::{Hasher, SubscribedTopic, Topic, TopicHash}, + topic::{Hasher, Topic, TopicHash}, transform::{DataTransform, IdentityTransform}, types::{ ControlAction, Extensions, Graft, IDontWant, IHave, IWant, Message, MessageAcceptance, @@ -509,7 +509,7 @@ where pub fn all_peers(&self) -> impl Iterator)> { self.connected_peers .iter() - .map(|(peer_id, peer)| (peer_id, peer.topics.iter().map(|s| &s.topic).collect())) + .map(|(peer_id, peer)| (peer_id, peer.topics.iter().collect())) } /// Lists all known peers and their associated protocol. @@ -589,11 +589,10 @@ where let peers_on_topic = self .connected_peers .iter() - .filter_map(|(peer_id, peer)| { - let _subscribed_topic = peer.topics.get(topic_hash)?; + .filter_map(|(peer_id, _peer)| { #[cfg(feature = "partial_messages")] { - if partial && _subscribed_topic.partial { + if partial && _peer.partial_only_topics.contains(topic_hash) { return None; } } Some(peer_id) }) .peekable(); let mut recipient_peers = HashSet::new(); @@ -1473,9 +1472,7 @@ where // For each topic, if a peer has grafted us, then we necessarily must be in their mesh // and they must be subscribed to the topic. Ensure we have recorded the mapping. for topic in &topics { - let mut subscribed = SubscribedTopic::default(); - subscribed.topic = topic.clone(); - if connected_peer.topics.insert(subscribed) { + if connected_peer.topics.insert(topic.clone()) { #[cfg(feature = "metrics")] if let Some(m) = self.metrics.as_mut() { m.inc_topic_peers(topic); @@ -2091,10 +2088,10 @@ where // Notify the application about the subscription, after the grafts are sent.
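The data model this patch moves to is easiest to see in isolation: per-peer subscriptions go back to a plain `BTreeSet<TopicHash>`, with a second, parallel set naming the partial-only topics. A minimal sketch with simplified types follows; the subscribe arm mirrors the hunk just below, while the unsubscribe arm is an assumption about the symmetric cleanup.

```rust
use std::collections::BTreeSet;

type TopicHash = String; // stand-in for the real TopicHash

#[derive(Default)]
struct PeerState {
    // Every topic the peer subscribes to.
    topics: BTreeSet<TopicHash>,
    // The subset of `topics` the peer wants only as partial messages.
    partial_only_topics: BTreeSet<TopicHash>,
}

// Returns true when the subscription is new, as the real handler checks.
fn on_subscribe(peer: &mut PeerState, topic: TopicHash, partial: bool) -> bool {
    if partial {
        peer.partial_only_topics.insert(topic.clone());
    }
    peer.topics.insert(topic)
}

// Assumed symmetric cleanup: both sets must forget the topic.
fn on_unsubscribe(peer: &mut PeerState, topic: &TopicHash) -> bool {
    peer.partial_only_topics.remove(topic);
    peer.topics.remove(topic)
}

fn main() {
    let mut peer = PeerState::default();
    assert!(on_subscribe(&mut peer, "blocks".into(), true));
    assert!(peer.partial_only_topics.contains("blocks"));

    assert!(on_unsubscribe(&mut peer, &"blocks".to_string()));
    assert!(peer.topics.is_empty() && peer.partial_only_topics.is_empty());
}
```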
let mut application_event = Vec::new(); - let filtered_topics = match self.subscription_filter.filter_incoming_subscriptions( - subscriptions, - &peer.topics.iter().map(|s| s.topic.clone()).collect(), - ) { + let filtered_topics = match self + .subscription_filter + .filter_incoming_subscriptions(subscriptions, &peer.topics.iter().cloned().collect()) + { Ok(topics) => topics, Err(s) => { tracing::error!( @@ -2112,13 +2109,13 @@ where match subscription.action { SubscriptionAction::Subscribe => { - let mut subscribed_topic = SubscribedTopic::default(); - subscribed_topic.topic = topic_hash.clone(); #[cfg(feature = "partial_messages")] { - subscribed_topic.partial = subscription.partial; + if subscription.partial { + peer.partial_only_topics.insert(topic_hash.clone()); + } } - if peer.topics.insert(subscribed_topic) { + if peer.topics.insert(topic_hash.clone()) { tracing::debug!( peer=%propagation_source, topic=%topic_hash, @@ -3190,8 +3187,8 @@ where // If there are more connections and this peer is in a mesh, inform the first // connection handler. if !peer.connections.is_empty() { - for subscribed_topic in &peer.topics { - if let Some(mesh_peers) = self.mesh.get(&subscribed_topic.topic) { + for topic in &peer.topics { + if let Some(mesh_peers) = self.mesh.get(&topic) { if mesh_peers.contains(&peer_id) { self.events.push_back(ToSwarm::NotifyHandler { peer_id, @@ -3213,27 +3210,27 @@ where }; // remove peer from all mappings - for subscribed_topic in &connected_peer.topics { + for topic in &connected_peer.topics { // check the mesh for the topic - if let Some(mesh_peers) = self.mesh.get_mut(&subscribed_topic.topic) { + if let Some(mesh_peers) = self.mesh.get_mut(&topic) { // check if the peer is in the mesh and remove it if mesh_peers.remove(&peer_id) { #[cfg(feature = "metrics")] if let Some(m) = self.metrics.as_mut() { - m.peers_removed(&subscribed_topic.topic, Churn::Dc, 1); - m.set_mesh_peers(&subscribed_topic.topic, mesh_peers.len()); + m.peers_removed(&topic, Churn::Dc, 1); + m.set_mesh_peers(&topic, mesh_peers.len()); } }; } #[cfg(feature = "metrics")] if let Some(m) = self.metrics.as_mut() { - m.dec_topic_peers(&subscribed_topic.topic); + m.dec_topic_peers(&topic); } // remove from fanout self.fanout - .get_mut(&subscribed_topic.topic) + .get_mut(&topic) .map(|peers| peers.remove(&peer_id)); } @@ -3333,6 +3330,8 @@ where extensions: None, #[cfg(feature = "partial_messages")] partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_only_topics: Default::default(), }); // Add the new connection connected_peer.connections.push(connection_id); @@ -3378,6 +3377,8 @@ where extensions: None, #[cfg(feature = "partial_messages")] partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_only_topics: Default::default(), }); // Add the new connection connected_peer.connections.push(connection_id); @@ -3662,9 +3663,9 @@ fn peer_added_to_mesh( }; if let Some(peer) = connections.get(&peer_id) { - for subscribed_topic in &peer.topics { - if !new_topics.contains(&&subscribed_topic.topic) { - if let Some(mesh_peers) = mesh.get(&subscribed_topic.topic) { + for topic in &peer.topics { + if !new_topics.contains(&topic) { + if let Some(mesh_peers) = mesh.get(&topic) { if mesh_peers.contains(&peer_id) { // the peer is already in a mesh for another topic return; @@ -3704,9 +3705,9 @@ fn peer_removed_from_mesh( }; if let Some(peer) = connections.get(&peer_id) { - for subscribed_topic in &peer.topics { - if &subscribed_topic.topic != old_topic { - 
if let Some(mesh_peers) = mesh.get(&subscribed_topic.topic) { + for topic in &peer.topics { + if topic != old_topic { + if let Some(mesh_peers) = mesh.get(&topic) { if mesh_peers.contains(&peer_id) { // the peer exists in another mesh still return; @@ -3739,10 +3740,9 @@ fn get_random_peers_dynamic( let mut gossip_peers = connected_peers .iter() .filter_map(|(peer_id, peer)| { - let subscribed_topic = peer.topics.get(topic_hash)?; #[cfg(feature = "partial_messages")] { - if partial && subscribed_topic.partial { + if partial && peer.partial_only_topics.contains(topic_hash) { return None; } } diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index 60f45b5d9d5..a2ac811ec8f 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -33,7 +33,6 @@ use crate::{ config::{ConfigBuilder, TopicMeshConfig}, protocol::GossipsubCodec, subscription_filter::WhitelistSubscriptionFilter, - topic::SubscribedTopic, types::{ControlAction, Extensions, RpcIn, RpcOut}, IdentTopic as Topic, }; @@ -249,6 +248,8 @@ where extensions: None, #[cfg(feature = "partial_messages")] partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_only_topics: Default::default(), }, ); @@ -645,6 +646,8 @@ fn test_join() { extensions: None, #[cfg(feature = "partial_messages")] partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_only_topics: Default::default(), }, ); queues.insert(random_peer, receiver_queue); @@ -892,19 +895,7 @@ fn test_inject_connected() { for peer in peers { let peer = gs.connected_peers.get(&peer).unwrap(); assert!( - peer.topics - == topic_hashes - .iter() - .map(|t| { - let mut st = SubscribedTopic::default(); - st.topic = t.clone(); - #[cfg(feature = "partial_messages")] - { - st.partial = false; - } - st - }) - .collect(), + peer.topics == topic_hashes.iter().cloned().collect(), "The topics for each node should all topics" ); } @@ -963,15 +954,7 @@ fn test_handle_received_subscriptions() { == topic_hashes .iter() .take(3) - .map(|t| { - let mut st = SubscribedTopic::default(); - st.topic = t.clone(); - #[cfg(feature = "partial_messages")] - { - st.partial = false; - } - st - }) + .cloned() .collect::>(), "First peer should be subscribed to three topics" ); @@ -981,15 +964,7 @@ fn test_handle_received_subscriptions() { == topic_hashes .iter() .take(3) - .map(|t| { - let mut st = SubscribedTopic::default(); - st.topic = t.clone(); - #[cfg(feature = "partial_messages")] - { - st.partial = false; - } - st - }) + .cloned() .collect::>(), "Second peer should be subscribed to three topics" ); @@ -1003,7 +978,7 @@ fn test_handle_received_subscriptions() { let topic_peers = gs .connected_peers .iter() - .filter(|(_, p)| p.topics.iter().any(|st| &st.topic == topic_hash)) + .filter(|(_, p)| p.topics.contains(topic_hash)) .map(|(peer_id, _)| *peer_id) .collect::>(); assert!( @@ -1026,19 +1001,7 @@ fn test_handle_received_subscriptions() { let peer = gs.connected_peers.get(&peers[0]).unwrap(); assert!( - peer.topics - == topic_hashes[1..3] - .iter() - .map(|t| { - let mut st = SubscribedTopic::default(); - st.topic = t.clone(); - #[cfg(feature = "partial_messages")] - { - st.partial = false; - } - st - }) - .collect::>(), + peer.topics == topic_hashes[1..3].iter().cloned().collect::>(), "Peer should be subscribed to two topics" ); @@ -1046,7 +1009,7 @@ fn test_handle_received_subscriptions() { let topic_peers = gs .connected_peers .iter() - .filter(|(_, p)| 
p.topics.iter().any(|st| st.topic == topic_hashes[0])) + .filter(|(_, p)| p.topics.contains(&topic_hashes[0])) .map(|(peer_id, _)| *peer_id) .collect::>(); @@ -1082,23 +1045,14 @@ fn test_get_random_peers() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics - .iter() - .map(|t| { - let mut st = SubscribedTopic::default(); - st.topic = t.clone(); - #[cfg(feature = "partial_messages")] - { - st.partial = false; - } - st - }) - .collect(), + topics: topics.clone(), messages: Queue::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), extensions: None, #[cfg(feature = "partial_messages")] partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_only_topics: Default::default(), }, ); } @@ -5631,23 +5585,14 @@ fn test_all_queues_full() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics - .iter() - .map(|t| { - let mut st = SubscribedTopic::default(); - st.topic = t.clone(); - #[cfg(feature = "partial_messages")] - { - st.partial = false; - } - st - }) - .collect(), + topics: topics.clone(), messages: Queue::new(1), dont_send: LinkedHashMap::new(), extensions: None, #[cfg(feature = "partial_messages")] partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_only_topics: Default::default(), }, ); @@ -5682,23 +5627,14 @@ fn test_slow_peer_returns_failed_publish() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics - .iter() - .map(|t| { - let mut st = SubscribedTopic::default(); - st.topic = t.clone(); - #[cfg(feature = "partial_messages")] - { - st.partial = false; - } - st - }) - .collect(), + topics: topics.clone(), messages: Queue::new(1), dont_send: LinkedHashMap::new(), extensions: None, #[cfg(feature = "partial_messages")] partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_only_topics: Default::default(), }, ); let peer_id = PeerId::random(); @@ -5709,23 +5645,14 @@ fn test_slow_peer_returns_failed_publish() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics - .iter() - .map(|t| { - let mut st = SubscribedTopic::default(); - st.topic = t.clone(); - #[cfg(feature = "partial_messages")] - { - st.partial = false; - } - st - }) - .collect(), + topics: topics.clone(), messages: Queue::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), extensions: None, #[cfg(feature = "partial_messages")] partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_only_topics: Default::default(), }, ); @@ -5775,23 +5702,14 @@ fn test_slow_peer_returns_failed_ihave_handling() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics - .iter() - .map(|t| { - let mut st = SubscribedTopic::default(); - st.topic = t.clone(); - #[cfg(feature = "partial_messages")] - { - st.partial = false; - } - st - }) - .collect(), + topics: topics.clone(), messages: Queue::new(1), dont_send: LinkedHashMap::new(), extensions: None, #[cfg(feature = "partial_messages")] partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_only_topics: Default::default(), }, ); peers.push(slow_peer_id); @@ -5806,23 +5724,14 @@ fn test_slow_peer_returns_failed_ihave_handling() { kind: PeerKind::Gossipsubv1_1, connections: 
vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics - .iter() - .map(|t| { - let mut st = SubscribedTopic::default(); - st.topic = t.clone(); - #[cfg(feature = "partial_messages")] - { - st.partial = false; - } - st - }) - .collect(), + topics: topics.clone(), messages: Queue::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), extensions: None, #[cfg(feature = "partial_messages")] partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_only_topics: Default::default(), }, ); @@ -5908,23 +5817,14 @@ fn test_slow_peer_returns_failed_iwant_handling() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics - .iter() - .map(|t| { - let mut st = SubscribedTopic::default(); - st.topic = t.clone(); - #[cfg(feature = "partial_messages")] - { - st.partial = false; - } - st - }) - .collect(), + topics: topics.clone(), messages: Queue::new(1), dont_send: LinkedHashMap::new(), extensions: None, #[cfg(feature = "partial_messages")] partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_only_topics: Default::default(), }, ); peers.push(slow_peer_id); @@ -5939,23 +5839,14 @@ fn test_slow_peer_returns_failed_iwant_handling() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics - .iter() - .map(|t| { - let mut st = SubscribedTopic::default(); - st.topic = t.clone(); - #[cfg(feature = "partial_messages")] - { - st.partial = false; - } - st - }) - .collect(), + topics: topics.clone(), messages: Queue::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), extensions: None, #[cfg(feature = "partial_messages")] partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_only_topics: Default::default(), }, ); @@ -6021,23 +5912,14 @@ fn test_slow_peer_returns_failed_forward() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics - .iter() - .map(|t| { - let mut st = SubscribedTopic::default(); - st.topic = t.clone(); - #[cfg(feature = "partial_messages")] - { - st.partial = false; - } - st - }) - .collect(), + topics: topics.clone(), messages: Queue::new(1), dont_send: LinkedHashMap::new(), extensions: None, #[cfg(feature = "partial_messages")] partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_only_topics: Default::default(), }, ); peers.push(slow_peer_id); @@ -6052,23 +5934,14 @@ fn test_slow_peer_returns_failed_forward() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics - .iter() - .map(|t| { - let mut st = SubscribedTopic::default(); - st.topic = t.clone(); - #[cfg(feature = "partial_messages")] - { - st.partial = false; - } - st - }) - .collect(), + topics: topics.clone(), messages: Queue::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), extensions: None, #[cfg(feature = "partial_messages")] partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_only_topics: Default::default(), }, ); @@ -6139,23 +6012,14 @@ fn test_slow_peer_is_downscored_on_publish() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics - .iter() - .map(|t| { - let mut st = SubscribedTopic::default(); - st.topic = t.clone(); - #[cfg(feature = "partial_messages")] - { 
- st.partial = false; - } - st - }) - .collect(), + topics: topics.clone(), messages: Queue::new(1), dont_send: LinkedHashMap::new(), extensions: None, #[cfg(feature = "partial_messages")] partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_only_topics: Default::default(), }, ); gs.as_peer_score_mut().add_peer(slow_peer_id); @@ -6167,23 +6031,14 @@ fn test_slow_peer_is_downscored_on_publish() { kind: PeerKind::Gossipsubv1_1, connections: vec![ConnectionId::new_unchecked(0)], outbound: false, - topics: topics - .iter() - .map(|t| { - let mut st = SubscribedTopic::default(); - st.topic = t.clone(); - #[cfg(feature = "partial_messages")] - { - st.partial = false; - } - st - }) - .collect(), + topics: topics.clone(), messages: Queue::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), extensions: None, #[cfg(feature = "partial_messages")] partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_only_topics: Default::default(), }, ); @@ -6964,6 +6819,8 @@ fn test_handle_extensions_message() { extensions: None, #[cfg(feature = "partial_messages")] partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_only_topics: Default::default(), }, ); diff --git a/protocols/gossipsub/src/topic.rs b/protocols/gossipsub/src/topic.rs index 2c7d851574e..adbdf58637f 100644 --- a/protocols/gossipsub/src/topic.rs +++ b/protocols/gossipsub/src/topic.rs @@ -18,7 +18,7 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use std::{borrow::Borrow, fmt}; +use std::fmt; use base64::prelude::*; use quick_protobuf::Writer; @@ -65,20 +65,6 @@ impl Hasher for Sha256Hash { } } -/// Topic subscribed by a peer. -#[derive(Debug, Default, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub(crate) struct SubscribedTopic { - pub(crate) topic: TopicHash, - #[cfg(feature = "partial_messages")] - pub(crate) partial: bool, -} - -impl Borrow for SubscribedTopic { - fn borrow(&self) -> &TopicHash { - &self.topic - } -} - #[derive(Debug, Default, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] #[cfg_attr( feature = "metrics", diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs index 4a9357037e9..9cd820ff280 100644 --- a/protocols/gossipsub/src/types.rs +++ b/protocols/gossipsub/src/types.rs @@ -35,7 +35,7 @@ use serde::{Deserialize, Serialize}; use std::collections::HashMap; use web_time::Instant; -use crate::{queue::Queue, rpc_proto::proto, topic::SubscribedTopic, TopicHash}; +use crate::{queue::Queue, rpc_proto::proto, TopicHash}; /// Messages that have expired while attempting to be sent to a peer. #[derive(Clone, Debug, Default, PartialEq, Eq)] @@ -100,12 +100,15 @@ pub(crate) struct PeerDetails { /// Its current connections. pub(crate) connections: Vec, /// Subscribed topics. - pub(crate) topics: BTreeSet, + pub(crate) topics: BTreeSet, /// Don't send messages. pub(crate) dont_send: LinkedHashMap, /// Peer Partial messages. #[cfg(feature = "partial_messages")] pub(crate) partial_messages: HashMap, PartialData>>, + /// Partial only subscribed topics. + #[cfg(feature = "partial_messages")] + pub(crate) partial_only_topics: BTreeSet, /// Message queue consumed by the connection handler. 
pub(crate) messages: Queue, } From 438a297e1e62dba28f6d60234ca09717b82c8fc2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Wed, 1 Oct 2025 14:35:02 +0100 Subject: [PATCH 20/68] add partial option to subscribe and unsubscribe methods --- protocols/gossipsub/src/behaviour.rs | 48 ++++++++++++++++++---- protocols/gossipsub/src/behaviour/tests.rs | 26 ++++++------ protocols/gossipsub/src/queue.rs | 2 +- protocols/gossipsub/src/types.rs | 17 ++++++-- protocols/gossipsub/tests/smoke.rs | 2 +- 5 files changed, 70 insertions(+), 25 deletions(-) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 894960627f4..ab740137d34 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -303,6 +303,10 @@ pub struct Behaviour { /// Overlay network of connected peers - Maps topics to connected gossipsub peers. mesh: HashMap>, + /// Partial only subscribed topics. + #[cfg(feature = "partial_messages")] + partial_only_topics: BTreeSet, + /// Map of topics to list of peers that we publish to, but don't subscribe to. fanout: HashMap>, @@ -466,6 +470,8 @@ where data_transform, failed_messages: Default::default(), gossip_promises: Default::default(), + #[cfg(feature = "partial_messages")] + partial_only_topics: Default::default(), }) } @@ -529,7 +535,11 @@ where /// /// Returns [`Ok(true)`] if the subscription worked. Returns [`Ok(false)`] if we were already /// subscribed. - pub fn subscribe(&mut self, topic: &Topic) -> Result { + pub fn subscribe( + &mut self, + topic: &Topic, + #[cfg(feature = "partial_messages")] partial_only: bool, + ) -> Result { let topic_hash = topic.hash(); if !self.subscription_filter.can_subscribe(&topic_hash) { return Err(SubscriptionError::NotAllowed); } @@ -543,13 +553,24 @@ where // send subscription request to all peers for peer_id in self.connected_peers.keys().copied().collect::>() { tracing::debug!(%peer_id, "Sending SUBSCRIBE to peer"); - let event = RpcOut::Subscribe(topic_hash.clone()); + let event = RpcOut::Subscribe { + topic: topic_hash.clone(), + #[cfg(feature = "partial_messages")] + partial_only, + }; self.send_message(peer_id, event); } // call JOIN(topic) // this will add new peers to the mesh for the topic self.join(&topic_hash); + #[cfg(feature = "partial_messages")] + { + if partial_only { + self.partial_only_topics.insert(topic_hash.clone()); + } + } + tracing::debug!(%topic, "Subscribed to topic"); Ok(true) } @@ -576,6 +597,10 @@ where // call LEAVE(topic) // this will remove the topic from the mesh self.leave(&topic_hash); + #[cfg(feature = "partial_messages")] + { + self.partial_only_topics.remove(&topic_hash); + } tracing::debug!(topic=%topic_hash, "Unsubscribed from topic"); true @@ -589,15 +614,17 @@ where let peers_on_topic = self .connected_peers .iter() - .filter_map(|(peer_id, _peer)| { + .filter(|(_, peer)| { #[cfg(feature = "partial_messages")] { - if partial && _peer.partial_only_topics.contains(topic_hash) { - return None; + if partial && peer.partial_only_topics.contains(topic_hash) { + return false; } } + let _ = peer; + true }) + .map(|(peer_id, _)| peer_id) .peekable(); let mut recipient_peers = HashSet::new(); @@ -3147,7 +3174,14 @@ where tracing::debug!(peer=%peer_id, "New peer connected"); // We need to send our subscriptions to the newly-connected node.
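A side note on the `RpcOut::Subscribe` change in this patch: turning the variant into a struct with a `#[cfg]`-gated field means constructors name the field only under the feature, while matches use `..` so they compile either way (as the queue.rs and types.rs hunks further below do). Here is a toy reproduction of the pattern; `Rpc` and `is_priority` are invented names, and the real enum has many more variants.

```rust
// Toy reproduction of the cfg-gated struct-variant pattern.
#[derive(Debug)]
enum Rpc {
    Subscribe {
        topic: String,
        // This field does not exist without the feature.
        #[cfg(feature = "partial_messages")]
        partial_only: bool,
    },
    Unsubscribe(String),
}

// `..` tolerates the field set changing with enabled features.
fn is_priority(rpc: &Rpc) -> bool {
    matches!(rpc, Rpc::Subscribe { .. } | Rpc::Unsubscribe(_))
}

fn main() {
    let rpc = Rpc::Subscribe {
        topic: "blocks".into(),
        #[cfg(feature = "partial_messages")]
        partial_only: false,
    };
    assert!(is_priority(&rpc));
    assert!(is_priority(&Rpc::Unsubscribe("blocks".into())));
    println!("{rpc:?}");
}
```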
for topic_hash in self.mesh.clone().into_keys() { - self.send_message(peer_id, RpcOut::Subscribe(topic_hash)); + self.send_message( + peer_id, + RpcOut::Subscribe { + topic: topic_hash.clone(), + #[cfg(feature = "partial_messages")] + partial_only: self.partial_only_topics.contains(&topic_hash), + }, + ); } } diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index a2ac811ec8f..9d8c276798f 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -85,7 +85,7 @@ where // subscribe to the topics for t in self.topics { let topic = Topic::new(t); - gs.subscribe(&topic).unwrap(); + gs.subscribe(&topic, #[cfg(feature = "partial_messages")] false).unwrap(); topic_hashes.push(topic.hash().clone()); } @@ -463,7 +463,7 @@ fn test_subscribe() { .into_values() .fold(0, |mut collected_subscriptions, mut queue| { while !queue.is_empty() { - if let Some(RpcOut::Subscribe(_)) = queue.try_pop() { + if let Some(RpcOut::Subscribe { .. }) = queue.try_pop() { collected_subscriptions += 1 } } @@ -523,7 +523,7 @@ fn test_unsubscribe() { .into_values() .fold(0, |mut collected_subscriptions, mut queue| { while !queue.is_empty() { - if let Some(RpcOut::Subscribe(_)) = queue.try_pop() { + if let Some(RpcOut::Subscribe { .. }) = queue.try_pop() { collected_subscriptions += 1 } } @@ -581,7 +581,7 @@ fn test_join() { // re-subscribe - there should be peers associated with the topic assert!( - gs.subscribe(&topics[0]).unwrap(), + gs.subscribe(&topics[0], #[cfg(feature = "partial_messages")] false).unwrap(), "should be able to subscribe successfully" ); @@ -671,7 +671,7 @@ fn test_join() { } // subscribe to topic1 - gs.subscribe(&topics[1]).unwrap(); + gs.subscribe(&topics[1], #[cfg(feature = "partial_messages")] false).unwrap(); // the three new peers should have been added, along with 3 more from the pool. assert!( @@ -871,7 +871,7 @@ fn test_inject_connected() { HashMap::>::new(), |mut collected_subscriptions, (peer, mut queue)| { while !queue.is_empty() { - if let Some(RpcOut::Subscribe(topic)) = queue.try_pop() { + if let Some(RpcOut::Subscribe { topic, .. }) = queue.try_pop() { let mut peer_subs = collected_subscriptions.remove(&peer).unwrap_or_default(); peer_subs.push(topic.into_string()); collected_subscriptions.insert(peer, peer_subs); @@ -1738,7 +1738,7 @@ fn explicit_peers_not_added_to_mesh_on_subscribe() { } // subscribe now to topic - gs.subscribe(&topic).unwrap(); + gs.subscribe(&topic, #[cfg(feature = "partial_messages")] false).unwrap(); // only peer 1 is in the mesh not peer 0 (which is an explicit peer) assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect()); @@ -1791,7 +1791,7 @@ fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() { gs.publish(topic.clone(), vec![1, 2, 3]).unwrap(); // subscribe now to topic - gs.subscribe(&topic).unwrap(); + gs.subscribe(&topic, #[cfg(feature = "partial_messages")] false).unwrap(); // only peer 1 is in the mesh not peer 0 (which is an explicit peer) assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect()); @@ -2196,7 +2196,7 @@ fn test_unsubscribe_backoff() { "Peer should be pruned with `unsubscribe_backoff`." 
); - let _ = gs.subscribe(&Topic::new(topics[0].to_string())); + let _ = gs.subscribe(&Topic::new(topics[0].to_string()), #[cfg(feature = "partial_messages")] false); // forget all events until now let queues = flush_events(&mut gs, queues); @@ -5228,8 +5228,8 @@ fn test_subscribe_to_invalid_topic() { .to_subscribe(false) .create_network(); - assert!(gs.subscribe(&t1).is_ok()); - assert!(gs.subscribe(&t2).is_err()); + assert!(gs.subscribe(&t1, #[cfg(feature = "partial_messages")] false).is_ok()); + assert!(gs.subscribe(&t2, #[cfg(feature = "partial_messages")] false).is_err()); } #[test] @@ -5258,7 +5258,7 @@ fn test_subscribe_and_graft_with_negative_score() { let original_score = gs1.as_peer_score_mut().score_report(&p2).score; // subscribe to topic in gs2 - gs2.subscribe(&topic).unwrap(); + gs2.subscribe(&topic, #[cfg(feature = "partial_messages")] false).unwrap(); let forward_messages_to_p1 = |gs1: &mut Behaviour<_, _>, p1: PeerId, @@ -6364,7 +6364,7 @@ fn test_multiple_topics_with_different_configs() { // re-subscribe to topic1 assert!( - gs.subscribe(&Topic::new(topic_hashes[0].to_string())) + gs.subscribe(&Topic::new(topic_hashes[0].to_string()), #[cfg(feature = "partial_messages")] false) .unwrap(), "Should subscribe successfully" ); diff --git a/protocols/gossipsub/src/queue.rs b/protocols/gossipsub/src/queue.rs index e067d74c662..24811c70242 100644 --- a/protocols/gossipsub/src/queue.rs +++ b/protocols/gossipsub/src/queue.rs @@ -62,7 +62,7 @@ impl Queue { /// which will only happen for control and non priority messages. pub(crate) fn try_push(&mut self, message: RpcOut) -> Result<(), Box> { match message { - RpcOut::Extensions(_) | RpcOut::Subscribe(_) | RpcOut::Unsubscribe(_) => { + RpcOut::Extensions(_) | RpcOut::Subscribe { .. } | RpcOut::Unsubscribe(_) => { self.priority .try_push(message) .expect("Shared is unbounded"); diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs index 9cd820ff280..f931f2f4548 100644 --- a/protocols/gossipsub/src/types.rs +++ b/protocols/gossipsub/src/types.rs @@ -379,7 +379,11 @@ pub enum RpcOut { timeout: Delay, }, /// Subscribe a topic. - Subscribe(TopicHash), + Subscribe { + topic: TopicHash, + #[cfg(feature = "partial_messages")] + partial_only: bool, + }, /// Unsubscribe a topic. Unsubscribe(TopicHash), /// Send a GRAFT control message. @@ -421,7 +425,7 @@ impl RpcOut { pub(crate) fn priority(&self) -> bool { matches!( self, - RpcOut::Subscribe(_) + RpcOut::Subscribe { .. } | RpcOut::Unsubscribe(_) | RpcOut::Graft(_) | RpcOut::Prune(_) @@ -448,12 +452,19 @@ impl From for proto::RPC { testExtension: None, partial: None, }, - RpcOut::Subscribe(topic) => proto::RPC { + RpcOut::Subscribe { + topic, + #[cfg(feature = "partial_messages")] + partial_only, + } => proto::RPC { publish: Vec::new(), subscriptions: vec![proto::SubOpts { subscribe: Some(true), topic_id: Some(topic.into_string()), + #[cfg(not(feature = "partial_messages"))] partial: None, + #[cfg(feature = "partial_messages")] + partial: Some(partial_only), }], control: None, testExtension: None, diff --git a/protocols/gossipsub/tests/smoke.rs b/protocols/gossipsub/tests/smoke.rs index d94297808ba..d519d8203e5 100644 --- a/protocols/gossipsub/tests/smoke.rs +++ b/protocols/gossipsub/tests/smoke.rs @@ -152,7 +152,7 @@ fn multi_hop_propagation() { // Subscribe each node to the same topic. 
let topic = gossipsub::IdentTopic::new("test-net"); for node in &mut graph.nodes { - node.behaviour_mut().subscribe(&topic).unwrap(); + node.behaviour_mut().subscribe(&topic, #[cfg(feature = "partial_messages")] false).unwrap(); } // Wait for all nodes to be subscribed. From e1929e04a09ec31e5cf133bc9b793a829a62f8e6 Mon Sep 17 00:00:00 2001 From: Piotr Galar Date: Wed, 1 Oct 2025 17:39:56 +0200 Subject: [PATCH 21/68] chore(ci): remove usage of self-hosted runners (#6170) --- .github/workflows/ci.yml | 7 +------ .github/workflows/interop-test.yml | 2 +- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bace02a86b0..3c607a04fb4 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -18,12 +18,7 @@ env: jobs: test: name: Test ${{ matrix.crate }} - runs-on: ${{ fromJSON( - github.repository == 'libp2p/rust-libp2p' && ( - (contains(fromJSON('["libp2p-webrtc", "libp2p"]'), matrix.crate) && '["self-hosted", "linux", "x64", "2xlarge"]') || - (contains(fromJSON('["libp2p-quic", "libp2p-perf"]'), matrix.crate) && '["self-hosted", "linux", "x64", "xlarge"]') || - '["self-hosted", "linux", "x64", "large"]' - ) || '"ubuntu-latest"') }} + runs-on: ubuntu-latest timeout-minutes: 10 needs: gather_published_crates strategy: diff --git a/.github/workflows/interop-test.yml b/.github/workflows/interop-test.yml index 0c0a90043f4..c5dbe2323ac 100644 --- a/.github/workflows/interop-test.yml +++ b/.github/workflows/interop-test.yml @@ -13,7 +13,7 @@ jobs: run-transport-interop: name: Run transport interoperability tests if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository - runs-on: ${{ fromJSON(github.repository == 'libp2p/rust-libp2p' && '["self-hosted", "linux", "x64", "4xlarge"]' || '"ubuntu-latest"') }} + runs-on: ubuntu-latest strategy: matrix: flavour: [chromium, native] From 9d2eb00d754fa2e0795dabf71a092cb1499f952c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Thu, 2 Oct 2025 12:01:51 +0100 Subject: [PATCH 22/68] chore: Address latest clippy lints Pull-Request: #6171. 
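The dominant fix in the hunks below is replacing `vec![...]` literals that are
never resized with fixed-size arrays (the pattern `clippy::useless_vec` flags;
the lint name is inferred here, the patch itself does not cite it), and the
`kad` hunk similarly drops a redundant `.cloned()` on an iterator of
references. A minimal, self-contained before/after sketch of the array fix:

```rust
fn main() {
    // Before: heap-allocates a Vec even though it is only ever iterated.
    let peers = vec!["peer-a", "peer-b", "peer-c"];
    assert_eq!(peers.iter().count(), 3);

    // After: a fixed-size array serves the same reads without allocating.
    let peers = ["peer-a", "peer-b", "peer-c"];
    assert_eq!(peers.iter().count(), 3);
}
```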
--- protocols/gossipsub/src/behaviour/tests.rs | 4 ++-- protocols/kad/src/behaviour.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index 04c86bd3df4..a17d1426760 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -4248,7 +4248,7 @@ fn test_scoring_p6() { // create 5 peers with the same ip let addr = Multiaddr::from(Ipv4Addr::new(10, 1, 2, 3)); - let peers = vec![ + let peers = [ add_peer_with_addr(&mut gs, &[], false, false, addr.clone()).0, add_peer_with_addr(&mut gs, &[], false, false, addr.clone()).0, add_peer_with_addr(&mut gs, &[], true, false, addr.clone()).0, @@ -4258,7 +4258,7 @@ fn test_scoring_p6() { // create 4 other peers with other ip let addr2 = Multiaddr::from(Ipv4Addr::new(10, 1, 2, 4)); - let others = vec![ + let others = [ add_peer_with_addr(&mut gs, &[], false, false, addr2.clone()).0, add_peer_with_addr(&mut gs, &[], false, false, addr2.clone()).0, add_peer_with_addr(&mut gs, &[], true, false, addr2.clone()).0, diff --git a/protocols/kad/src/behaviour.rs b/protocols/kad/src/behaviour.rs index 191585b2f22..f5f44baec74 100644 --- a/protocols/kad/src/behaviour.rs +++ b/protocols/kad/src/behaviour.rs @@ -1230,7 +1230,7 @@ where let addrs = peer.multiaddrs.iter().cloned().collect(); query.peers.addresses.insert(peer.node_id, addrs); } - query.on_success(source, others_iter.cloned().map(|kp| kp.node_id)) + query.on_success(source, others_iter.map(|kp| kp.node_id)) } } From e7fea6c84a7ce8eba7b84a92ed189e17acfcc674 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Thu, 2 Oct 2025 13:29:23 +0100 Subject: [PATCH 23/68] chore(ci): address cargo deny issues Pull-Request: #6172. 
--- Cargo.lock | 50 ++++++++++++++------------------------------------ 1 file changed, 14 insertions(+), 36 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2eeaab5bb83..c1e2bd967b0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1592,8 +1592,8 @@ dependencies = [ "aho-corasick", "bstr", "log", - "regex-automata 0.4.9", - "regex-syntax 0.8.5", + "regex-automata", + "regex-syntax", ] [[package]] @@ -3385,11 +3385,11 @@ dependencies = [ [[package]] name = "matchers" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" dependencies = [ - "regex-automata 0.1.10", + "regex-automata", ] [[package]] @@ -3704,12 +3704,11 @@ dependencies = [ [[package]] name = "nu-ansi-term" -version = "0.46.0" +version = "0.50.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +checksum = "d4a28e057d01f97e61255210fcff094d74ed0466038633e95017f5beb68e4399" dependencies = [ - "overload", - "winapi", + "windows-sys 0.52.0", ] [[package]] @@ -4002,12 +4001,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "overload" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" - [[package]] name = "p256" version = "0.13.2" @@ -4604,17 +4597,8 @@ checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.9", - "regex-syntax 0.8.5", -] - -[[package]] -name = "regex-automata" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" -dependencies = [ - "regex-syntax 0.6.29", + "regex-automata", + "regex-syntax", ] [[package]] @@ -4625,15 +4609,9 @@ checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.5", + "regex-syntax", ] -[[package]] -name = "regex-syntax" -version = "0.6.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" - [[package]] name = "regex-syntax" version = "0.8.5" @@ -5986,14 +5964,14 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.19" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" +checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5" dependencies = [ "matchers", "nu-ansi-term", "once_cell", - "regex", + "regex-automata", "sharded-slab", "smallvec", "thread_local", From 6cb5244c0cb17f1e37ad983898a1069b1557303f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Fri, 3 Oct 2025 13:04:13 +0100 Subject: [PATCH 24/68] polish PR --- protocols/gossipsub/src/behaviour.rs | 74 ++++++++++++++-------------- protocols/gossipsub/src/partial.rs | 2 +- protocols/gossipsub/src/protocol.rs | 1 - protocols/gossipsub/src/types.rs | 2 +- 4 files changed, 38 insertions(+), 41 deletions(-) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index ab740137d34..56d80c68c2f 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ 
b/protocols/gossipsub/src/behaviour.rs @@ -147,6 +147,7 @@ pub enum Event { /// A new partial message has been received. #[cfg(feature = "partial_messages")] Partial { + /// The topic of the partial message. topic_id: TopicHash, /// The peer that forwarded us this message. propagation_source: PeerId, @@ -154,7 +155,7 @@ pub enum Event { group_id: Vec, /// The partial message data. message: Option>, - /// The partial message iwant. + /// The partial message metadata, what peer has and wants. metadata: Option>, }, /// A remote subscribed to a topic. @@ -845,11 +846,8 @@ where ) -> Result<(), PublishError> { let topic_id = topic.into(); - let metadata = partial_message.parts_metadata().as_ref().to_vec(); - let group_id = partial_message.group_id().as_ref().to_vec(); - // TODO: should we construct a recipient list just for partials? let recipient_peers = self.get_publish_peers(&topic_id, false); for peer_id in recipient_peers.iter() { // TODO: this can be optimized, we are going to get the peer again on `send_message` @@ -863,7 +861,7 @@ where let peer_partial = peer_partials.entry(group_id.clone()).or_default(); let Ok((message_data, rest_wanted)) = partial_message - .partial_message_bytes_from_metadata(&peer_partial.metadata) + .partial_message_bytes_from_metadata(peer_partial.metadata.as_ref()) .map(|(m, r)| (m.as_ref().to_vec(), r.map(|r| r.as_ref().to_vec()))) else { tracing::error!(peer = %peer_id, group_id = ?group_id, @@ -873,11 +871,13 @@ where }; match rest_wanted { - // No new data to send peer. - Some(r) if r == peer_partial.metadata => { - continue; + r @ Some(_) => { + // No new data to send peer. + if r == peer_partial.metadata { + continue; + } + peer_partial.metadata = r; } - Some(r) => peer_partial.metadata = r, // Peer partial is now complete // remove it from the list None => { @@ -889,7 +889,7 @@ where *peer_id, RpcOut::PartialMessage { message: message_data, - metadata: metadata.clone(), + metadata: partial_message.parts_metadata().as_ref().to_vec(), group_id: group_id.clone(), topic_id: topic_id.clone(), }, @@ -1676,7 +1676,7 @@ where } } - /// Handle incoming partial message from a peer + /// Handle incoming partial message from a peer. #[cfg(feature = "partial_messages")] fn handle_partial_message(&mut self, peer_id: &PeerId, partial_message: PartialMessage) { tracing::debug!( @@ -1686,7 +1686,7 @@ where "Received partial message" ); - // Check if peer exists + // Check if peer exists. let Some(peer) = self.connected_peers.get_mut(peer_id) else { tracing::error!( peer=%peer_id, @@ -1703,13 +1703,11 @@ where .or_default(); // Noop if the received partial is the same we already have. 
- if partial_message.metadata.as_ref() == Some(&peer_partial.metadata) { + if partial_message.metadata == peer_partial.metadata { return; } - if let Some(ref metadata) = partial_message.metadata { - peer_partial.metadata = metadata.clone(); - } + peer_partial.metadata = partial_message.metadata.clone(); self.events .push_back(ToSwarm::GenerateEvent(Event::Partial { @@ -2117,7 +2115,7 @@ where let filtered_topics = match self .subscription_filter - .filter_incoming_subscriptions(subscriptions, &peer.topics.iter().cloned().collect()) + .filter_incoming_subscriptions(subscriptions, &peer.topics) { Ok(topics) => topics, Err(s) => { @@ -3246,25 +3244,25 @@ where // remove peer from all mappings for topic in &connected_peer.topics { // check the mesh for the topic - if let Some(mesh_peers) = self.mesh.get_mut(&topic) { + if let Some(mesh_peers) = self.mesh.get_mut(topic) { // check if the peer is in the mesh and remove it if mesh_peers.remove(&peer_id) { #[cfg(feature = "metrics")] if let Some(m) = self.metrics.as_mut() { - m.peers_removed(&topic, Churn::Dc, 1); - m.set_mesh_peers(&topic, mesh_peers.len()); + m.peers_removed(topic, Churn::Dc, 1); + m.set_mesh_peers(topic, mesh_peers.len()); } }; } #[cfg(feature = "metrics")] if let Some(m) = self.metrics.as_mut() { - m.dec_topic_peers(&topic); + m.dec_topic_peers(topic); } // remove from fanout self.fanout - .get_mut(&topic) + .get_mut(topic) .map(|peers| peers.remove(&peer_id)); } @@ -3371,20 +3369,20 @@ where connected_peer.connections.push(connection_id); let queue = connected_peer.messages.clone(); - // if connected_peer.connections.len() <= 1 { // If this is the first connection send extensions message. - self.send_message( - peer_id, - RpcOut::Extensions(Extensions { - test_extension: Some(true), - partial_messages: if cfg!(feature = "partial_messages") { - Some(true) - } else { - None - }, - }), - ); - // } + if connected_peer.connections.len() <= 1 { + self.send_message( + peer_id, + RpcOut::Extensions(Extensions { + test_extension: Some(true), + partial_messages: if cfg!(feature = "partial_messages") { + Some(true) + } else { + None + }, + }), + ); + } // This clones a reference to the Queue so any new handlers reference the same underlying // queue. No data is actually cloned here. @@ -3418,8 +3416,8 @@ where connected_peer.connections.push(connection_id); let queue = connected_peer.messages.clone(); + // If this is the first connection send extensions message. if connected_peer.connections.len() <= 1 { - // If this is the first connection send extensions message. 
self.send_message( peer_id, RpcOut::Extensions(Extensions { @@ -3699,7 +3697,7 @@ fn peer_added_to_mesh( if let Some(peer) = connections.get(&peer_id) { for topic in &peer.topics { if !new_topics.contains(&topic) { - if let Some(mesh_peers) = mesh.get(&topic) { + if let Some(mesh_peers) = mesh.get(topic) { if mesh_peers.contains(&peer_id) { // the peer is already in a mesh for another topic return; @@ -3741,7 +3739,7 @@ fn peer_removed_from_mesh( if let Some(peer) = connections.get(&peer_id) { for topic in &peer.topics { if topic != old_topic { - if let Some(mesh_peers) = mesh.get(&topic) { + if let Some(mesh_peers) = mesh.get(topic) { if mesh_peers.contains(&peer_id) { // the peer exists in another mesh still return; diff --git a/protocols/gossipsub/src/partial.rs b/protocols/gossipsub/src/partial.rs index 61d3b352a7e..1d6ca5a8476 100644 --- a/protocols/gossipsub/src/partial.rs +++ b/protocols/gossipsub/src/partial.rs @@ -59,7 +59,7 @@ pub trait Partial { /// - Optional remaining metadata if more parts are still available after this one fn partial_message_bytes_from_metadata( &self, - metadata: impl AsRef<[u8]>, + metadata: Option>, ) -> Result<(impl AsRef<[u8]>, Option>), PartialMessageError>; /// Extends this message with received partial message data. diff --git a/protocols/gossipsub/src/protocol.rs b/protocols/gossipsub/src/protocol.rs index 09df77deb04..335ba1c307a 100644 --- a/protocols/gossipsub/src/protocol.rs +++ b/protocols/gossipsub/src/protocol.rs @@ -579,7 +579,6 @@ impl Decoder for GossipsubCodec { #[cfg(feature = "partial_messages")] let partial_message = rpc.partial.and_then(|partial_proto| { - // Extract topic and group context let Some(topic_id_bytes) = partial_proto.topicID else { tracing::debug!("Partial message without topic_id, discarding"); return None; diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs index f931f2f4548..ea5183eca8b 100644 --- a/protocols/gossipsub/src/types.rs +++ b/protocols/gossipsub/src/types.rs @@ -118,7 +118,7 @@ pub(crate) struct PeerDetails { #[derive(Debug)] pub(crate) struct PartialData { /// The current peer partial metadata. - pub(crate) metadata: Vec, + pub(crate) metadata: Option>, /// The remaining heartbeats for this message to be deleted. pub(crate) ttl: usize, } From bc3d7ba13a39f7a8c2a367726aa0254d59e84ee1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Fri, 3 Oct 2025 13:05:30 +0100 Subject: [PATCH 25/68] push publish method above for smaller diff --- protocols/gossipsub/src/behaviour.rs | 212 +++++++++++++-------------- 1 file changed, 106 insertions(+), 106 deletions(-) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 56d80c68c2f..26452d2d836 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -607,6 +607,112 @@ where true } + /// Publishes a message with multiple topics to the network. + pub fn publish( + &mut self, + topic: impl Into, + data: impl Into>, + ) -> Result { + let data = data.into(); + let topic = topic.into(); + + // Transform the data before building a raw_message. + let transformed_data = self + .data_transform + .outbound_transform(&topic.clone(), data.clone())?; + + let max_transmit_size_for_topic = self + .config + .protocol_config() + .max_transmit_size_for_topic(&topic); + + // check that the size doesn't exceed the max transmission size. 
+ if transformed_data.len() > max_transmit_size_for_topic { + return Err(PublishError::MessageTooLarge); + } + + let raw_message = self.build_raw_message(topic, transformed_data)?; + + // calculate the message id from the un-transformed data + let msg_id = self.config.message_id(&Message { + source: raw_message.source, + data, // the uncompressed form + sequence_number: raw_message.sequence_number, + topic: raw_message.topic.clone(), + }); + + // Check the if the message has been published before + if self.duplicate_cache.contains(&msg_id) { + // This message has already been seen. We don't re-publish messages that have already + // been published on the network. + tracing::warn!( + message_id=%msg_id, + "Not publishing a message that has already been published" + ); + return Err(PublishError::Duplicate); + } + + tracing::trace!(message_id=%msg_id, "Publishing message"); + + let topic_hash = raw_message.topic.clone(); + + let recipient_peers = self.get_publish_peers(&topic_hash, true); + + // If the message isn't a duplicate and we have sent it to some peers add it to the + // duplicate cache and memcache. + self.duplicate_cache.insert(msg_id.clone()); + self.mcache.put(&msg_id, raw_message.clone()); + + // Consider the message as delivered for gossip promises. + self.gossip_promises.message_delivered(&msg_id); + + // Send to peers we know are subscribed to the topic. + let mut publish_failed = true; + for peer_id in recipient_peers.iter() { + tracing::trace!(peer=%peer_id, "Sending message to peer"); + // If enabled, Send first an IDONTWANT so that if we are slower than forwarders + // publishing the original message we don't receive it back. + if raw_message.raw_protobuf_len() > self.config.idontwant_message_size_threshold() + && self.config.idontwant_on_publish() + { + self.send_message( + *peer_id, + RpcOut::IDontWant(IDontWant { + message_ids: vec![msg_id.clone()], + }), + ); + } + + if self.send_message( + *peer_id, + RpcOut::Publish { + message_id: msg_id.clone(), + message: raw_message.clone(), + timeout: Delay::new(self.config.publish_queue_duration()), + }, + ) { + publish_failed = false + } + } + + if recipient_peers.is_empty() { + return Err(PublishError::NoPeersSubscribedToTopic); + } + + if publish_failed { + return Err(PublishError::AllQueuesFull(recipient_peers.len())); + } + + tracing::debug!(message_id=%msg_id, "Published message"); + + #[cfg(feature = "metrics")] + if let Some(metrics) = self.metrics.as_mut() { + metrics.register_published_message(&topic_hash); + } + + Ok(msg_id) + } + // Get Peers from the mesh or fanout to publish a message to. // If partial set, filter out peers who only want partial messages for the topic. fn get_publish_peers(&mut self, topic_hash: &TopicHash, partial: bool) -> HashSet { @@ -732,112 +838,6 @@ where recipient_peers } - /// Publishes a message with multiple topics to the network. - pub fn publish( - &mut self, - topic: impl Into, - data: impl Into>, - ) -> Result { - let data = data.into(); - let topic = topic.into(); - - // Transform the data before building a raw_message. - let transformed_data = self - .data_transform - .outbound_transform(&topic.clone(), data.clone())?; - - let max_transmit_size_for_topic = self - .config - .protocol_config() - .max_transmit_size_for_topic(&topic); - - // check that the size doesn't exceed the max transmission size. 
-        if transformed_data.len() > max_transmit_size_for_topic {
-            return Err(PublishError::MessageTooLarge);
-        }
-
-        let raw_message = self.build_raw_message(topic, transformed_data)?;
-
-        // calculate the message id from the un-transformed data
-        let msg_id = self.config.message_id(&Message {
-            source: raw_message.source,
-            data, // the uncompressed form
-            sequence_number: raw_message.sequence_number,
-            topic: raw_message.topic.clone(),
-        });
-
-        // Check the if the message has been published before
-        if self.duplicate_cache.contains(&msg_id) {
-            // This message has already been seen. We don't re-publish messages that have already
-            // been published on the network.
-            tracing::warn!(
-                message_id=%msg_id,
-                "Not publishing a message that has already been published"
-            );
-            return Err(PublishError::Duplicate);
-        }
-
-        tracing::trace!(message_id=%msg_id, "Publishing message");
-
-        let topic_hash = raw_message.topic.clone();
-
-        let recipient_peers = self.get_publish_peers(&topic_hash, true);
-
-        // If the message isn't a duplicate and we have sent it to some peers add it to the
-        // duplicate cache and memcache.
-        self.duplicate_cache.insert(msg_id.clone());
-        self.mcache.put(&msg_id, raw_message.clone());
-
-        // Consider the message as delivered for gossip promises.
-        self.gossip_promises.message_delivered(&msg_id);
-
-        // Send to peers we know are subscribed to the topic.
-        let mut publish_failed = true;
-        for peer_id in recipient_peers.iter() {
-            tracing::trace!(peer=%peer_id, "Sending message to peer");
-            // If enabled, Send first an IDONTWANT so that if we are slower than forwarders
-            // publishing the original message we don't receive it back.
-            if raw_message.raw_protobuf_len() > self.config.idontwant_message_size_threshold()
-                && self.config.idontwant_on_publish()
-            {
-                self.send_message(
-                    *peer_id,
-                    RpcOut::IDontWant(IDontWant {
-                        message_ids: vec![msg_id.clone()],
-                    }),
-                );
-            }
-
-            if self.send_message(
-                *peer_id,
-                RpcOut::Publish {
-                    message: raw_message.clone(),
-                    timeout: Delay::new(self.config.publish_queue_duration()),
-                },
-            ) {
-                publish_failed = false
-            }
-        }
-
-        if recipient_peers.is_empty() {
-            return Err(PublishError::NoPeersSubscribedToTopic);
-        }
-
-        if publish_failed {
-            return Err(PublishError::AllQueuesFull(recipient_peers.len()));
-        }
-
-        tracing::debug!(message_id=%msg_id, "Published message");
-
-        #[cfg(feature = "metrics")]
-        if let Some(metrics) = self.metrics.as_mut() {
-            metrics.register_published_message(&topic_hash);
-        }
-
-        Ok(msg_id)
-    }
-
     #[cfg(feature = "partial_messages")]
     pub fn publish_partial(
         &mut self,

From 79f097ed0684e3dd4a839b921369823c04258a95 Mon Sep 17 00:00:00 2001
From: Sergey Kaunov
Date: Fri, 3 Oct 2025 19:34:16 +0300
Subject: [PATCH 26/68] fix(gossipsub): remove duplicate config builder method

Noticed two methods that duplicated each other's logic precisely, kept the
one with the clearer name, and composed and improved the doc a tiny bit.

Pull-Request: #6173.
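For context, the retained builder method takes the size limit first and the
topic hash second, as the updated tests below show. A minimal sketch of
setting a per-topic transmit cap (hypothetical topic name; assumes the crate's
public API under the `libp2p_gossipsub` crate name):

```rust
use libp2p_gossipsub::{Config, ConfigBuilder, IdentTopic};

fn config_with_topic_limit() -> Config {
    let topic = IdentTopic::new("example-topic");
    ConfigBuilder::default()
        // Messages published on this topic may be at most 2048 bytes;
        // all other topics keep the global max_transmit_size default.
        .max_transmit_size_for_topic(2048, topic.hash())
        .build()
        .expect("valid gossipsub config")
}
```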
--- protocols/gossipsub/CHANGELOG.md | 3 +++ protocols/gossipsub/src/behaviour/tests.rs | 12 ++++++------ protocols/gossipsub/src/config.rs | 9 --------- 3 files changed, 9 insertions(+), 15 deletions(-) diff --git a/protocols/gossipsub/CHANGELOG.md b/protocols/gossipsub/CHANGELOG.md index 0bfc18b5876..c0749bb30ee 100644 --- a/protocols/gossipsub/CHANGELOG.md +++ b/protocols/gossipsub/CHANGELOG.md @@ -14,6 +14,9 @@ - Fix incorrect default values in ConfigBuilder See [PR 6113](https://github.com/libp2p/rust-libp2p/pull/6113) +- Remove duplicated config `set_topic_max_transmit_size` method, prefer `max_transmit_size_for_topic`. + See [PR 6173](https://github.com/libp2p/rust-libp2p/pull/6173). + ## 0.49.2 - Relax `Behaviour::with_metrics` requirements, do not require DataTransform and TopicSubscriptionFilter to also impl Default diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index a17d1426760..394bc4cc7a8 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -6471,7 +6471,7 @@ fn test_publish_message_with_default_transmit_size_config() { let topic_hash = topic.hash(); let config = ConfigBuilder::default() - .set_topic_max_transmit_size(topic_hash.clone(), Config::default_max_transmit_size()) + .max_transmit_size_for_topic(Config::default_max_transmit_size(), topic_hash.clone()) .validation_mode(ValidationMode::Strict) .build() .unwrap(); @@ -6503,7 +6503,7 @@ fn test_publish_large_message_with_default_transmit_size_config() { let topic_hash = topic.hash(); let config = ConfigBuilder::default() - .set_topic_max_transmit_size(topic_hash.clone(), Config::default_max_transmit_size()) + .max_transmit_size_for_topic(Config::default_max_transmit_size(), topic_hash.clone()) .validation_mode(ValidationMode::Strict) .build() .unwrap(); @@ -6531,7 +6531,7 @@ fn test_publish_message_with_specific_transmit_size_config() { let max_topic_transmit_size = 2000; let config = ConfigBuilder::default() - .set_topic_max_transmit_size(topic_hash.clone(), max_topic_transmit_size) + .max_transmit_size_for_topic(max_topic_transmit_size, topic_hash.clone()) .validation_mode(ValidationMode::Strict) .build() .unwrap(); @@ -6564,7 +6564,7 @@ fn test_publish_large_message_with_specific_transmit_size_config() { let max_topic_transmit_size = 2048; let config = ConfigBuilder::default() - .set_topic_max_transmit_size(topic_hash.clone(), max_topic_transmit_size) + .max_transmit_size_for_topic(max_topic_transmit_size, topic_hash.clone()) .validation_mode(ValidationMode::Strict) .build() .unwrap(); @@ -6592,7 +6592,7 @@ fn test_validation_error_message_size_too_large_topic_specific() { let max_size = 2048; let config = ConfigBuilder::default() - .set_topic_max_transmit_size(topic_hash.clone(), max_size) + .max_transmit_size_for_topic(max_size, topic_hash.clone()) .validation_mode(ValidationMode::None) .build() .unwrap(); @@ -6696,7 +6696,7 @@ fn test_validation_message_size_within_topic_specific() { let max_size = 2048; let config = ConfigBuilder::default() - .set_topic_max_transmit_size(topic_hash.clone(), max_size) + .max_transmit_size_for_topic(max_size, topic_hash.clone()) .validation_mode(ValidationMode::None) .build() .unwrap(); diff --git a/protocols/gossipsub/src/config.rs b/protocols/gossipsub/src/config.rs index fa685f3085a..615bd08da30 100644 --- a/protocols/gossipsub/src/config.rs +++ b/protocols/gossipsub/src/config.rs @@ -1075,15 +1075,6 @@ impl ConfigBuilder { self } - /// The topic max size sets message 
 sizes for a given topic.
-    pub fn set_topic_max_transmit_size(&mut self, topic: TopicHash, max_size: usize) -> &mut Self {
-        self.config
-            .protocol
-            .max_transmit_sizes
-            .insert(topic, max_size);
-        self
-    }
-
     /// Constructs a [`Config`] from the given configuration and validates the settings.
     pub fn build(&self) -> Result<Config, ConfigBuilderError> {
         // check all constraints on config

From 2fb2486d3c981f1931d6428ec7ec377ea21db5a4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?=
Date: Tue, 7 Oct 2025 11:54:12 +0100
Subject: [PATCH 27/68] feat(gossipsub): upgrade internal message queue

This is the upstreaming of https://github.com/sigp/rust-libp2p/pull/570, which
has been used by https://github.com/sigp/lighthouse/ for some weeks now.

This started with an attempt to solve libp2p#5751 using the previous internal
async-channel. After multiple ideas were discussed out of band, replacing the
async-channel with a more tailored internal priority queue seemed inevitable.

This priority queue allows us to implement the cancellation of in-flight
IDONTWANTs very cleanly with the `remove_data_messages` function. Clearing
stale messages likewise becomes simpler, as we also make use of
`remove_data_messages`.

Pull-Request: #6175.
---
 protocols/gossipsub/CHANGELOG.md | 6 +-
 protocols/gossipsub/src/behaviour.rs | 129 ++--
 protocols/gossipsub/src/behaviour/tests.rs | 840 +++++++++------------
 protocols/gossipsub/src/handler.rs | 24 +-
 protocols/gossipsub/src/lib.rs | 2 +-
 protocols/gossipsub/src/metrics.rs | 146 ++--
 protocols/gossipsub/src/protocol.rs | 1 +
 protocols/gossipsub/src/queue.rs | 294 ++++++++
 protocols/gossipsub/src/types.rs | 70 +-
 9 files changed, 840 insertions(+), 672 deletions(-)
 create mode 100644 protocols/gossipsub/src/queue.rs

diff --git a/protocols/gossipsub/CHANGELOG.md b/protocols/gossipsub/CHANGELOG.md
index c0749bb30ee..6e19eaf1be9 100644
--- a/protocols/gossipsub/CHANGELOG.md
+++ b/protocols/gossipsub/CHANGELOG.md
@@ -13,10 +13,13 @@
 - Fix incorrect default values in ConfigBuilder
   See [PR 6113](https://github.com/libp2p/rust-libp2p/pull/6113)
-
+
 - Remove duplicated config `set_topic_max_transmit_size` method, prefer `max_transmit_size_for_topic`.
   See [PR 6173](https://github.com/libp2p/rust-libp2p/pull/6173).
 
+- Switch the internal `async-channel` used to dispatch messages from `NetworkBehaviour` to the `ConnectionHandler`
+  with an internal priority queue. See [PR 6175](https://github.com/libp2p/rust-libp2p/pull/6175)
+
 ## 0.49.2
 
 - Relax `Behaviour::with_metrics` requirements, do not require DataTransform and TopicSubscriptionFilter to also impl Default
 
@@ -37,6 +40,7 @@
 - Feature gate metrics related code. This changes some `Behaviour` constructor methods.
   See [PR 6020](https://github.com/libp2p/rust-libp2p/pull/6020)
+
 - Send IDONTWANT before Publishing a new message.
See [PR 6017](https://github.com/libp2p/rust-libp2p/pull/6017) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 4c2b89bed31..a0a3a16f0e7 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -61,7 +61,7 @@ use crate::{ mcache::MessageCache, peer_score::{PeerScore, PeerScoreParams, PeerScoreState, PeerScoreThresholds, RejectReason}, protocol::SIGNING_PREFIX, - rpc::Sender, + queue::Queue, rpc_proto::proto, subscription_filter::{AllowAllSubscriptionFilter, TopicSubscriptionFilter}, time_cache::DuplicateCache, @@ -751,6 +751,7 @@ where if self.send_message( *peer_id, RpcOut::Publish { + message_id: msg_id.clone(), message: raw_message.clone(), timeout: Delay::new(self.config.publish_queue_duration()), }, @@ -1341,6 +1342,7 @@ where self.send_message( *peer_id, RpcOut::Forward { + message_id: id.clone(), message: msg, timeout: Delay::new(self.config.forward_queue_duration()), }, @@ -2081,9 +2083,9 @@ where // steady-state size of the queues. #[cfg(feature = "metrics")] if let Some(m) = &mut self.metrics { - for sender_queue in self.connected_peers.values().map(|v| &v.sender) { - m.observe_priority_queue_size(sender_queue.priority_queue_len()); - m.observe_non_priority_queue_size(sender_queue.non_priority_queue_len()); + for sender_queue in self.connected_peers.values().map(|v| &v.messages) { + m.observe_priority_queue_size(sender_queue.priority_len()); + m.observe_non_priority_queue_size(sender_queue.non_priority_len()); } } @@ -2499,6 +2501,11 @@ where // Report expired messages for (peer_id, failed_messages) in self.failed_messages.drain() { tracing::debug!("Peer couldn't consume messages: {:?}", failed_messages); + #[cfg(feature = "metrics")] + if let Some(metrics) = self.metrics.as_mut() { + metrics.observe_failed_priority_messages(failed_messages.priority); + metrics.observe_failed_non_priority_messages(failed_messages.non_priority); + } self.events .push_back(ToSwarm::GenerateEvent(Event::SlowPeer { peer_id, @@ -2746,6 +2753,7 @@ where self.send_message( *peer_id, RpcOut::Forward { + message_id: msg_id.clone(), message: message.clone(), timeout: Delay::new(self.config.forward_queue_duration()), }, @@ -2874,8 +2882,9 @@ where return false; } - // Try sending the message to the connection handler. - match peer.sender.send_message(rpc) { + // Try sending the message to the connection handler, + // High priority messages should not fail. + match peer.messages.try_push(rpc) { Ok(()) => true, Err(rpc) => { // Sending failed because the channel is full. @@ -2883,24 +2892,10 @@ where // Update failed message counter. let failed_messages = self.failed_messages.entry(peer_id).or_default(); - match rpc { - RpcOut::Publish { .. } => { - failed_messages.priority += 1; - failed_messages.publish += 1; - } - RpcOut::Forward { .. } => { - failed_messages.non_priority += 1; - failed_messages.forward += 1; - } - RpcOut::IWant(_) | RpcOut::IHave(_) | RpcOut::IDontWant(_) => { - failed_messages.non_priority += 1; - } - RpcOut::Graft(_) - | RpcOut::Prune(_) - | RpcOut::Subscribe(_) - | RpcOut::Unsubscribe(_) => { - unreachable!("Channel for highpriority control messages is unbounded and should always be open.") - } + if rpc.priority() { + failed_messages.priority += 1; + } else { + failed_messages.non_priority += 1; } // Update peer score. @@ -3125,23 +3120,22 @@ where // The protocol negotiation occurs once a message is sent/received. 
Once this happens we // update the type of peer that this is in order to determine which kind of routing should // occur. - let connected_peer = self - .connected_peers - .entry(peer_id) - .or_insert_with(|| PeerDetails { - kind: PeerKind::Floodsub, - connections: vec![], - outbound: false, - sender: Sender::new(self.config.connection_handler_queue_len()), - topics: Default::default(), - dont_send: LinkedHashMap::new(), - }); + let connected_peer = self.connected_peers.entry(peer_id).or_insert(PeerDetails { + kind: PeerKind::Floodsub, + connections: vec![], + outbound: false, + messages: Queue::new(self.config.connection_handler_queue_len()), + topics: Default::default(), + dont_send: LinkedHashMap::new(), + }); // Add the new connection connected_peer.connections.push(connection_id); + // This clones a reference to the Queue so any new handlers reference the same underlying + // queue. No data is actually cloned here. Ok(Handler::new( self.config.protocol_config(), - connected_peer.sender.new_receiver(), + connected_peer.messages.clone(), )) } @@ -3153,25 +3147,24 @@ where _: Endpoint, _: PortUse, ) -> Result, ConnectionDenied> { - let connected_peer = self - .connected_peers - .entry(peer_id) - .or_insert_with(|| PeerDetails { - kind: PeerKind::Floodsub, - connections: vec![], - // Diverging from the go implementation we only want to consider a peer as outbound - // peer if its first connection is outbound. - outbound: !self.px_peers.contains(&peer_id), - sender: Sender::new(self.config.connection_handler_queue_len()), - topics: Default::default(), - dont_send: LinkedHashMap::new(), - }); + let connected_peer = self.connected_peers.entry(peer_id).or_insert(PeerDetails { + kind: PeerKind::Floodsub, + connections: vec![], + // Diverging from the go implementation we only want to consider a peer as outbound peer + // if its first connection is outbound. + outbound: !self.px_peers.contains(&peer_id), + messages: Queue::new(self.config.connection_handler_queue_len()), + topics: Default::default(), + dont_send: LinkedHashMap::new(), + }); // Add the new connection connected_peer.connections.push(connection_id); + // This clones a reference to the Queue so any new handlers reference the same underlying + // queue. No data is actually cloned here. Ok(Handler::new( self.config.protocol_config(), - connected_peer.sender.new_receiver(), + connected_peer.messages.clone(), )) } @@ -3213,6 +3206,8 @@ where } } } + // rpc is only used for metrics code. + #[allow(unused_variables)] HandlerEvent::MessageDropped(rpc) => { // Account for this in the scoring logic if let PeerScoreState::Active(peer_score) = &mut self.peer_score { @@ -3221,32 +3216,7 @@ where // Keep track of expired messages for the application layer. let failed_messages = self.failed_messages.entry(propagation_source).or_default(); - failed_messages.timeout += 1; - match rpc { - RpcOut::Publish { .. } => { - failed_messages.publish += 1; - } - RpcOut::Forward { .. } => { - failed_messages.forward += 1; - } - _ => {} - } - - // Record metrics on the failure. - #[cfg(feature = "metrics")] - if let Some(metrics) = self.metrics.as_mut() { - match rpc { - RpcOut::Publish { message, .. } => { - metrics.publish_msg_dropped(&message.topic); - metrics.timeout_msg_dropped(&message.topic); - } - RpcOut::Forward { message, .. 
} => { - metrics.forward_msg_dropped(&message.topic); - metrics.timeout_msg_dropped(&message.topic); - } - _ => {} - } - } + failed_messages.non_priority += 1; } HandlerEvent::Message { rpc, @@ -3345,10 +3315,17 @@ where "Could not handle IDONTWANT, peer doesn't exist in connected peer list"); continue; }; + + // Remove messages from the queue. + #[allow(unused)] + let removed = peer.messages.remove_data_messages(&message_ids); + #[cfg(feature = "metrics")] if let Some(metrics) = self.metrics.as_mut() { metrics.register_idontwant(message_ids.len()); + metrics.register_removed_messages(removed); } + for message_id in message_ids { peer.dont_send.insert(message_id, Instant::now()); // Don't exceed capacity. diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index 394bc4cc7a8..7dd6c4f3689 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -20,7 +20,7 @@ // Collection of tests for the gossipsub network behaviour -use std::{future, net::Ipv4Addr, thread::sleep}; +use std::{net::Ipv4Addr, thread::sleep}; use asynchronous_codec::{Decoder, Encoder}; use byteorder::{BigEndian, ByteOrder}; @@ -32,7 +32,6 @@ use super::*; use crate::{ config::{ConfigBuilder, TopicMeshConfig}, protocol::GossipsubCodec, - rpc::Receiver, subscription_filter::WhitelistSubscriptionFilter, types::RpcIn, IdentTopic as Topic, @@ -63,7 +62,7 @@ where ) -> ( Behaviour, Vec, - HashMap, + HashMap, Vec, ) { let keypair = libp2p_identity::Keypair::generate_ed25519(); @@ -92,11 +91,11 @@ where // build and connect peer_no random peers let mut peers = vec![]; - let mut receivers = HashMap::new(); + let mut queues = HashMap::new(); let empty = vec![]; for i in 0..self.peer_no { - let (peer, receiver) = add_peer_with_addr_and_kind( + let (peer, queue) = add_peer_with_addr_and_kind( &mut gs, if self.to_subscribe { &topic_hashes @@ -109,10 +108,10 @@ where self.peer_kind.or(Some(PeerKind::Gossipsubv1_1)), ); peers.push(peer); - receivers.insert(peer, receiver); + queues.insert(peer, queue); } - (gs, peers, receivers, topic_hashes) + (gs, peers, queues, topic_hashes) } fn peer_no(mut self, peer_no: usize) -> Self { @@ -181,7 +180,7 @@ fn add_peer( topic_hashes: &[TopicHash], outbound: bool, explicit: bool, -) -> (PeerId, Receiver) +) -> (PeerId, Queue) where D: DataTransform + Default + Clone + Send + 'static, F: TopicSubscriptionFilter + Clone + Default + Send + 'static, @@ -195,7 +194,7 @@ fn add_peer_with_addr( outbound: bool, explicit: bool, address: Multiaddr, -) -> (PeerId, Receiver) +) -> (PeerId, Queue) where D: DataTransform + Default + Clone + Send + 'static, F: TopicSubscriptionFilter + Clone + Default + Send + 'static, @@ -217,7 +216,7 @@ fn add_peer_with_addr_and_kind( explicit: bool, address: Multiaddr, kind: Option, -) -> (PeerId, Receiver) +) -> (PeerId, Queue) where D: DataTransform + Default + Clone + Send + 'static, F: TopicSubscriptionFilter + Clone + Default + Send + 'static, @@ -236,8 +235,8 @@ where } }; - let sender = Sender::new(gs.config.connection_handler_queue_len()); - let receiver = sender.new_receiver(); + let queue = Queue::new(gs.config.connection_handler_queue_len()); + let receiver_queue = queue.clone(); let connection_id = ConnectionId::new_unchecked(0); gs.connected_peers.insert( peer, @@ -246,7 +245,7 @@ where outbound, connections: vec![connection_id], topics: Default::default(), - sender, + messages: queue, dont_send: LinkedHashMap::new(), }, ); @@ -281,7 +280,7 @@ where &peer, ); } - (peer, 
receiver) + (peer, receiver_queue) } fn disconnect_peer(gs: &mut Behaviour, peer_id: &PeerId) @@ -438,7 +437,7 @@ fn test_subscribe() { // - run JOIN(topic) let subscribe_topic = vec![String::from("test_subscribe")]; - let (gs, _, receivers, topic_hashes) = inject_nodes1() + let (gs, _, queues, topic_hashes) = inject_nodes1() .peer_no(20) .topics(subscribe_topic) .to_subscribe(true) @@ -450,12 +449,11 @@ fn test_subscribe() { ); // collect all the subscriptions - let subscriptions = receivers + let subscriptions = queues .into_values() - .fold(0, |mut collected_subscriptions, c| { - let priority = c.priority.get_ref(); - while !priority.is_empty() { - if let Ok(RpcOut::Subscribe(_)) = priority.try_recv() { + .fold(0, |mut collected_subscriptions, mut queue| { + while !queue.is_empty() { + if let Some(RpcOut::Subscribe(_)) = queue.try_pop() { collected_subscriptions += 1 } } @@ -481,7 +479,7 @@ fn test_unsubscribe() { .collect::>(); // subscribe to topic_strings - let (mut gs, _, receivers, topic_hashes) = inject_nodes1() + let (mut gs, _, queues, topic_hashes) = inject_nodes1() .peer_no(20) .topics(topic_strings) .to_subscribe(true) @@ -511,12 +509,11 @@ fn test_unsubscribe() { ); // collect all the subscriptions - let subscriptions = receivers + let subscriptions = queues .into_values() - .fold(0, |mut collected_subscriptions, c| { - let priority = c.priority.get_ref(); - while !priority.is_empty() { - if let Ok(RpcOut::Subscribe(_)) = priority.try_recv() { + .fold(0, |mut collected_subscriptions, mut queue| { + while !queue.is_empty() { + if let Some(RpcOut::Subscribe(_)) = queue.try_pop() { collected_subscriptions += 1 } } @@ -553,14 +550,14 @@ fn test_join() { .map(|t| Topic::new(t.clone())) .collect::>(); - let (mut gs, _, mut receivers, topic_hashes) = inject_nodes1() + let (mut gs, _, mut queues, topic_hashes) = inject_nodes1() .peer_no(20) .topics(topic_strings) .to_subscribe(true) .create_network(); // Flush previous GRAFT messages. - receivers = flush_events(&mut gs, receivers); + queues = flush_events(&mut gs, queues); // unsubscribe, then call join to invoke functionality assert!( @@ -584,31 +581,23 @@ fn test_join() { "Should have added 6 nodes to the mesh" ); - fn count_grafts(receivers: HashMap) -> (usize, HashMap) { - let mut new_receivers = HashMap::new(); + fn count_grafts(queues: HashMap) -> (usize, HashMap) { + let mut new_queues = HashMap::new(); let mut acc = 0; - for (peer_id, c) in receivers.into_iter() { - let priority = c.priority.get_ref(); - while !priority.is_empty() { - if let Ok(RpcOut::Graft(_)) = priority.try_recv() { + for (peer_id, mut queue) in queues.into_iter() { + while !queue.is_empty() { + if let Some(RpcOut::Graft(_)) = queue.try_pop() { acc += 1; } } - new_receivers.insert( - peer_id, - Receiver { - priority_queue_len: c.priority_queue_len, - priority: c.priority, - non_priority: c.non_priority, - }, - ); + new_queues.insert(peer_id, queue); } - (acc, new_receivers) + (acc, new_queues) } // there should be mesh_n GRAFT messages. 
- let (graft_messages, mut receivers) = count_grafts(receivers); + let (graft_messages, mut queues) = count_grafts(queues); assert_eq!( graft_messages, 6, @@ -632,8 +621,8 @@ fn test_join() { &address, ) .unwrap(); - let sender = Sender::new(gs.config.connection_handler_queue_len()); - let receiver = sender.new_receiver(); + let queue = Queue::new(gs.config.connection_handler_queue_len()); + let receiver_queue = queue.clone(); let connection_id = ConnectionId::new_unchecked(0); gs.connected_peers.insert( random_peer, @@ -642,11 +631,11 @@ fn test_join() { outbound: false, connections: vec![connection_id], topics: Default::default(), - sender, + messages: queue, dont_send: LinkedHashMap::new(), }, ); - receivers.insert(random_peer, receiver); + queues.insert(random_peer, receiver_queue); gs.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished { peer_id: random_peer, @@ -683,7 +672,7 @@ fn test_join() { } // there should now 6 graft messages to be sent - let (graft_messages, _) = count_grafts(receivers); + let (graft_messages, _) = count_grafts(queues); assert_eq!( graft_messages, 6, @@ -705,7 +694,7 @@ fn test_publish_without_flood_publishing() { .unwrap(); let publish_topic = String::from("test_publish"); - let (mut gs, _, receivers, topic_hashes) = inject_nodes1() + let (mut gs, _, queues, topic_hashes) = inject_nodes1() .peer_no(20) .topics(vec![publish_topic.clone()]) .to_subscribe(true) @@ -732,12 +721,11 @@ fn test_publish_without_flood_publishing() { gs.publish(Topic::new(publish_topic), publish_data).unwrap(); // Collect all publish messages - let publishes = receivers + let publishes = queues .into_values() - .fold(vec![], |mut collected_publish, c| { - let priority = c.priority.get_ref(); - while !priority.is_empty() { - if let Ok(RpcOut::Publish { message, .. }) = priority.try_recv() { + .fold(vec![], |mut collected_publish, mut queue| { + while !queue.is_empty() { + if let Some(RpcOut::Publish { message, .. }) = queue.try_pop() { collected_publish.push(message); } } @@ -785,7 +773,7 @@ fn test_fanout() { .unwrap(); let fanout_topic = String::from("test_fanout"); - let (mut gs, _, receivers, topic_hashes) = inject_nodes1() + let (mut gs, _, queues, topic_hashes) = inject_nodes1() .peer_no(20) .topics(vec![fanout_topic.clone()]) .to_subscribe(true) @@ -817,12 +805,11 @@ fn test_fanout() { ); // Collect all publish messages - let publishes = receivers + let publishes = queues .into_values() - .fold(vec![], |mut collected_publish, c| { - let priority = c.priority.get_ref(); - while !priority.is_empty() { - if let Ok(RpcOut::Publish { message, .. }) = priority.try_recv() { + .fold(vec![], |mut collected_publish, mut queue| { + while !queue.is_empty() { + if let Some(RpcOut::Publish { message, .. }) = queue.try_pop() { collected_publish.push(message); } } @@ -857,7 +844,7 @@ fn test_fanout() { /// Test the gossipsub NetworkBehaviour peer connection logic. 
#[test] fn test_inject_connected() { - let (gs, peers, receivers, topic_hashes) = inject_nodes1() + let (gs, peers, queues, topic_hashes) = inject_nodes1() .peer_no(20) .topics(vec![String::from("topic1"), String::from("topic2")]) .to_subscribe(true) @@ -865,12 +852,11 @@ fn test_inject_connected() { // check that our subscriptions are sent to each of the peers // collect all the SendEvents - let subscriptions = receivers.into_iter().fold( + let subscriptions = queues.into_iter().fold( HashMap::>::new(), - |mut collected_subscriptions, (peer, c)| { - let priority = c.priority.get_ref(); - while !priority.is_empty() { - if let Ok(RpcOut::Subscribe(topic)) = priority.try_recv() { + |mut collected_subscriptions, (peer, mut queue)| { + while !queue.is_empty() { + if let Some(RpcOut::Subscribe(topic)) = queue.try_pop() { let mut peer_subs = collected_subscriptions.remove(&peer).unwrap_or_default(); peer_subs.push(topic.into_string()); collected_subscriptions.insert(peer, peer_subs); @@ -913,7 +899,7 @@ fn test_handle_received_subscriptions() { .iter() .map(|&t| String::from(t)) .collect(); - let (mut gs, peers, _receivers, topic_hashes) = inject_nodes1() + let (mut gs, peers, _queues, topic_hashes) = inject_nodes1() .peer_no(20) .topics(topics) .to_subscribe(false) @@ -1039,7 +1025,7 @@ fn test_get_random_peers() { connections: vec![ConnectionId::new_unchecked(0)], outbound: false, topics: topics.clone(), - sender: Sender::new(gs.config.connection_handler_queue_len()), + messages: Queue::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), }, ); @@ -1073,7 +1059,7 @@ fn test_get_random_peers() { /// Tests that the correct message is sent when a peer asks for a message in our cache. #[test] fn test_handle_iwant_msg_cached() { - let (mut gs, peers, receivers, _) = inject_nodes1() + let (mut gs, peers, queues, _) = inject_nodes1() .peer_no(20) .topics(Vec::new()) .to_subscribe(true) @@ -1101,12 +1087,11 @@ fn test_handle_iwant_msg_cached() { gs.handle_iwant(&peers[7], vec![msg_id.clone()]); // the messages we are sending - let sent_messages = receivers + let sent_messages = queues .into_values() - .fold(vec![], |mut collected_messages, c| { - let non_priority = c.non_priority.get_ref(); - while !non_priority.is_empty() { - if let Ok(RpcOut::Forward { message, .. }) = non_priority.try_recv() { + .fold(vec![], |mut collected_messages, mut queue| { + while !queue.is_empty() { + if let Some(RpcOut::Forward { message, .. }) = queue.try_pop() { collected_messages.push(message) } } @@ -1125,7 +1110,7 @@ fn test_handle_iwant_msg_cached() { /// Tests that messages are sent correctly depending on the shifting of the message cache. #[test] fn test_handle_iwant_msg_cached_shifted() { - let (mut gs, peers, mut receivers, _) = inject_nodes1() + let (mut gs, peers, mut queues, _) = inject_nodes1() .peer_no(20) .topics(Vec::new()) .to_subscribe(true) @@ -1159,28 +1144,23 @@ fn test_handle_iwant_msg_cached_shifted() { // is the message is being sent? 
let mut message_exists = false; - receivers = receivers.into_iter().map(|(peer_id, c)| { - let non_priority = c.non_priority.get_ref(); - while !non_priority.is_empty() { - if matches!(non_priority.try_recv(), Ok(RpcOut::Forward{message, timeout: _ }) if + queues = queues + .into_iter() + .map(|(peer_id, mut queue)| { + while !queue.is_empty() { + if matches!(queue.try_pop(), Some(RpcOut::Forward{message, ..}) if gs.config.message_id( &gs.data_transform .inbound_transform(message.clone()) .unwrap(), ) == msg_id) - { - message_exists = true; + { + message_exists = true; + } } - } - ( - peer_id, - Receiver { - priority_queue_len: c.priority_queue_len, - priority: c.priority, - non_priority: c.non_priority, - }, - ) - }).collect(); + (peer_id, queue) + }) + .collect(); // default history_length is 5, expect no messages after shift > 5 if shift < 5 { assert!( @@ -1217,7 +1197,7 @@ fn test_handle_iwant_msg_not_cached() { #[test] fn test_handle_iwant_msg_but_already_sent_idontwant() { - let (mut gs, peers, receivers, _) = inject_nodes1() + let (mut gs, peers, queues, _) = inject_nodes1() .peer_no(20) .topics(Vec::new()) .to_subscribe(true) @@ -1263,15 +1243,15 @@ fn test_handle_iwant_msg_but_already_sent_idontwant() { gs.handle_iwant(&peers[1], vec![msg_id.clone()]); // Check that no messages are sent. - receivers.iter().for_each(|(_, receiver)| { - assert!(receiver.non_priority.get_ref().is_empty()); + queues.iter().for_each(|(_, receiver_queue)| { + assert!(receiver_queue.is_empty()); }); } /// tests that an event is created when a peer shares that it has a message we want #[test] fn test_handle_ihave_subscribed_and_msg_not_cached() { - let (mut gs, peers, mut receivers, topic_hashes) = inject_nodes1() + let (mut gs, peers, mut queues, topic_hashes) = inject_nodes1() .peer_no(20) .topics(vec![String::from("topic1")]) .to_subscribe(true) @@ -1284,10 +1264,9 @@ fn test_handle_ihave_subscribed_and_msg_not_cached() { // check that we sent an IWANT request for `unknown id` let mut iwant_exists = false; - let receiver = receivers.remove(&peers[7]).unwrap(); - let non_priority = receiver.non_priority.get_ref(); - while !non_priority.is_empty() { - if let Ok(RpcOut::IWant(IWant { message_ids })) = non_priority.try_recv() { + let mut receiver_queue = queues.remove(&peers[7]).unwrap(); + while !receiver_queue.is_empty() { + if let Some(RpcOut::IWant(IWant { message_ids })) = receiver_queue.try_pop() { if message_ids .iter() .any(|m| *m == MessageId::new(b"unknown id")) @@ -1457,61 +1436,37 @@ fn test_handle_prune_peer_in_mesh() { } fn count_control_msgs( - receivers: HashMap, + queues: HashMap, mut filter: impl FnMut(&PeerId, &RpcOut) -> bool, -) -> (usize, HashMap) { - let mut new_receivers = HashMap::new(); +) -> (usize, HashMap) { + let mut new_queues = HashMap::new(); let mut collected_messages = 0; - for (peer_id, c) in receivers.into_iter() { - let priority = c.priority.get_ref(); - let non_priority = c.non_priority.get_ref(); - while !priority.is_empty() || !non_priority.is_empty() { - if let Ok(rpc) = priority.try_recv() { - if filter(&peer_id, &rpc) { - collected_messages += 1; - } - } - if let Ok(rpc) = non_priority.try_recv() { + for (peer_id, mut queue) in queues.into_iter() { + while !queue.is_empty() { + if let Some(rpc) = queue.try_pop() { if filter(&peer_id, &rpc) { collected_messages += 1; } } } - new_receivers.insert( - peer_id, - Receiver { - priority_queue_len: c.priority_queue_len, - priority: c.priority, - non_priority: c.non_priority, - }, - ); + new_queues.insert(peer_id, queue); 
}
-    (collected_messages, new_receivers)
+    (collected_messages, new_queues)
 }

 fn flush_events<D: DataTransform, F: TopicSubscriptionFilter>(
     gs: &mut Behaviour<D, F>,
-    receivers: HashMap<PeerId, Receiver>,
-) -> HashMap<PeerId, Receiver> {
+    queues: HashMap<PeerId, Queue<RpcOut>>,
+) -> HashMap<PeerId, Queue<RpcOut>> {
     gs.events.clear();
-    let mut new_receivers = HashMap::new();
-    for (peer_id, c) in receivers.into_iter() {
-        let priority = c.priority.get_ref();
-        let non_priority = c.non_priority.get_ref();
-        while !priority.is_empty() || !non_priority.is_empty() {
-            let _ = priority.try_recv();
-            let _ = non_priority.try_recv();
+    let mut new_queues = HashMap::new();
+    for (peer_id, mut queue) in queues.into_iter() {
+        while !queue.is_empty() {
+            let _ = queue.try_pop();
         }
-        new_receivers.insert(
-            peer_id,
-            Receiver {
-                priority_queue_len: c.priority_queue_len,
-                priority: c.priority,
-                non_priority: c.non_priority,
-            },
-        );
+        new_queues.insert(peer_id, queue);
     }
-    new_receivers
+    new_queues
 }

 /// tests that a peer added as explicit peer gets connected to
@@ -1550,7 +1505,7 @@ fn test_explicit_peer_reconnects() {
         .check_explicit_peers_ticks(2)
         .build()
         .unwrap();
-    let (mut gs, others, receivers, _) = inject_nodes1()
+    let (mut gs, others, queues, _) = inject_nodes1()
         .peer_no(1)
         .topics(Vec::new())
         .to_subscribe(true)
@@ -1562,7 +1517,7 @@ fn test_explicit_peer_reconnects() {
     // add peer as explicit peer
     gs.add_explicit_peer(peer);

-    flush_events(&mut gs, receivers);
+    flush_events(&mut gs, queues);

     // disconnect peer
     disconnect_peer(&mut gs, peer);
@@ -1600,7 +1555,7 @@ fn test_explicit_peer_reconnects() {

 #[test]
 fn test_handle_graft_explicit_peer() {
-    let (mut gs, peers, receivers, topic_hashes) = inject_nodes1()
+    let (mut gs, peers, queues, topic_hashes) = inject_nodes1()
         .peer_no(1)
         .topics(vec![String::from("topic1"), String::from("topic2")])
         .to_subscribe(true)
@@ -1617,7 +1572,7 @@ fn test_handle_graft_explicit_peer() {
     assert!(gs.mesh[&topic_hashes[1]].is_empty());

     // check prunes
-    let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| {
+    let (control_msgs, _) = count_control_msgs(queues, |peer_id, m| {
         peer_id == peer
             && match m {
                 RpcOut::Prune(Prune { topic_hash, .. }) => {
@@ -1634,7 +1589,7 @@ fn test_handle_graft_explicit_peer() {

 #[test]
 fn explicit_peers_not_added_to_mesh_on_receiving_subscription() {
-    let (gs, peers, receivers, topic_hashes) = inject_nodes1()
+    let (gs, peers, queues, topic_hashes) = inject_nodes1()
         .peer_no(2)
         .topics(vec![String::from("topic1")])
         .to_subscribe(true)
@@ -1649,7 +1604,7 @@ fn explicit_peers_not_added_to_mesh_on_receiving_subscription() {
     );

     // assert that graft gets created to non-explicit peer
-    let (control_msgs, receivers) = count_control_msgs(receivers, |peer_id, m| {
+    let (control_msgs, queues) = count_control_msgs(queues, |peer_id, m| {
         peer_id == &peers[1] && matches!(m, RpcOut::Graft { .. })
     });
     assert!(
         control_msgs > 0,
         "No graft message got created to non-explicit peer"
     );

     // assert that no graft gets created to explicit peer
-    let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| {
+    let (control_msgs, _) = count_control_msgs(queues, |peer_id, m| {
         peer_id == &peers[0] && matches!(m, RpcOut::Graft { ..
}) }); assert_eq!( @@ -1669,7 +1624,7 @@ fn explicit_peers_not_added_to_mesh_on_receiving_subscription() { #[test] fn do_not_graft_explicit_peer() { - let (mut gs, others, receivers, topic_hashes) = inject_nodes1() + let (mut gs, others, queues, topic_hashes) = inject_nodes1() .peer_no(1) .topics(vec![String::from("topic")]) .to_subscribe(true) @@ -1683,7 +1638,7 @@ fn do_not_graft_explicit_peer() { assert_eq!(gs.mesh[&topic_hashes[0]], BTreeSet::new()); // assert that no graft gets created to explicit peer - let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { + let (control_msgs, _) = count_control_msgs(queues, |peer_id, m| { peer_id == &others[0] && matches!(m, RpcOut::Graft { .. }) }); assert_eq!( @@ -1694,7 +1649,7 @@ fn do_not_graft_explicit_peer() { #[test] fn do_forward_messages_to_explicit_peers() { - let (mut gs, peers, receivers, topic_hashes) = inject_nodes1() + let (mut gs, peers, queues, topic_hashes) = inject_nodes1() .peer_no(2) .topics(vec![String::from("topic1"), String::from("topic2")]) .to_subscribe(true) @@ -1715,10 +1670,9 @@ fn do_forward_messages_to_explicit_peers() { }; gs.handle_received_message(message.clone(), &local_id); assert_eq!( - receivers.into_iter().fold(0, |mut fwds, (peer_id, c)| { - let non_priority = c.non_priority.get_ref(); - while !non_priority.is_empty() { - if matches!(non_priority.try_recv(), Ok(RpcOut::Forward{message: m, timeout: _}) if peer_id == peers[0] && m.data == message.data) { + queues.into_iter().fold(0, |mut fwds, (peer_id, mut queue)| { + while !queue.is_empty() { + if matches!(queue.try_pop(), Some(RpcOut::Forward{message: m, ..}) if peer_id == peers[0] && m.data == message.data) { fwds +=1; } } @@ -1731,7 +1685,7 @@ fn do_forward_messages_to_explicit_peers() { #[test] fn explicit_peers_not_added_to_mesh_on_subscribe() { - let (mut gs, peers, receivers, _) = inject_nodes1() + let (mut gs, peers, queues, _) = inject_nodes1() .peer_no(2) .topics(Vec::new()) .to_subscribe(true) @@ -1759,7 +1713,7 @@ fn explicit_peers_not_added_to_mesh_on_subscribe() { assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect()); // assert that graft gets created to non-explicit peer - let (control_msgs, receivers) = count_control_msgs(receivers, |peer_id, m| { + let (control_msgs, queues) = count_control_msgs(queues, |peer_id, m| { peer_id == &peers[1] && matches!(m, RpcOut::Graft { .. }) }); assert!( @@ -1768,7 +1722,7 @@ fn explicit_peers_not_added_to_mesh_on_subscribe() { ); // assert that no graft gets created to explicit peer - let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { + let (control_msgs, _) = count_control_msgs(queues, |peer_id, m| { peer_id == &peers[0] && matches!(m, RpcOut::Graft { .. }) }); assert_eq!( @@ -1779,7 +1733,7 @@ fn explicit_peers_not_added_to_mesh_on_subscribe() { #[test] fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() { - let (mut gs, peers, receivers, _) = inject_nodes1() + let (mut gs, peers, queues, _) = inject_nodes1() .peer_no(2) .topics(Vec::new()) .to_subscribe(true) @@ -1810,7 +1764,7 @@ fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() { assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect()); // assert that graft gets created to non-explicit peer - let (control_msgs, receivers) = count_control_msgs(receivers, |peer_id, m| { + let (control_msgs, queues) = count_control_msgs(queues, |peer_id, m| { peer_id == &peers[1] && matches!(m, RpcOut::Graft { .. 
}) }); assert!( @@ -1819,7 +1773,7 @@ fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() { ); // assert that no graft gets created to explicit peer - let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { + let (control_msgs, _) = count_control_msgs(queues, |peer_id, m| { peer_id == &peers[0] && matches!(m, RpcOut::Graft { .. }) }); assert_eq!( @@ -1830,7 +1784,7 @@ fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() { #[test] fn no_gossip_gets_sent_to_explicit_peers() { - let (mut gs, peers, mut receivers, topic_hashes) = inject_nodes1() + let (mut gs, peers, mut queues, topic_hashes) = inject_nodes1() .peer_no(2) .topics(vec![String::from("topic1"), String::from("topic2")]) .to_subscribe(true) @@ -1859,11 +1813,10 @@ fn no_gossip_gets_sent_to_explicit_peers() { } // assert that no gossip gets sent to explicit peer - let receiver = receivers.remove(&peers[0]).unwrap(); + let mut receiver_queue = queues.remove(&peers[0]).unwrap(); let mut gossips = 0; - let non_priority = receiver.non_priority.get_ref(); - while !non_priority.is_empty() { - if let Ok(RpcOut::IHave(_)) = non_priority.try_recv() { + while !receiver_queue.is_empty() { + if let Some(RpcOut::IHave(_)) = receiver_queue.try_pop() { gossips += 1; } } @@ -1876,7 +1829,7 @@ fn test_mesh_addition() { let config: Config = Config::default(); // Adds mesh_low peers and PRUNE 2 giving us a deficit. - let (mut gs, peers, _receivers, topics) = inject_nodes1() + let (mut gs, peers, _queues, topics) = inject_nodes1() .peer_no(config.mesh_n() + 1) .topics(vec!["test".into()]) .to_subscribe(true) @@ -1912,7 +1865,7 @@ fn test_mesh_subtraction() { // Adds mesh_low peers and PRUNE 2 giving us a deficit. let n = config.mesh_n_high() + 10; // make all outbound connections so that we allow grafting to all - let (mut gs, peers, _receivers, topics) = inject_nodes1() + let (mut gs, peers, _queues, topics) = inject_nodes1() .peer_no(n) .topics(vec!["test".into()]) .to_subscribe(true) @@ -1992,7 +1945,7 @@ fn test_send_px_and_backoff_in_prune() { let config: Config = Config::default(); // build mesh with enough peers for px - let (mut gs, peers, receivers, topics) = inject_nodes1() + let (mut gs, peers, queues, topics) = inject_nodes1() .peer_no(config.prune_peers() + 1) .topics(vec!["test".into()]) .to_subscribe(true) @@ -2008,7 +1961,7 @@ fn test_send_px_and_backoff_in_prune() { ); // check prune message - let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { + let (control_msgs, _) = count_control_msgs(queues, |peer_id, m| { peer_id == &peers[0] && match m { RpcOut::Prune(Prune { @@ -2034,7 +1987,7 @@ fn test_prune_backoffed_peer_on_graft() { let config: Config = Config::default(); // build mesh with enough peers for px - let (mut gs, peers, receivers, topics) = inject_nodes1() + let (mut gs, peers, queues, topics) = inject_nodes1() .peer_no(config.prune_peers() + 1) .topics(vec!["test".into()]) .to_subscribe(true) @@ -2051,13 +2004,13 @@ fn test_prune_backoffed_peer_on_graft() { ); // ignore all messages until now - let receivers = flush_events(&mut gs, receivers); + let queues = flush_events(&mut gs, queues); // handle graft gs.handle_graft(&peers[0], vec![topics[0].clone()]); // check prune message - let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { + let (control_msgs, _) = count_control_msgs(queues, |peer_id, m| { peer_id == &peers[0] && match m { RpcOut::Prune(Prune { @@ -2084,7 +2037,7 @@ fn test_do_not_graft_within_backoff_period() { .build() .unwrap(); // only one peer => 
mesh too small and will try to regraft as early as possible - let (mut gs, peers, receivers, topics) = inject_nodes1() + let (mut gs, peers, queues, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) .to_subscribe(true) @@ -2095,7 +2048,7 @@ fn test_do_not_graft_within_backoff_period() { gs.handle_prune(&peers[0], vec![(topics[0].clone(), Vec::new(), Some(1))]); // forget all events until now - let receivers = flush_events(&mut gs, receivers); + let queues = flush_events(&mut gs, queues); // call heartbeat gs.heartbeat(); @@ -2108,8 +2061,8 @@ fn test_do_not_graft_within_backoff_period() { // Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat // is needed). - let (control_msgs, receivers) = - count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. })); + let (control_msgs, queues) = + count_control_msgs(queues, |_, m| matches!(m, RpcOut::Graft { .. })); assert_eq!( control_msgs, 0, "Graft message created too early within backoff period" @@ -2120,7 +2073,7 @@ fn test_do_not_graft_within_backoff_period() { gs.heartbeat(); // check that graft got created - let (control_msgs, _) = count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. })); + let (control_msgs, _) = count_control_msgs(queues, |_, m| matches!(m, RpcOut::Graft { .. })); assert!( control_msgs > 0, "No graft message was created after backoff period" @@ -2137,7 +2090,7 @@ fn test_do_not_graft_within_default_backoff_period_after_receiving_prune_without .build() .unwrap(); // only one peer => mesh too small and will try to regraft as early as possible - let (mut gs, peers, receivers, topics) = inject_nodes1() + let (mut gs, peers, queues, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) .to_subscribe(true) @@ -2148,7 +2101,7 @@ fn test_do_not_graft_within_default_backoff_period_after_receiving_prune_without gs.handle_prune(&peers[0], vec![(topics[0].clone(), Vec::new(), None)]); // forget all events until now - let receivers = flush_events(&mut gs, receivers); + let queues = flush_events(&mut gs, queues); // call heartbeat gs.heartbeat(); @@ -2159,8 +2112,8 @@ fn test_do_not_graft_within_default_backoff_period_after_receiving_prune_without // Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat // is needed). - let (control_msgs, receivers) = - count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. })); + let (control_msgs, queues) = + count_control_msgs(queues, |_, m| matches!(m, RpcOut::Graft { .. })); assert_eq!( control_msgs, 0, "Graft message created too early within backoff period" @@ -2171,7 +2124,7 @@ fn test_do_not_graft_within_default_backoff_period_after_receiving_prune_without gs.heartbeat(); // check that graft got created - let (control_msgs, _) = count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. })); + let (control_msgs, _) = count_control_msgs(queues, |_, m| matches!(m, RpcOut::Graft { .. 
})); assert!( control_msgs > 0, "No graft message was created after backoff period" @@ -2192,7 +2145,7 @@ fn test_unsubscribe_backoff() { let topic = String::from("test"); // only one peer => mesh too small and will try to regraft as early as possible - let (mut gs, _, receivers, topics) = inject_nodes1() + let (mut gs, _, queues, topics) = inject_nodes1() .peer_no(1) .topics(vec![topic.clone()]) .to_subscribe(true) @@ -2201,7 +2154,7 @@ fn test_unsubscribe_backoff() { let _ = gs.unsubscribe(&Topic::new(topic)); - let (control_msgs, receivers) = count_control_msgs(receivers, |_, m| match m { + let (control_msgs, queues) = count_control_msgs(queues, |_, m| match m { RpcOut::Prune(Prune { backoff, .. }) => backoff == &Some(1), _ => false, }); @@ -2213,7 +2166,7 @@ fn test_unsubscribe_backoff() { let _ = gs.subscribe(&Topic::new(topics[0].to_string())); // forget all events until now - let receivers = flush_events(&mut gs, receivers); + let queues = flush_events(&mut gs, queues); // call heartbeat gs.heartbeat(); @@ -2226,8 +2179,8 @@ fn test_unsubscribe_backoff() { // Check that no graft got created (we have backoff_slack = 1 therefore one more heartbeat // is needed). - let (control_msgs, receivers) = - count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. })); + let (control_msgs, queues) = + count_control_msgs(queues, |_, m| matches!(m, RpcOut::Graft { .. })); assert_eq!( control_msgs, 0, "Graft message created too early within backoff period" @@ -2238,7 +2191,7 @@ fn test_unsubscribe_backoff() { gs.heartbeat(); // check that graft got created - let (control_msgs, _) = count_control_msgs(receivers, |_, m| matches!(m, RpcOut::Graft { .. })); + let (control_msgs, _) = count_control_msgs(queues, |_, m| matches!(m, RpcOut::Graft { .. })); assert!( control_msgs > 0, "No graft message was created after backoff period" @@ -2251,7 +2204,7 @@ fn test_flood_publish() { let topic = "test"; // Adds more peers than mesh can hold to test flood publishing - let (mut gs, _, receivers, _) = inject_nodes1() + let (mut gs, _, queues, _) = inject_nodes1() .peer_no(config.mesh_n_high() + 10) .topics(vec![topic.into()]) .to_subscribe(true) @@ -2262,12 +2215,11 @@ fn test_flood_publish() { gs.publish(Topic::new(topic), publish_data).unwrap(); // Collect all publish messages - let publishes = receivers + let publishes = queues .into_values() - .fold(vec![], |mut collected_publish, c| { - let priority = c.priority.get_ref(); - while !priority.is_empty() { - if let Ok(RpcOut::Publish { message, .. }) = priority.try_recv() { + .fold(vec![], |mut collected_publish, mut queue| { + while !queue.is_empty() { + if let Some(RpcOut::Publish { message, .. }) = queue.try_pop() { collected_publish.push(message); } } @@ -2306,7 +2258,7 @@ fn test_gossip_to_at_least_gossip_lazy_peers() { // add more peers than in mesh to test gossipping // by default only mesh_n_low peers will get added to mesh - let (mut gs, _, receivers, topic_hashes) = inject_nodes1() + let (mut gs, _, queues, topic_hashes) = inject_nodes1() .peer_no(config.mesh_n_low() + config.gossip_lazy() + 1) .topics(vec!["topic".into()]) .to_subscribe(true) @@ -2333,7 +2285,7 @@ fn test_gossip_to_at_least_gossip_lazy_peers() { let msg_id = gs.config.message_id(message); // check that exactly config.gossip_lazy() many gossip messages were sent. 
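// The assertions in these two gossip tests rely on the IHAVE fan-out rule:
// each heartbeat, gossip is emitted to `gossip_factor` of the eligible
// non-mesh peers, with `config.gossip_lazy()` as a lower bound and the
// candidate count as an upper bound. A minimal sketch of that rule (the
// helper name is illustrative, not part of this patch):
fn ihave_target(gossip_lazy: usize, gossip_factor: f64, candidates: usize) -> usize {
    let factored = (gossip_factor * candidates as f64) as usize;
    gossip_lazy.max(factored).min(candidates)
}
// Here the candidate count is small, so the `gossip_lazy` floor applies;
// the following test drives the `gossip_factor` branch instead.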
- let (control_msgs, _) = count_control_msgs(receivers, |_, action| match action { + let (control_msgs, _) = count_control_msgs(queues, |_, action| match action { RpcOut::IHave(IHave { topic_hash, message_ids, @@ -2349,7 +2301,7 @@ fn test_gossip_to_at_most_gossip_factor_peers() { // add a lot of peers let m = config.mesh_n_low() + config.gossip_lazy() * (2.0 / config.gossip_factor()) as usize; - let (mut gs, _, receivers, topic_hashes) = inject_nodes1() + let (mut gs, _, queues, topic_hashes) = inject_nodes1() .peer_no(m) .topics(vec!["topic".into()]) .to_subscribe(true) @@ -2375,7 +2327,7 @@ fn test_gossip_to_at_most_gossip_factor_peers() { let msg_id = gs.config.message_id(message); // check that exactly config.gossip_lazy() many gossip messages were sent. - let (control_msgs, _) = count_control_msgs(receivers, |_, action| match action { + let (control_msgs, _) = count_control_msgs(queues, |_, action| match action { RpcOut::IHave(IHave { topic_hash, message_ids, @@ -2408,8 +2360,8 @@ fn test_accept_only_outbound_peer_grafts_when_mesh_full() { assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high()); // create an outbound and an inbound peer - let (inbound, _in_receiver) = add_peer(&mut gs, &topics, false, false); - let (outbound, _out_receiver) = add_peer(&mut gs, &topics, true, false); + let (inbound, _in_queue) = add_peer(&mut gs, &topics, false, false); + let (outbound, _out_queue) = add_peer(&mut gs, &topics, true, false); // send grafts gs.handle_graft(&inbound, vec![topics[0].clone()]); @@ -2439,7 +2391,7 @@ fn test_do_not_remove_too_many_outbound_peers() { .unwrap(); // fill the mesh with inbound connections - let (mut gs, peers, _receivers, topics) = inject_nodes1() + let (mut gs, peers, _queues, topics) = inject_nodes1() .peer_no(n) .topics(vec!["test".into()]) .to_subscribe(true) @@ -2512,7 +2464,7 @@ fn test_prune_negative_scored_peers() { let config = Config::default(); // build mesh with one peer - let (mut gs, peers, receivers, topics) = inject_nodes1() + let (mut gs, peers, queues, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) .to_subscribe(true) @@ -2535,7 +2487,7 @@ fn test_prune_negative_scored_peers() { assert!(gs.mesh[&topics[0]].is_empty()); // check prune message - let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { + let (control_msgs, _) = count_control_msgs(queues, |peer_id, m| { peer_id == &peers[0] && match m { RpcOut::Prune(Prune { @@ -2570,8 +2522,8 @@ fn test_dont_graft_to_negative_scored_peers() { .create_network(); // add two additional peers that will not be part of the mesh - let (p1, _receiver1) = add_peer(&mut gs, &topics, false, false); - let (p2, _receiver2) = add_peer(&mut gs, &topics, false, false); + let (p1, _queue1) = add_peer(&mut gs, &topics, false, false); + let (p2, _queue2) = add_peer(&mut gs, &topics, false, false); // reduce score of p1 to negative gs.as_peer_score_mut().add_penalty(&p1, 1); @@ -2644,7 +2596,7 @@ fn test_only_send_nonnegative_scoring_peers_in_px() { .unwrap(); // Build mesh with three peer - let (mut gs, peers, receivers, topics) = inject_nodes1() + let (mut gs, peers, queues, topics) = inject_nodes1() .peer_no(3) .topics(vec!["test".into()]) .to_subscribe(true) @@ -2670,7 +2622,7 @@ fn test_only_send_nonnegative_scoring_peers_in_px() { ); // Check that px in prune message only contains third peer - let (control_msgs, _) = count_control_msgs(receivers, |peer_id, m| { + let (control_msgs, _) = count_control_msgs(queues, |peer_id, m| { peer_id == &peers[1] && match m { 
RpcOut::Prune(Prune { @@ -2698,7 +2650,7 @@ fn test_do_not_gossip_to_peers_below_gossip_threshold() { }; // Build full mesh - let (mut gs, peers, mut receivers, topics) = inject_nodes1() + let (mut gs, peers, mut queues, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) .to_subscribe(true) @@ -2712,10 +2664,10 @@ fn test_do_not_gossip_to_peers_below_gossip_threshold() { } // Add two additional peers that will not be part of the mesh - let (p1, receiver1) = add_peer(&mut gs, &topics, false, false); - receivers.insert(p1, receiver1); - let (p2, receiver2) = add_peer(&mut gs, &topics, false, false); - receivers.insert(p2, receiver2); + let (p1, queue1) = add_peer(&mut gs, &topics, false, false); + queues.insert(p1, queue1); + let (p2, queue2) = add_peer(&mut gs, &topics, false, false); + queues.insert(p2, queue2); // Reduce score of p1 below peer_score_thresholds.gossip_threshold // note that penalties get squared so two penalties means a score of @@ -2746,7 +2698,7 @@ fn test_do_not_gossip_to_peers_below_gossip_threshold() { gs.emit_gossip(); // Check that exactly one gossip messages got sent and it got sent to p2 - let (control_msgs, _) = count_control_msgs(receivers, |peer, action| match action { + let (control_msgs, _) = count_control_msgs(queues, |peer, action| match action { RpcOut::IHave(IHave { topic_hash, message_ids, @@ -2773,7 +2725,7 @@ fn test_iwant_msg_from_peer_below_gossip_threshold_gets_ignored() { }; // Build full mesh - let (mut gs, peers, mut receivers, topics) = inject_nodes1() + let (mut gs, peers, mut queues, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) .to_subscribe(true) @@ -2789,10 +2741,10 @@ fn test_iwant_msg_from_peer_below_gossip_threshold_gets_ignored() { } // Add two additional peers that will not be part of the mesh - let (p1, receiver1) = add_peer(&mut gs, &topics, false, false); - receivers.insert(p1, receiver1); - let (p2, receiver2) = add_peer(&mut gs, &topics, false, false); - receivers.insert(p2, receiver2); + let (p1, queue1) = add_peer(&mut gs, &topics, false, false); + queues.insert(p1, queue1); + let (p2, queue2) = add_peer(&mut gs, &topics, false, false); + queues.insert(p2, queue2); // Reduce score of p1 below peer_score_thresholds.gossip_threshold // note that penalties get squared so two penalties means a score of @@ -2824,12 +2776,11 @@ fn test_iwant_msg_from_peer_below_gossip_threshold_gets_ignored() { // the messages we are sending let sent_messages = - receivers + queues .into_iter() - .fold(vec![], |mut collected_messages, (peer_id, c)| { - let non_priority = c.non_priority.get_ref(); - while !non_priority.is_empty() { - if let Ok(RpcOut::Forward { message, .. }) = non_priority.try_recv() { + .fold(vec![], |mut collected_messages, (peer_id, mut queue)| { + while !queue.is_empty() { + if let Some(RpcOut::Forward { message, .. 
}) = queue.try_pop() { collected_messages.push((peer_id, message)); } } @@ -2863,7 +2814,7 @@ fn test_ihave_msg_from_peer_below_gossip_threshold_gets_ignored() { ..PeerScoreThresholds::default() }; // build full mesh - let (mut gs, peers, mut receivers, topics) = inject_nodes1() + let (mut gs, peers, mut queues, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) .to_subscribe(true) @@ -2879,10 +2830,10 @@ fn test_ihave_msg_from_peer_below_gossip_threshold_gets_ignored() { } // add two additional peers that will not be part of the mesh - let (p1, receiver1) = add_peer(&mut gs, &topics, false, false); - receivers.insert(p1, receiver1); - let (p2, receiver2) = add_peer(&mut gs, &topics, false, false); - receivers.insert(p2, receiver2); + let (p1, queue1) = add_peer(&mut gs, &topics, false, false); + queues.insert(p1, queue1); + let (p2, queue2) = add_peer(&mut gs, &topics, false, false); + queues.insert(p2, queue2); // reduce score of p1 below peer_score_thresholds.gossip_threshold // note that penalties get squared so two penalties means a score of @@ -2912,7 +2863,7 @@ fn test_ihave_msg_from_peer_below_gossip_threshold_gets_ignored() { gs.handle_ihave(&p2, vec![(topics[0].clone(), vec![msg_id.clone()])]); // check that we sent exactly one IWANT request to p2 - let (control_msgs, _) = count_control_msgs(receivers, |peer, c| match c { + let (control_msgs, _) = count_control_msgs(queues, |peer, c| match c { RpcOut::IWant(IWant { message_ids }) => { if message_ids.iter().any(|m| m == &msg_id) { assert_eq!(peer, &p2); @@ -2940,7 +2891,7 @@ fn test_do_not_publish_to_peer_below_publish_threshold() { }; // build mesh with no peers and no subscribed topics - let (mut gs, _, mut receivers, _) = inject_nodes1() + let (mut gs, _, mut queues, _) = inject_nodes1() .gs_config(config) .scoring(Some((peer_score_params, peer_score_thresholds))) .create_network(); @@ -2950,10 +2901,10 @@ fn test_do_not_publish_to_peer_below_publish_threshold() { let topics = vec![topic.hash()]; // add two additional peers that will be added to the mesh - let (p1, receiver1) = add_peer(&mut gs, &topics, false, false); - receivers.insert(p1, receiver1); - let (p2, receiver2) = add_peer(&mut gs, &topics, false, false); - receivers.insert(p2, receiver2); + let (p1, queue1) = add_peer(&mut gs, &topics, false, false); + queues.insert(p1, queue1); + let (p2, queue2) = add_peer(&mut gs, &topics, false, false); + queues.insert(p2, queue2); // reduce score of p1 below peer_score_thresholds.publish_threshold // note that penalties get squared so two penalties means a score of @@ -2971,17 +2922,17 @@ fn test_do_not_publish_to_peer_below_publish_threshold() { gs.publish(topic, publish_data).unwrap(); // Collect all publish messages - let publishes = receivers - .into_iter() - .fold(vec![], |mut collected_publish, (peer_id, c)| { - let priority = c.priority.get_ref(); - while !priority.is_empty() { - if let Ok(RpcOut::Publish { message, .. }) = priority.try_recv() { - collected_publish.push((peer_id, message)); + let publishes = + queues + .into_iter() + .fold(vec![], |mut collected_publish, (peer_id, mut queue)| { + while !queue.is_empty() { + if let Some(RpcOut::Publish { message, .. 
}) = queue.try_pop() { + collected_publish.push((peer_id, message)); + } } - } - collected_publish - }); + collected_publish + }); // assert only published to p2 assert_eq!(publishes.len(), 1); @@ -2998,17 +2949,17 @@ fn test_do_not_flood_publish_to_peer_below_publish_threshold() { ..PeerScoreThresholds::default() }; // build mesh with no peers - let (mut gs, _, mut receivers, topics) = inject_nodes1() + let (mut gs, _, mut queues, topics) = inject_nodes1() .topics(vec!["test".into()]) .gs_config(config) .scoring(Some((peer_score_params, peer_score_thresholds))) .create_network(); // add two additional peers that will be added to the mesh - let (p1, receiver1) = add_peer(&mut gs, &topics, false, false); - receivers.insert(p1, receiver1); - let (p2, receiver2) = add_peer(&mut gs, &topics, false, false); - receivers.insert(p2, receiver2); + let (p1, queue1) = add_peer(&mut gs, &topics, false, false); + queues.insert(p1, queue1); + let (p2, queue2) = add_peer(&mut gs, &topics, false, false); + queues.insert(p2, queue2); // reduce score of p1 below peer_score_thresholds.publish_threshold // note that penalties get squared so two penalties means a score of @@ -3026,17 +2977,17 @@ fn test_do_not_flood_publish_to_peer_below_publish_threshold() { gs.publish(Topic::new("test"), publish_data).unwrap(); // Collect all publish messages - let publishes = receivers - .into_iter() - .fold(vec![], |mut collected_publish, (peer_id, c)| { - let priority = c.priority.get_ref(); - while !priority.is_empty() { - if let Ok(RpcOut::Publish { message, .. }) = priority.try_recv() { - collected_publish.push((peer_id, message)) + let publishes = + queues + .into_iter() + .fold(vec![], |mut collected_publish, (peer_id, mut queue)| { + while !queue.is_empty() { + if let Some(RpcOut::Publish { message, .. 
}) = queue.try_pop() { + collected_publish.push((peer_id, message)) + } } - } - collected_publish - }); + collected_publish + }); // assert only published to p2 assert_eq!(publishes.len(), 1); @@ -3062,8 +3013,8 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() { .create_network(); // add two additional peers that will be added to the mesh - let (p1, _receiver1) = add_peer(&mut gs, &topics, false, false); - let (p2, _receiver2) = add_peer(&mut gs, &topics, false, false); + let (p1, _queue1) = add_peer(&mut gs, &topics, false, false); + let (p2, _queue2) = add_peer(&mut gs, &topics, false, false); // reduce score of p1 below peer_score_thresholds.graylist_threshold // note that penalties get squared so two penalties means a score of @@ -3256,7 +3207,7 @@ fn test_keep_best_scoring_peers_on_oversubscription() { // build mesh with more peers than mesh can hold let n = config.mesh_n_high() + 1; - let (mut gs, peers, _receivers, topics) = inject_nodes1() + let (mut gs, peers, _queues, topics) = inject_nodes1() .peer_no(n) .topics(vec!["test".into()]) .to_subscribe(true) @@ -4369,7 +4320,7 @@ fn test_scoring_p7_grafts_before_backoff() { ..Default::default() }; - let (mut gs, peers, _receivers, topics) = inject_nodes1() + let (mut gs, peers, _queues, topics) = inject_nodes1() .peer_no(2) .topics(vec!["test".into()]) .to_subscribe(false) @@ -4446,7 +4397,7 @@ fn test_opportunistic_grafting() { ..Default::default() }; - let (mut gs, peers, _receivers, topics) = inject_nodes1() + let (mut gs, peers, _queues, topics) = inject_nodes1() .peer_no(5) .topics(vec!["test".into()]) .to_subscribe(false) @@ -4475,7 +4426,7 @@ fn test_opportunistic_grafting() { } // set scores for peers in the mesh - for (i, (peer, _receiver)) in others.iter().enumerate().take(5) { + for (i, (peer, _queue)) in others.iter().enumerate().take(5) { gs.set_application_score(peer, 0.0 + i as f64); } @@ -4523,7 +4474,7 @@ fn test_opportunistic_grafting() { #[test] fn test_ignore_graft_from_unknown_topic() { // build gossipsub without subscribing to any topics - let (mut gs, peers, receivers, _) = inject_nodes1() + let (mut gs, peers, queues, _) = inject_nodes1() .peer_no(1) .topics(vec![]) .to_subscribe(false) @@ -4533,7 +4484,7 @@ fn test_ignore_graft_from_unknown_topic() { gs.handle_graft(&peers[0], vec![Topic::new("test").hash()]); // assert that no prune got created - let (control_msgs, _) = count_control_msgs(receivers, |_, a| matches!(a, RpcOut::Prune { .. })); + let (control_msgs, _) = count_control_msgs(queues, |_, a| matches!(a, RpcOut::Prune { .. 
})); assert_eq!( control_msgs, 0, "we should not prune after graft in unknown topic" @@ -4544,15 +4495,15 @@ fn test_ignore_graft_from_unknown_topic() { fn test_ignore_too_many_iwants_from_same_peer_for_same_message() { let config = Config::default(); // build gossipsub with full mesh - let (mut gs, _, mut receivers, topics) = inject_nodes1() + let (mut gs, _, mut queues, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) .to_subscribe(false) .create_network(); // add another peer not in the mesh - let (peer, receiver) = add_peer(&mut gs, &topics, false, false); - receivers.insert(peer, receiver); + let (peer, queue) = add_peer(&mut gs, &topics, false, false); + queues.insert(peer, queue); // receive a message let mut seq = 0; @@ -4566,7 +4517,7 @@ fn test_ignore_too_many_iwants_from_same_peer_for_same_message() { gs.handle_received_message(m1, &PeerId::random()); // clear events - let receivers = flush_events(&mut gs, receivers); + let queues = flush_events(&mut gs, queues); // the first gossip_retransimission many iwants return the valid message, all others are // ignored. @@ -4575,10 +4526,9 @@ fn test_ignore_too_many_iwants_from_same_peer_for_same_message() { } assert_eq!( - receivers.into_values().fold(0, |mut fwds, c| { - let non_priority = c.non_priority.get_ref(); - while !non_priority.is_empty() { - if let Ok(RpcOut::Forward { .. }) = non_priority.try_recv() { + queues.into_values().fold(0, |mut fwds, mut queue| { + while !queue.is_empty() { + if let Some(RpcOut::Forward { .. }) = queue.try_pop() { fwds += 1; } } @@ -4596,7 +4546,7 @@ fn test_ignore_too_many_ihaves() { .build() .unwrap(); // build gossipsub with full mesh - let (mut gs, _, mut receivers, topics) = inject_nodes1() + let (mut gs, _, mut queues, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) .to_subscribe(false) @@ -4604,8 +4554,8 @@ fn test_ignore_too_many_ihaves() { .create_network(); // add another peer not in the mesh - let (peer, receiver) = add_peer(&mut gs, &topics, false, false); - receivers.insert(peer, receiver); + let (peer, queue) = add_peer(&mut gs, &topics, false, false); + queues.insert(peer, queue); // peer has 20 messages let mut seq = 0; @@ -4633,7 +4583,7 @@ fn test_ignore_too_many_ihaves() { .collect(); // we send iwant only for the first 10 messages - let (control_msgs, receivers) = count_control_msgs(receivers, |p, action| { + let (control_msgs, queues) = count_control_msgs(queues, |p, action| { p == &peer && matches!(action, RpcOut::IWant(IWant { message_ids }) if message_ids.len() == 1 && first_ten.contains(&message_ids[0])) }); @@ -4659,7 +4609,7 @@ fn test_ignore_too_many_ihaves() { } // we sent iwant for all 10 messages - let (control_msgs, _) = count_control_msgs(receivers, |p, action| { + let (control_msgs, _) = count_control_msgs(queues, |p, action| { p == &peer && matches!(action, RpcOut::IWant(IWant { message_ids }) if message_ids.len() == 1) }); @@ -4674,7 +4624,7 @@ fn test_ignore_too_many_messages_in_ihave() { .build() .unwrap(); // build gossipsub with full mesh - let (mut gs, _, mut receivers, topics) = inject_nodes1() + let (mut gs, _, mut queues, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) .to_subscribe(false) @@ -4682,8 +4632,8 @@ fn test_ignore_too_many_messages_in_ihave() { .create_network(); // add another peer not in the mesh - let (peer, receiver) = add_peer(&mut gs, &topics, false, false); - receivers.insert(peer, receiver); + let (peer, queue) = 
add_peer(&mut gs, &topics, false, false); + queues.insert(peer, queue); // peer has 30 messages let mut seq = 0; @@ -4708,7 +4658,7 @@ fn test_ignore_too_many_messages_in_ihave() { // we send iwant only for the first 10 messages let mut sum = 0; - let (control_msgs, receivers) = count_control_msgs(receivers, |p, rpc| match rpc { + let (control_msgs, queues) = count_control_msgs(queues, |p, rpc| match rpc { RpcOut::IWant(IWant { message_ids }) => { p == &peer && { assert!(first_twelve.is_superset(&message_ids.iter().collect())); @@ -4734,7 +4684,7 @@ fn test_ignore_too_many_messages_in_ihave() { // we sent 10 iwant messages ids via a IWANT rpc. let mut sum = 0; - let (control_msgs, _) = count_control_msgs(receivers, |p, rpc| match rpc { + let (control_msgs, _) = count_control_msgs(queues, |p, rpc| match rpc { RpcOut::IWant(IWant { message_ids }) => { p == &peer && { sum += message_ids.len(); @@ -4755,7 +4705,7 @@ fn test_limit_number_of_message_ids_inside_ihave() { .build() .unwrap(); // build gossipsub with full mesh - let (mut gs, peers, mut receivers, topics) = inject_nodes1() + let (mut gs, peers, mut queues, topics) = inject_nodes1() .peer_no(config.mesh_n_high()) .topics(vec!["test".into()]) .to_subscribe(false) @@ -4768,10 +4718,10 @@ fn test_limit_number_of_message_ids_inside_ihave() { } // add two other peers not in the mesh - let (p1, receiver1) = add_peer(&mut gs, &topics, false, false); - receivers.insert(p1, receiver1); - let (p2, receiver2) = add_peer(&mut gs, &topics, false, false); - receivers.insert(p2, receiver2); + let (p1, queue1) = add_peer(&mut gs, &topics, false, false); + queues.insert(p1, queue1); + let (p2, queue2) = add_peer(&mut gs, &topics, false, false); + queues.insert(p2, queue2); // receive 200 messages from another peer let mut seq = 0; @@ -4789,7 +4739,7 @@ fn test_limit_number_of_message_ids_inside_ihave() { let mut ihaves1 = HashSet::new(); let mut ihaves2 = HashSet::new(); - let (control_msgs, _) = count_control_msgs(receivers, |p, action| match action { + let (control_msgs, _) = count_control_msgs(queues, |p, action| match action { RpcOut::IHave(IHave { message_ids, .. }) => { if p == &p1 { ihaves1 = message_ids.iter().cloned().collect(); @@ -4870,7 +4820,7 @@ fn test_iwant_penalties() { let mut first_messages = Vec::new(); let mut second_messages = Vec::new(); let mut seq = 0; - for (peer, _receiver) in &other_peers { + for (peer, _queue) in &other_peers { let msg1 = random_message(&mut seq, &topics); let msg2 = random_message(&mut seq, &topics); @@ -4893,19 +4843,19 @@ fn test_iwant_penalties() { } // the peers send us all the first message ids in time - for (index, (peer, _receiver)) in other_peers.iter().enumerate() { + for (index, (peer, _queue)) in other_peers.iter().enumerate() { gs.handle_received_message(first_messages[index].clone(), peer); } // now we do a heartbeat no penalization should have been applied yet gs.heartbeat(); - for (peer, _receiver) in &other_peers { + for (peer, _queue) in &other_peers { assert_eq!(gs.as_peer_score_mut().score_report(peer).score, 0.0); } // receive the first twenty of the other peers then send their response - for (index, (peer, _receiver)) in other_peers.iter().enumerate().take(20) { + for (index, (peer, _queue)) in other_peers.iter().enumerate().take(20) { gs.handle_received_message(second_messages[index].clone(), peer); } @@ -4916,7 +4866,7 @@ fn test_iwant_penalties() { gs.heartbeat(); // now we get the second messages from the last 80 peers. 
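// The tallies this test checks follow the peer-score P7 term: every IWANT
// we send records a promise, a promise still unanswered at its deadline
// counts as one behaviour penalty, and penalties in excess of the
// configured threshold are squared before being weighted. A minimal sketch
// under those assumptions (the helper name is illustrative, not this
// patch's code):
fn p7_contribution(broken_promises: f64, penalty_threshold: f64, penalty_weight: f64) -> f64 {
    let excess = (broken_promises - penalty_threshold).max(0.0);
    penalty_weight * excess * excess
}
// With a negative weight, one broken promise contributes `w` while two
// contribute `4.0 * w`, which is what lets the counts below separate
// single- from double-penalized peers.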
- for (index, (peer, _receiver)) in other_peers.iter().enumerate() { + for (index, (peer, _queue)) in other_peers.iter().enumerate() { if index > 19 { gs.handle_received_message(second_messages[index].clone(), peer); } @@ -4930,7 +4880,7 @@ fn test_iwant_penalties() { let mut single_penalized = 0; let mut double_penalized = 0; - for (i, (peer, _receiver)) in other_peers.iter().enumerate() { + for (i, (peer, _queue)) in other_peers.iter().enumerate() { let score = gs.as_peer_score_mut().score_report(peer).score; if score == 0.0 { not_penalized += 1; @@ -4958,7 +4908,7 @@ fn test_publish_to_floodsub_peers_without_flood_publish() { .flood_publish(false) .build() .unwrap(); - let (mut gs, _, mut receivers, topics) = inject_nodes1() + let (mut gs, _, mut queues, topics) = inject_nodes1() .peer_no(config.mesh_n_low() - 1) .topics(vec!["test".into()]) .to_subscribe(false) @@ -4966,7 +4916,7 @@ fn test_publish_to_floodsub_peers_without_flood_publish() { .create_network(); // add two floodsub peer, one explicit, one implicit - let (p1, receiver1) = add_peer_with_addr_and_kind( + let (p1, queue1) = add_peer_with_addr_and_kind( &mut gs, &topics, false, @@ -4974,11 +4924,11 @@ fn test_publish_to_floodsub_peers_without_flood_publish() { Multiaddr::empty(), Some(PeerKind::Floodsub), ); - receivers.insert(p1, receiver1); + queues.insert(p1, queue1); - let (p2, receiver2) = + let (p2, queue2) = add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); - receivers.insert(p2, receiver2); + queues.insert(p2, queue2); // p1 and p2 are not in the mesh assert!(!gs.mesh[&topics[0]].contains(&p1) && !gs.mesh[&topics[0]].contains(&p2)); @@ -4988,13 +4938,12 @@ fn test_publish_to_floodsub_peers_without_flood_publish() { gs.publish(Topic::new("test"), publish_data).unwrap(); // Collect publish messages to floodsub peers - let publishes = receivers + let publishes = queues .into_iter() - .fold(0, |mut collected_publish, (peer_id, c)| { - let priority = c.priority.get_ref(); - while !priority.is_empty() { - if matches!(priority.try_recv(), - Ok(RpcOut::Publish{..}) if peer_id == p1 || peer_id == p2) + .fold(0, |mut collected_publish, (peer_id, mut queue)| { + while !queue.is_empty() { + if matches!(queue.try_pop(), + Some(RpcOut::Publish{..}) if peer_id == p1 || peer_id == p2) { collected_publish += 1; } @@ -5014,7 +4963,7 @@ fn test_do_not_use_floodsub_in_fanout() { .flood_publish(false) .build() .unwrap(); - let (mut gs, _, mut receivers, _) = inject_nodes1() + let (mut gs, _, mut queues, _) = inject_nodes1() .peer_no(config.mesh_n_low() - 1) .topics(Vec::new()) .to_subscribe(false) @@ -5025,7 +4974,7 @@ fn test_do_not_use_floodsub_in_fanout() { let topics = vec![topic.hash()]; // add two floodsub peer, one explicit, one implicit - let (p1, receiver1) = add_peer_with_addr_and_kind( + let (p1, queue1) = add_peer_with_addr_and_kind( &mut gs, &topics, false, @@ -5034,23 +4983,22 @@ fn test_do_not_use_floodsub_in_fanout() { Some(PeerKind::Floodsub), ); - receivers.insert(p1, receiver1); - let (p2, receiver2) = + queues.insert(p1, queue1); + let (p2, queue2) = add_peer_with_addr_and_kind(&mut gs, &topics, false, false, Multiaddr::empty(), None); - receivers.insert(p2, receiver2); + queues.insert(p2, queue2); // publish a message let publish_data = vec![0; 42]; gs.publish(Topic::new("test"), publish_data).unwrap(); // Collect publish messages to floodsub peers - let publishes = receivers + let publishes = queues .into_iter() - .fold(0, |mut collected_publish, (peer_id, c)| { - let priority = 
c.priority.get_ref(); - while !priority.is_empty() { - if matches!(priority.try_recv(), - Ok(RpcOut::Publish{..}) if peer_id == p1 || peer_id == p2) + .fold(0, |mut collected_publish, (peer_id, mut queue)| { + while !queue.is_empty() { + if matches!(queue.try_pop(), + Some(RpcOut::Publish{..}) if peer_id == p1 || peer_id == p2) { collected_publish += 1; } @@ -5101,14 +5049,14 @@ fn test_dont_add_floodsub_peers_to_mesh_on_join() { #[test] fn test_dont_send_px_to_old_gossipsub_peers() { - let (mut gs, _, receivers, topics) = inject_nodes1() + let (mut gs, _, queues, topics) = inject_nodes1() .peer_no(0) .topics(vec!["test".into()]) .to_subscribe(false) .create_network(); // add an old gossipsub peer - let (p1, _receiver1) = add_peer_with_addr_and_kind( + let (p1, _queue1) = add_peer_with_addr_and_kind( &mut gs, &topics, false, @@ -5125,7 +5073,7 @@ fn test_dont_send_px_to_old_gossipsub_peers() { ); // check that prune does not contain px - let (control_msgs, _) = count_control_msgs(receivers, |_, m| match m { + let (control_msgs, _) = count_control_msgs(queues, |_, m| match m { RpcOut::Prune(Prune { peers: px, .. }) => !px.is_empty(), _ => false, }); @@ -5135,7 +5083,7 @@ fn test_dont_send_px_to_old_gossipsub_peers() { #[test] fn test_dont_send_floodsub_peers_in_px() { // build mesh with one peer - let (mut gs, peers, receivers, topics) = inject_nodes1() + let (mut gs, peers, queues, topics) = inject_nodes1() .peer_no(1) .topics(vec!["test".into()]) .to_subscribe(true) @@ -5160,7 +5108,7 @@ fn test_dont_send_floodsub_peers_in_px() { ); // check that px in prune message is empty - let (control_msgs, _) = count_control_msgs(receivers, |_, m| match m { + let (control_msgs, _) = count_control_msgs(queues, |_, m| match m { RpcOut::Prune(Prune { peers: px, .. 
}) => !px.is_empty(), _ => false, }); @@ -5251,14 +5199,14 @@ fn test_subscribe_and_graft_with_negative_score() { ))) .create_network(); - let (mut gs2, _, receivers, _) = inject_nodes1().create_network(); + let (mut gs2, _, queues, _) = inject_nodes1().create_network(); let connection_id = ConnectionId::new_unchecked(0); let topic = Topic::new("test"); - let (p2, _receiver1) = add_peer(&mut gs1, &Vec::new(), true, false); - let (p1, _receiver2) = add_peer(&mut gs2, &topic_hashes, false, false); + let (p2, _queue1) = add_peer(&mut gs1, &Vec::new(), true, false); + let (p1, _queue2) = add_peer(&mut gs2, &topic_hashes, false, false); // add penalty to peer p2 gs1.as_peer_score_mut().add_penalty(&p2, 1); @@ -5272,13 +5220,12 @@ fn test_subscribe_and_graft_with_negative_score() { p1: PeerId, p2: PeerId, connection_id: ConnectionId, - receivers: HashMap| - -> HashMap { - let new_receivers = HashMap::new(); - for (peer_id, receiver) in receivers.into_iter() { - let non_priority = receiver.non_priority.get_ref(); - match non_priority.try_recv() { - Ok(rpc) if peer_id == p1 => { + queues: HashMap| + -> HashMap { + let new_queues = HashMap::new(); + for (peer_id, mut receiver_queue) in queues.into_iter() { + match receiver_queue.try_pop() { + Some(rpc) if peer_id == p1 => { gs1.on_connection_handler_event( p2, connection_id, @@ -5291,18 +5238,18 @@ fn test_subscribe_and_graft_with_negative_score() { _ => {} } } - new_receivers + new_queues }; // forward the subscribe message - let receivers = forward_messages_to_p1(&mut gs1, p1, p2, connection_id, receivers); + let queues = forward_messages_to_p1(&mut gs1, p1, p2, connection_id, queues); // heartbeats on both gs1.heartbeat(); gs2.heartbeat(); // forward messages again - forward_messages_to_p1(&mut gs1, p1, p2, connection_id, receivers); + forward_messages_to_p1(&mut gs1, p1, p2, connection_id, queues); // nobody got penalized assert!(gs1.as_peer_score_mut().score_report(&p2).score >= original_score); @@ -5344,7 +5291,7 @@ fn test_graft_without_subscribe() { /// that run Gossipsub v1.2. 
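// A sketch of the emission rule the IDONTWANT tests below exercise,
// assuming the v1.2 behaviour: a received message whose size reaches the
// configured IDONTWANT threshold is announced to every mesh peer that
// negotiated v1.2 or later, except the peer it arrived from (the names are
// illustrative, and whether the size bound is inclusive follows the
// config's documentation):
//
//     fn should_send_idontwant(
//         msg_len: usize,
//         threshold: usize,
//         peer_supports_v1_2: bool,
//         is_source: bool,
//     ) -> bool {
//         msg_len >= threshold && peer_supports_v1_2 && !is_source
//     }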
#[test] fn sends_idontwant() { - let (mut gs, peers, receivers, topic_hashes) = inject_nodes1() + let (mut gs, peers, queues, topic_hashes) = inject_nodes1() .peer_no(5) .topics(vec![String::from("topic1")]) .to_subscribe(true) @@ -5366,12 +5313,11 @@ fn sends_idontwant() { }; gs.handle_received_message(message.clone(), &local_id); assert_eq!( - receivers + queues .into_iter() - .fold(0, |mut idontwants, (peer_id, c)| { - let non_priority = c.non_priority.get_ref(); - while !non_priority.is_empty() { - if let Ok(RpcOut::IDontWant(_)) = non_priority.try_recv() { + .fold(0, |mut idontwants, (peer_id, mut queue)| { + while !queue.is_empty() { + if let Some(RpcOut::IDontWant(_)) = queue.try_pop() { assert_ne!(peer_id, peers[1]); idontwants += 1; } @@ -5385,7 +5331,7 @@ fn sends_idontwant() { #[test] fn doesnt_sends_idontwant_for_lower_message_size() { - let (mut gs, peers, receivers, topic_hashes) = inject_nodes1() + let (mut gs, peers, queues, topic_hashes) = inject_nodes1() .peer_no(5) .topics(vec![String::from("topic1")]) .to_subscribe(true) @@ -5408,12 +5354,11 @@ fn doesnt_sends_idontwant_for_lower_message_size() { gs.handle_received_message(message.clone(), &local_id); assert_eq!( - receivers + queues .into_iter() - .fold(0, |mut idontwants, (peer_id, c)| { - let non_priority = c.non_priority.get_ref(); - while !non_priority.is_empty() { - if let Ok(RpcOut::IDontWant(_)) = non_priority.try_recv() { + .fold(0, |mut idontwants, (peer_id, mut queue)| { + while !queue.is_empty() { + if let Some(RpcOut::IDontWant(_)) = queue.try_pop() { assert_ne!(peer_id, peers[1]); idontwants += 1; } @@ -5429,7 +5374,7 @@ fn doesnt_sends_idontwant_for_lower_message_size() { /// that don't run Gossipsub v1.2. #[test] fn doesnt_send_idontwant() { - let (mut gs, peers, receivers, topic_hashes) = inject_nodes1() + let (mut gs, peers, queues, topic_hashes) = inject_nodes1() .peer_no(5) .topics(vec![String::from("topic1")]) .to_subscribe(true) @@ -5451,12 +5396,12 @@ fn doesnt_send_idontwant() { }; gs.handle_received_message(message.clone(), &local_id); assert_eq!( - receivers + queues .into_iter() - .fold(0, |mut idontwants, (peer_id, c)| { - let non_priority = c.non_priority.get_ref(); - while !non_priority.is_empty() { - if matches!(non_priority.try_recv(), Ok(RpcOut::IDontWant(_)) if peer_id != peers[1]) { + .fold(0, |mut idontwants, (peer_id, mut queue)| { + while !queue.is_empty() { + if matches!(queue.try_pop(), Some(RpcOut::IDontWant(_)) if peer_id != peers[1]) + { idontwants += 1; } } @@ -5471,7 +5416,7 @@ fn doesnt_send_idontwant() { /// that sent IDONTWANT. #[test] fn doesnt_forward_idontwant() { - let (mut gs, peers, receivers, topic_hashes) = inject_nodes1() + let (mut gs, peers, queues, topic_hashes) = inject_nodes1() .peer_no(4) .topics(vec![String::from("topic1")]) .to_subscribe(true) @@ -5501,16 +5446,17 @@ fn doesnt_forward_idontwant() { gs.handle_received_message(raw_message.clone(), &local_id); assert_eq!( - receivers.into_iter().fold(0, |mut fwds, (peer_id, c)| { - let non_priority = c.non_priority.get_ref(); - while !non_priority.is_empty() { - if let Ok(RpcOut::Forward { .. }) = non_priority.try_recv() { - assert_ne!(peer_id, peers[2]); - fwds += 1; + queues + .into_iter() + .fold(0, |mut fwds, (peer_id, mut queue)| { + while !queue.is_empty() { + if let Some(RpcOut::Forward { .. 
}) = queue.try_pop() { + assert_ne!(peer_id, peers[2]); + fwds += 1; + } } - } - fwds - }), + fwds + }), 2, "IDONTWANT was not sent" ); @@ -5520,7 +5466,7 @@ fn doesnt_forward_idontwant() { /// IDONTWANT message to the respective peer. #[test] fn parses_idontwant() { - let (mut gs, peers, _receivers, _topic_hashes) = inject_nodes1() + let (mut gs, peers, _queues, _topic_hashes) = inject_nodes1() .peer_no(2) .topics(vec![String::from("topic1")]) .to_subscribe(true) @@ -5552,7 +5498,7 @@ fn parses_idontwant() { /// Test that a node clears stale IDONTWANT messages. #[test] fn clear_stale_idontwant() { - let (mut gs, peers, _receivers, _topic_hashes) = inject_nodes1() + let (mut gs, peers, _queues, _topic_hashes) = inject_nodes1() .peer_no(4) .topics(vec![String::from("topic1")]) .to_subscribe(true) @@ -5593,15 +5539,14 @@ fn test_all_queues_full() { connections: vec![ConnectionId::new_unchecked(0)], outbound: false, topics: topics.clone(), - sender: Sender::new(2), + messages: Queue::new(1), dont_send: LinkedHashMap::new(), }, ); - let publish_data = vec![0; 42]; - gs.publish(topic_hash.clone(), publish_data.clone()) - .unwrap(); let publish_data = vec![2; 59]; + let result = gs.publish(topic_hash.clone(), publish_data.clone()); + assert!(result.is_ok()); let err = gs.publish(topic_hash, publish_data).unwrap_err(); assert!(matches!(err, PublishError::AllQueuesFull(f) if f == 1)); } @@ -5622,6 +5567,8 @@ fn test_slow_peer_returns_failed_publish() { let slow_peer_id = PeerId::random(); peers.push(slow_peer_id); + let mesh = gs.mesh.entry(topic_hash.clone()).or_default(); + mesh.insert(slow_peer_id); gs.connected_peers.insert( slow_peer_id, PeerDetails { @@ -5629,7 +5576,7 @@ fn test_slow_peer_returns_failed_publish() { connections: vec![ConnectionId::new_unchecked(0)], outbound: false, topics: topics.clone(), - sender: Sender::new(2), + messages: Queue::new(1), dont_send: LinkedHashMap::new(), }, ); @@ -5642,43 +5589,34 @@ fn test_slow_peer_returns_failed_publish() { connections: vec![ConnectionId::new_unchecked(0)], outbound: false, topics: topics.clone(), - sender: Sender::new(gs.config.connection_handler_queue_len()), + messages: Queue::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), }, ); let publish_data = vec![0; 42]; - gs.publish(topic_hash.clone(), publish_data.clone()) - .unwrap(); - let publish_data = vec![2; 59]; - gs.publish(topic_hash.clone(), publish_data).unwrap(); + let _failed_publish = gs.publish(topic_hash.clone(), publish_data.clone()); + let _failed_publish = gs.publish(topic_hash.clone(), publish_data.clone()); gs.heartbeat(); - gs.heartbeat(); - - let slow_peer_failed_messages = match gs.events.pop_front().unwrap() { - ToSwarm::GenerateEvent(Event::SlowPeer { - peer_id, - failed_messages, - }) if peer_id == slow_peer_id => failed_messages, - _ => panic!("invalid event"), - }; + let slow_peer_failed_messages = gs + .events + .into_iter() + .find_map(|e| match e { + ToSwarm::GenerateEvent(Event::SlowPeer { + peer_id, + failed_messages, + }) if peer_id == slow_peer_id => Some(failed_messages), + _ => None, + }) + .expect("No SlowPeer event found"); let failed_messages = FailedMessages { - publish: 1, - forward: 0, - priority: 1, - non_priority: 0, - timeout: 0, + priority: 0, + non_priority: 1, }; - assert_eq!(slow_peer_failed_messages.priority, failed_messages.priority); - assert_eq!( - slow_peer_failed_messages.non_priority, - failed_messages.non_priority - ); - assert_eq!(slow_peer_failed_messages.publish, failed_messages.publish); - 
assert_eq!(slow_peer_failed_messages.forward, failed_messages.forward); + assert_eq!(slow_peer_failed_messages, failed_messages); } #[test] @@ -5703,7 +5641,7 @@ fn test_slow_peer_returns_failed_ihave_handling() { connections: vec![ConnectionId::new_unchecked(0)], outbound: false, topics: topics.clone(), - sender: Sender::new(2), + messages: Queue::new(1), dont_send: LinkedHashMap::new(), }, ); @@ -5720,7 +5658,7 @@ fn test_slow_peer_returns_failed_ihave_handling() { connections: vec![ConnectionId::new_unchecked(0)], outbound: false, topics: topics.clone(), - sender: Sender::new(gs.config.connection_handler_queue_len()), + messages: Queue::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), }, ); @@ -5778,20 +5716,11 @@ fn test_slow_peer_returns_failed_ihave_handling() { .unwrap(); let failed_messages = FailedMessages { - publish: 0, - forward: 0, priority: 0, non_priority: 1, - timeout: 0, }; - assert_eq!(slow_peer_failed_messages.priority, failed_messages.priority); - assert_eq!( - slow_peer_failed_messages.non_priority, - failed_messages.non_priority - ); - assert_eq!(slow_peer_failed_messages.publish, failed_messages.publish); - assert_eq!(slow_peer_failed_messages.forward, failed_messages.forward); + assert_eq!(slow_peer_failed_messages, failed_messages); } #[test] @@ -5817,7 +5746,7 @@ fn test_slow_peer_returns_failed_iwant_handling() { connections: vec![ConnectionId::new_unchecked(0)], outbound: false, topics: topics.clone(), - sender: Sender::new(2), + messages: Queue::new(1), dont_send: LinkedHashMap::new(), }, ); @@ -5834,7 +5763,7 @@ fn test_slow_peer_returns_failed_iwant_handling() { connections: vec![ConnectionId::new_unchecked(0)], outbound: false, topics: topics.clone(), - sender: Sender::new(gs.config.connection_handler_queue_len()), + messages: Queue::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), }, ); @@ -5872,20 +5801,11 @@ fn test_slow_peer_returns_failed_iwant_handling() { .unwrap(); let failed_messages = FailedMessages { - publish: 0, - forward: 1, priority: 0, non_priority: 1, - timeout: 0, }; - assert_eq!(slow_peer_failed_messages.priority, failed_messages.priority); - assert_eq!( - slow_peer_failed_messages.non_priority, - failed_messages.non_priority - ); - assert_eq!(slow_peer_failed_messages.publish, failed_messages.publish); - assert_eq!(slow_peer_failed_messages.forward, failed_messages.forward); + assert_eq!(slow_peer_failed_messages, failed_messages); } #[test] @@ -5911,7 +5831,7 @@ fn test_slow_peer_returns_failed_forward() { connections: vec![ConnectionId::new_unchecked(0)], outbound: false, topics: topics.clone(), - sender: Sender::new(2), + messages: Queue::new(1), dont_send: LinkedHashMap::new(), }, ); @@ -5928,7 +5848,7 @@ fn test_slow_peer_returns_failed_forward() { connections: vec![ConnectionId::new_unchecked(0)], outbound: false, topics: topics.clone(), - sender: Sender::new(gs.config.connection_handler_queue_len()), + messages: Queue::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), }, ); @@ -5966,20 +5886,11 @@ fn test_slow_peer_returns_failed_forward() { .unwrap(); let failed_messages = FailedMessages { - publish: 0, - forward: 1, - priority: 0, non_priority: 1, - timeout: 0, + priority: 0, }; - assert_eq!(slow_peer_failed_messages.priority, failed_messages.priority); - assert_eq!( - slow_peer_failed_messages.non_priority, - failed_messages.non_priority - ); - assert_eq!(slow_peer_failed_messages.publish, failed_messages.publish); - 
assert_eq!(slow_peer_failed_messages.forward, failed_messages.forward); + assert_eq!(slow_peer_failed_messages, failed_messages); } #[test] @@ -6010,7 +5921,7 @@ fn test_slow_peer_is_downscored_on_publish() { connections: vec![ConnectionId::new_unchecked(0)], outbound: false, topics: topics.clone(), - sender: Sender::new(2), + messages: Queue::new(1), dont_send: LinkedHashMap::new(), }, ); @@ -6024,7 +5935,7 @@ fn test_slow_peer_is_downscored_on_publish() { connections: vec![ConnectionId::new_unchecked(0)], outbound: false, topics: topics.clone(), - sender: Sender::new(gs.config.connection_handler_queue_len()), + messages: Queue::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), }, ); @@ -6035,47 +5946,9 @@ fn test_slow_peer_is_downscored_on_publish() { let publish_data = vec![2; 59]; gs.publish(topic_hash.clone(), publish_data).unwrap(); gs.heartbeat(); - let slow_peer_score = gs.as_peer_score_mut().score_report(&slow_peer_id).score; - assert_eq!(slow_peer_score, slow_peer_params.slow_peer_weight); -} - -#[tokio::test] -async fn test_timedout_messages_are_reported() { - let gs_config = ConfigBuilder::default() - .validation_mode(ValidationMode::Permissive) - .build() - .unwrap(); - - let mut gs: Behaviour = Behaviour::new(MessageAuthenticity::RandomAuthor, gs_config).unwrap(); - - let sender = Sender::new(2); - let topic_hash = Topic::new("Test").hash(); - let publish_data = vec![2; 59]; - let raw_message = gs.build_raw_message(topic_hash, publish_data).unwrap(); - - sender - .send_message(RpcOut::Publish { - message: raw_message, - timeout: Delay::new(Duration::from_nanos(1)), - }) - .unwrap(); - let mut receiver = sender.new_receiver(); - let stale = future::poll_fn(|cx| receiver.poll_stale(cx)).await.unwrap(); - assert!(matches!(stale, RpcOut::Publish { .. })); -} - -#[test] -fn test_priority_messages_are_always_sent() { - let sender = Sender::new(2); - let topic_hash = Topic::new("Test").hash(); - // Fill the buffer with the first message. - assert!(sender - .send_message(RpcOut::Subscribe(topic_hash.clone())) - .is_ok()); - assert!(sender - .send_message(RpcOut::Subscribe(topic_hash.clone())) - .is_ok()); - assert!(sender.send_message(RpcOut::Unsubscribe(topic_hash)).is_ok()); + let slow_peer_score = gs.peer_score(&slow_peer_id).unwrap(); + // There should be two penalties for the two failed messages. + assert_eq!(slow_peer_score, slow_peer_params.slow_peer_weight * 2.0); } /// Test that specific topic configurations are correctly applied @@ -6422,7 +6295,7 @@ fn test_fanout_with_topic_config() { .build() .unwrap(); - let (mut gs, _, receivers, topic_hashes) = inject_nodes1() + let (mut gs, _, queues, topic_hashes) = inject_nodes1() .peer_no(10) // More than mesh_n .topics(vec![topic.clone()]) .to_subscribe(true) @@ -6445,12 +6318,11 @@ fn test_fanout_with_topic_config() { ); // Collect publish messages - let publishes = receivers + let publishes = queues .into_values() - .fold(vec![], |mut collected_publish, c| { - let priority = c.priority.get_ref(); - while !priority.is_empty() { - if let Ok(RpcOut::Publish { message, .. }) = priority.try_recv() { + .fold(vec![], |mut collected_publish, mut queue| { + while !queue.is_empty() { + if let Some(RpcOut::Publish { message, .. 
}) = queue.try_pop() { collected_publish.push(message); } } diff --git a/protocols/gossipsub/src/handler.rs b/protocols/gossipsub/src/handler.rs index a2d05d8a3ff..95c1def59cd 100644 --- a/protocols/gossipsub/src/handler.rs +++ b/protocols/gossipsub/src/handler.rs @@ -37,7 +37,7 @@ use web_time::Instant; use crate::{ protocol::{GossipsubCodec, ProtocolConfig}, - rpc::Receiver, + queue::Queue, rpc_proto::proto, types::{PeerKind, RawMessage, RpcIn, RpcOut}, ValidationError, @@ -98,8 +98,8 @@ pub struct EnabledHandler { /// The single long-lived inbound substream. inbound_substream: Option, - /// Queue of values that we want to send to the remote - send_queue: Receiver, + /// Queue of dispatched Rpc messages to send. + message_queue: Queue, /// Flag indicating that an outbound substream is being established to prevent duplicate /// requests. @@ -162,7 +162,7 @@ enum OutboundSubstreamState { impl Handler { /// Builds a new [`Handler`]. - pub fn new(protocol_config: ProtocolConfig, message_queue: Receiver) -> Self { + pub(crate) fn new(protocol_config: ProtocolConfig, message_queue: Queue) -> Self { Handler::Enabled(EnabledHandler { listen_protocol: protocol_config, inbound_substream: None, @@ -170,7 +170,7 @@ impl Handler { outbound_substream_establishing: false, outbound_substream_attempts: 0, inbound_substream_attempts: 0, - send_queue: message_queue, + message_queue, peer_kind: None, peer_kind_sent: false, last_io_activity: Instant::now(), @@ -234,7 +234,7 @@ impl EnabledHandler { } // determine if we need to create the outbound stream - if !self.send_queue.poll_is_empty(cx) + if !self.message_queue.is_empty() && self.outbound_substream.is_none() && !self.outbound_substream_establishing { @@ -252,15 +252,18 @@ impl EnabledHandler { { // outbound idle state Some(OutboundSubstreamState::WaitingOutput(substream)) => { - if let Poll::Ready(Some(mut message)) = self.send_queue.poll_next_unpin(cx) { + if let Poll::Ready(mut message) = Pin::new(&mut self.message_queue).poll_pop(cx) + { match message { RpcOut::Publish { message: _, ref mut timeout, + .. } | RpcOut::Forward { message: _, ref mut timeout, + .. } => { if Pin::new(timeout).poll(cx).is_ready() { // Inform the behaviour and end the poll. @@ -407,13 +410,6 @@ impl EnabledHandler { } } - // Drop the next message in queue if it's stale. - if let Poll::Ready(Some(rpc)) = self.send_queue.poll_stale(cx) { - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - HandlerEvent::MessageDropped(rpc), - )); - } - Poll::Pending } } diff --git a/protocols/gossipsub/src/lib.rs b/protocols/gossipsub/src/lib.rs index a116900be0e..f1d42d6cddb 100644 --- a/protocols/gossipsub/src/lib.rs +++ b/protocols/gossipsub/src/lib.rs @@ -105,7 +105,7 @@ mod mcache; mod metrics; mod peer_score; mod protocol; -mod rpc; +mod queue; mod rpc_proto; mod subscription_filter; mod time_cache; diff --git a/protocols/gossipsub/src/metrics.rs b/protocols/gossipsub/src/metrics.rs index 37fe5481689..1394d9a92a7 100644 --- a/protocols/gossipsub/src/metrics.rs +++ b/protocols/gossipsub/src/metrics.rs @@ -133,12 +133,6 @@ pub(crate) struct Metrics { ignored_messages: Family, /// The number of messages rejected by the application (validation result). rejected_messages: Family, - /// The number of publish messages dropped by the sender. - publish_messages_dropped: Family, - /// The number of forward messages dropped by the sender. - forward_messages_dropped: Family, - /// The number of messages that timed out and could not be sent. 
- timedout_messages_dropped: Family, // Metrics regarding mesh state /// Number of peers in our mesh. This metric should be updated with the count of peers for a @@ -193,10 +187,15 @@ pub(crate) struct Metrics { /// The number of msg_id's we have received in every IDONTWANT control message. idontwant_msgs_ids: Counter, - /// The size of the priority queue. - priority_queue_size: Histogram, - /// The size of the non-priority queue. - non_priority_queue_size: Histogram, + /// The size of the queue by priority. + queue_size: Family, + + /// Failed messages by message type. + failed_messages: Family, + + /// The number of messages we have removed from a queue that we would otherwise send. A rough + /// guide to measure of bandwidth saved. + removed_queued_messages: Counter, } impl Metrics { @@ -245,21 +244,6 @@ impl Metrics { "Number of rejected messages received for each topic" ); - let publish_messages_dropped = register_family!( - "publish_messages_dropped_per_topic", - "Number of publish messages dropped per topic" - ); - - let forward_messages_dropped = register_family!( - "forward_messages_dropped_per_topic", - "Number of forward messages dropped per topic" - ); - - let timedout_messages_dropped = register_family!( - "timedout_messages_dropped_per_topic", - "Number of timedout messages dropped per topic" - ); - let mesh_peer_counts = register_family!( "mesh_peer_counts", "Number of peers in each topic in our mesh" @@ -361,20 +345,36 @@ impl Metrics { metric }; - let priority_queue_size = Histogram::new(linear_buckets(0.0, 25.0, 100)); + let queue_size = Family::::new_with_constructor(|| { + Histogram::new(linear_buckets(0.0, 50.0, 100)) + }); registry.register( - "priority_queue_size", - "Histogram of observed priority queue sizes", - priority_queue_size.clone(), + "queue_size", + "Histogram of observed queue sizes", + queue_size.clone(), ); - let non_priority_queue_size = Histogram::new(linear_buckets(0.0, 25.0, 100)); + let failed_messages = Family::::new_with_constructor(|| { + Histogram::new([ + 0.0, 1.0, 5.0, 10.0, 25.0, 50.0, 75.0, 100.0, 250.0, 500.0, 1000.0, 2000.0, + ]) + }); registry.register( - "non_priority_queue_size", - "Histogram of observed non-priority queue sizes", - non_priority_queue_size.clone(), + "failed_messages", + "Histogram of observed failed messages by type", + failed_messages.clone(), ); + let removed_queued_messages = { + let metric = Counter::default(); + registry.register( + "removed_queued_messages", + "Number of messages we have removed from all our queues due to IDONTWANTs", + metric.clone(), + ); + metric + }; + Self { max_topics, max_never_subscribed_topics, @@ -385,9 +385,6 @@ impl Metrics { accepted_messages, ignored_messages, rejected_messages, - publish_messages_dropped, - forward_messages_dropped, - timedout_messages_dropped, mesh_peer_counts, mesh_peer_inclusion_events, mesh_peer_churn_events, @@ -405,8 +402,9 @@ impl Metrics { topic_iwant_msgs, idontwant_msgs, idontwant_msgs_ids, - priority_queue_size, - non_priority_queue_size, + queue_size, + failed_messages, + removed_queued_messages, } } @@ -537,27 +535,6 @@ impl Metrics { } } - /// Register dropping a Publish message over a topic. - pub(crate) fn publish_msg_dropped(&mut self, topic: &TopicHash) { - if self.register_topic(topic).is_ok() { - self.publish_messages_dropped.get_or_create(topic).inc(); - } - } - - /// Register dropping a Forward message over a topic. 
- pub(crate) fn forward_msg_dropped(&mut self, topic: &TopicHash) { - if self.register_topic(topic).is_ok() { - self.forward_messages_dropped.get_or_create(topic).inc(); - } - } - - /// Register dropping a message that timedout over a topic. - pub(crate) fn timeout_msg_dropped(&mut self, topic: &TopicHash) { - if self.register_topic(topic).is_ok() { - self.timedout_messages_dropped.get_or_create(topic).inc(); - } - } - /// Register that a message was received (and was not a duplicate). pub(crate) fn msg_recvd(&mut self, topic: &TopicHash) { if self.register_topic(topic).is_ok() { @@ -616,12 +593,20 @@ impl Metrics { /// Observes a priority queue size. pub(crate) fn observe_priority_queue_size(&mut self, len: usize) { - self.priority_queue_size.observe(len as f64); + self.queue_size + .get_or_create(&MessageTypeLabel { + message_type: MessageType::Priority, + }) + .observe(len as f64); } /// Observes a non-priority queue size. pub(crate) fn observe_non_priority_queue_size(&mut self, len: usize) { - self.non_priority_queue_size.observe(len as f64); + self.queue_size + .get_or_create(&MessageTypeLabel { + message_type: MessageType::NonPriority, + }) + .observe(len as f64); } /// Observe a score of a mesh peer. @@ -655,6 +640,30 @@ impl Metrics { self.topic_info.insert(topic_hash, true); } } + + /// Observe the failed priority messages. + pub(crate) fn observe_failed_priority_messages(&mut self, messages: usize) { + self.failed_messages + .get_or_create(&MessageTypeLabel { + message_type: MessageType::Priority, + }) + .observe(messages as f64); + } + + /// Observe the failed non priority messages. + pub(crate) fn observe_failed_non_priority_messages(&mut self, messages: usize) { + self.failed_messages + .get_or_create(&MessageTypeLabel { + message_type: MessageType::NonPriority, + }) + .observe(messages as f64); + } + + /// Register the number of removed messages from the `Handler` queue + /// When receiving IDONTWANT messages + pub(crate) fn register_removed_messages(&mut self, removed_messages: usize) { + self.removed_queued_messages.inc_by(removed_messages as u64); + } } /// Reasons why a peer was included in the mesh. @@ -698,6 +707,15 @@ pub(crate) enum Penalty { IPColocation, } +/// Kinds of messages in the send queue. +#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, EncodeLabelValue)] +pub(crate) enum MessageType { + /// A priority message. + Priority, + /// Non Priority Message. + NonPriority, +} + /// Label for the mesh inclusion event metrics. #[derive(PartialEq, Eq, Hash, EncodeLabelSet, Clone, Debug)] struct InclusionLabel { @@ -724,6 +742,12 @@ struct PenaltyLabel { penalty: Penalty, } +/// Label for the queue message priority kind. +#[derive(PartialEq, Eq, Hash, EncodeLabelSet, Clone, Debug)] +struct MessageTypeLabel { + message_type: MessageType, +} + #[derive(Clone)] struct HistBuilder { buckets: Vec, diff --git a/protocols/gossipsub/src/protocol.rs b/protocols/gossipsub/src/protocol.rs index 821c11d2132..74dcc669f55 100644 --- a/protocols/gossipsub/src/protocol.rs +++ b/protocols/gossipsub/src/protocol.rs @@ -659,6 +659,7 @@ mod tests { let rpc = RpcOut::Publish { message: message.clone(), timeout: Delay::new(Duration::from_secs(1)), + message_id: MessageId(vec![0, 0]), }; let mut codec = diff --git a/protocols/gossipsub/src/queue.rs b/protocols/gossipsub/src/queue.rs new file mode 100644 index 00000000000..ff04392e618 --- /dev/null +++ b/protocols/gossipsub/src/queue.rs @@ -0,0 +1,294 @@ +// Copyright 2020 Sigma Prime Pty Ltd. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use std::{ + collections::{HashMap, VecDeque}, + pin::Pin, + sync::{atomic::AtomicUsize, Arc, Mutex}, + task::{Context, Poll, Waker}, +}; + +use crate::{types::RpcOut, MessageId}; + +const CONTROL_MSGS_LIMIT: usize = 20_000; + +/// An async priority queue used to dispatch messages from the `NetworkBehaviour` +/// Provides a clean abstraction over high-priority (unbounded), control (bounded), +/// and non priority (bounded) message queues. +#[derive(Debug)] +pub(crate) struct Queue { + /// High-priority unbounded queue (Subscribe, Unsubscribe) + pub(crate) priority: Shared, + /// Control messages bounded queue (Graft, Prune, IDontWant) + pub(crate) control: Shared, + /// Low-priority bounded queue (Publish, Forward, IHave, IWant) + pub(crate) non_priority: Shared, + /// The id of the current reference of the counter. + pub(crate) id: usize, + /// The total number of references for the queue. + pub(crate) count: Arc, +} + +impl Queue { + /// Create a new `Queue` with `capacity`. + pub(crate) fn new(capacity: usize) -> Self { + Self { + priority: Shared::new(), + control: Shared::with_capacity(CONTROL_MSGS_LIMIT), + non_priority: Shared::with_capacity(capacity), + id: 1, + count: Arc::new(AtomicUsize::new(1)), + } + } + + /// Try to push a message to the Queue, return Err if the queue is full, + /// which will only happen for control and non priority messages. + pub(crate) fn try_push(&mut self, message: RpcOut) -> Result<(), Box> { + match message { + RpcOut::Subscribe(_) | RpcOut::Unsubscribe(_) => { + self.priority + .try_push(message) + .expect("Shared is unbounded"); + Ok(()) + } + RpcOut::Graft(_) | RpcOut::Prune(_) | RpcOut::IDontWant(_) => { + self.control.try_push(message) + } + RpcOut::Publish { .. } + | RpcOut::Forward { .. } + | RpcOut::IHave(_) + | RpcOut::IWant(_) => self.non_priority.try_push(message), + } + } + + /// Remove pending low priority Publish and Forward messages. + /// Returns the number of messages removed. + pub(crate) fn remove_data_messages(&mut self, message_ids: &[MessageId]) -> usize { + let mut count = 0; + self.non_priority.retain(|message| match message { + RpcOut::Publish { message_id, .. } | RpcOut::Forward { message_id, .. } => { + if message_ids.contains(message_id) { + count += 1; + false + } else { + true + } + } + _ => true, + }); + count + } + + /// Pop an element from the queue. 
+ pub(crate) fn poll_pop(&mut self, cx: &mut Context) -> Poll { + // First we try the priority messages. + if let Poll::Ready(rpc) = Pin::new(&mut self.priority).poll_pop(cx) { + return Poll::Ready(rpc); + } + + // Then we try the control messages. + if let Poll::Ready(rpc) = Pin::new(&mut self.control).poll_pop(cx) { + return Poll::Ready(rpc); + } + + // Finally we try the non priority messages + if let Poll::Ready(rpc) = Pin::new(&mut self.non_priority).poll_pop(cx) { + return Poll::Ready(rpc); + } + + Poll::Pending + } + + /// Check if the queue is empty. + pub(crate) fn is_empty(&self) -> bool { + if !self.priority.is_empty() { + return false; + } + + if !self.control.is_empty() { + return false; + } + + if !self.non_priority.is_empty() { + return false; + } + + true + } + + /// Returns the length of the priority queue. + #[cfg(feature = "metrics")] + pub(crate) fn priority_len(&self) -> usize { + self.priority.len() + self.control.len() + } + + /// Returns the length of the non priority queue. + #[cfg(feature = "metrics")] + pub(crate) fn non_priority_len(&self) -> usize { + self.non_priority.len() + } + + /// Attempts to pop a message from the queue. + /// returns None if the queue is empty. + #[cfg(test)] + pub(crate) fn try_pop(&mut self) -> Option { + // Try priority first + self.priority + .try_pop() + // Then control messages + .or_else(|| self.control.try_pop()) + // Finally non priority + .or_else(|| self.non_priority.try_pop()) + } +} + +impl Clone for Queue { + fn clone(&self) -> Self { + let new_id = self.count.fetch_add(1, std::sync::atomic::Ordering::SeqCst); + Self { + priority: Shared { + inner: self.priority.inner.clone(), + capacity: self.priority.capacity, + id: new_id, + }, + control: Shared { + inner: self.control.inner.clone(), + capacity: self.control.capacity, + id: new_id, + }, + non_priority: Shared { + inner: self.non_priority.inner.clone(), + capacity: self.non_priority.capacity, + id: new_id, + }, + id: self.id, + count: self.count.clone(), + } + } +} + +/// The internal shared part of the queue, +/// that allows for shallow copies of the queue among each connection of the remote. +#[derive(Debug)] +pub(crate) struct Shared { + inner: Arc>, + capacity: Option, + id: usize, +} + +impl Shared { + pub(crate) fn with_capacity(capacity: usize) -> Self { + Self { + inner: Arc::new(Mutex::new(SharedInner { + queue: VecDeque::new(), + pending_pops: Default::default(), + })), + capacity: Some(capacity), + id: 1, + } + } + + pub(crate) fn new() -> Self { + Self { + inner: Arc::new(Mutex::new(SharedInner { + queue: VecDeque::new(), + pending_pops: Default::default(), + })), + capacity: None, + id: 1, + } + } + + /// Pop an element from the queue. + pub(crate) fn poll_pop(self: std::pin::Pin<&mut Self>, cx: &mut Context) -> Poll { + let mut guard = self.inner.lock().expect("lock to not be poisoned"); + match guard.queue.pop_front() { + Some(t) => Poll::Ready(t), + None => { + guard + .pending_pops + .entry(self.id) + .or_insert(cx.waker().clone()); + Poll::Pending + } + } + } + + pub(crate) fn try_push(&mut self, message: RpcOut) -> Result<(), Box> { + let mut guard = self.inner.lock().expect("lock to not be poisoned"); + if self + .capacity + .is_some_and(|capacity| guard.queue.len() >= capacity) + { + return Err(Box::new(message)); + } + + guard.queue.push_back(message); + // Wake pending registered pops. + for (_, s) in guard.pending_pops.drain() { + s.wake(); + } + + Ok(()) + } + + /// Retain only the elements specified by the predicate. 
+    /// In other words, remove all elements `e` for which `f(&e)` returns false. The elements
+    /// are visited in unsorted (and unspecified) order.
+    pub(crate) fn retain<F: FnMut(&RpcOut) -> bool>(&mut self, f: F) {
+        let mut shared = self.inner.lock().expect("lock to not be poisoned");
+        shared.queue.retain(f);
+    }
+
+    /// Check if the queue is empty.
+    pub(crate) fn is_empty(&self) -> bool {
+        let guard = self.inner.lock().expect("lock to not be poisoned");
+        guard.queue.len() == 0
+    }
+
+    /// Returns the length of the queue.
+    #[cfg(feature = "metrics")]
+    pub(crate) fn len(&self) -> usize {
+        let guard = self.inner.lock().expect("lock to not be poisoned");
+        guard.queue.len()
+    }
+
+    /// Attempts to pop a message from the queue.
+    /// Returns `None` if the queue is empty.
+    #[cfg(test)]
+    pub(crate) fn try_pop(&mut self) -> Option<RpcOut> {
+        let mut guard = self.inner.lock().expect("lock to not be poisoned");
+        guard.queue.pop_front()
+    }
+}
+
+impl Drop for Shared {
+    fn drop(&mut self) {
+        let mut guard = self.inner.lock().expect("lock to not be poisoned");
+        guard.pending_pops.remove(&self.id);
+    }
+}
+
+/// The state shared by the `NetworkBehaviour`s and the `ConnectionHandler`s.
+#[derive(Debug)]
+struct SharedInner {
+    queue: VecDeque<RpcOut>,
+    pending_pops: HashMap<usize, Waker>,
+}
diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs
index 8f8a4f38a88..bea0786e060 100644
--- a/protocols/gossipsub/src/types.rs
+++ b/protocols/gossipsub/src/types.rs
@@ -19,7 +19,10 @@
 // DEALINGS IN THE SOFTWARE.
 
 //! A collection of types using the Gossipsub system.
-use std::{collections::BTreeSet, fmt, fmt::Debug};
+use std::{
+    collections::BTreeSet,
+    fmt::{self, Debug},
+};
 
 use futures_timer::Delay;
 use hashlink::LinkedHashMap;
@@ -30,34 +33,17 @@ use quick_protobuf::MessageWrite;
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
 use web_time::Instant;
 
-use crate::{rpc::Sender, rpc_proto::proto, TopicHash};
+use crate::{queue::Queue, rpc_proto::proto, TopicHash};
 
 /// Messages that have expired while attempting to be sent to a peer.
-#[derive(Clone, Debug, Default)]
+#[derive(Clone, Debug, Default, PartialEq, Eq)]
 pub struct FailedMessages {
-    /// The number of publish messages that failed to be published in a heartbeat.
-    pub publish: usize,
-    /// The number of forward messages that failed to be published in a heartbeat.
-    pub forward: usize,
-    /// The number of messages that were failed to be sent to the priority queue as it was full.
+    /// The number of messages that failed to be sent to the priority queue as it was
+    /// full.
     pub priority: usize,
-    /// The number of messages that were failed to be sent to the non-priority queue as it was
+    /// The number of messages that failed to be sent to the non-priority queue as it was
     /// full.
     pub non_priority: usize,
-    /// The number of messages that timed out and could not be sent.
-    pub timeout: usize,
-}
-
-impl FailedMessages {
-    /// The total number of messages that failed due to the queue being full.
-    pub fn total_queue_full(&self) -> usize {
-        self.priority + self.non_priority
-    }
-
-    /// The total failed messages in a heartbeat.
-    pub fn total(&self) -> usize {
-        self.priority + self.non_priority
-    }
-}
 
 #[derive(Debug)]
@@ -111,10 +97,10 @@ pub(crate) struct PeerDetails {
     pub(crate) connections: Vec<ConnectionId>,
     /// Subscribed topics.
     pub(crate) topics: BTreeSet<TopicHash>,
-    /// The rpc sender to the connection handler(s).
-    pub(crate) sender: Sender,
     /// Don't send messages.
pub(crate) dont_send: LinkedHashMap, + /// Message queue consumed by the connection handler. + pub(crate) messages: Queue, } /// Describes the types of peers that can exist in the gossipsub context. @@ -319,10 +305,18 @@ pub struct IDontWant { pub enum RpcOut { /// Publish a Gossipsub message on network.`timeout` limits the duration the message /// can wait to be sent before it is abandoned. - Publish { message: RawMessage, timeout: Delay }, + Publish { + message_id: MessageId, + message: RawMessage, + timeout: Delay, + }, /// Forward a Gossipsub message on network. `timeout` limits the duration the message /// can wait to be sent before it is abandoned. - Forward { message: RawMessage, timeout: Delay }, + Forward { + message_id: MessageId, + message: RawMessage, + timeout: Delay, + }, /// Subscribe a topic. Subscribe(TopicHash), /// Unsubscribe a topic. @@ -346,24 +340,30 @@ impl RpcOut { pub fn into_protobuf(self) -> proto::RPC { self.into() } + + /// Returns true if the `RpcOut` is priority. + pub(crate) fn priority(&self) -> bool { + matches!( + self, + RpcOut::Subscribe(_) + | RpcOut::Unsubscribe(_) + | RpcOut::Graft(_) + | RpcOut::Prune(_) + | RpcOut::IDontWant(_) + ) + } } impl From for proto::RPC { /// Converts the RPC into protobuf format. fn from(rpc: RpcOut) -> Self { match rpc { - RpcOut::Publish { - message, - timeout: _, - } => proto::RPC { + RpcOut::Publish { message, .. } => proto::RPC { subscriptions: Vec::new(), publish: vec![message.into()], control: None, }, - RpcOut::Forward { - message, - timeout: _, - } => proto::RPC { + RpcOut::Forward { message, .. } => proto::RPC { publish: vec![message.into()], subscriptions: Vec::new(), control: None, From 4321429069748bbf9fcee64a1fd33dba08e60cdc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Fri, 10 Oct 2025 15:11:24 +0100 Subject: [PATCH 28/68] address Daniel's review --- protocols/gossipsub/src/behaviour.rs | 15 ++++++++++----- protocols/gossipsub/src/partial.rs | 5 ++--- protocols/gossipsub/src/types.rs | 2 +- 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 26452d2d836..c300ab94478 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -715,7 +715,11 @@ where // Get Peers from the mesh or fanout to publish a message to. // If partial set, filter out peers who only want partial messages for the topic. 
- fn get_publish_peers(&mut self, topic_hash: &TopicHash, partial: bool) -> HashSet { + fn get_publish_peers( + &mut self, + topic_hash: &TopicHash, + filter_partial: bool, + ) -> HashSet { let mesh_n = self.config.mesh_n_for_topic(topic_hash); let peers_on_topic = self @@ -724,7 +728,7 @@ where .filter(|(_, peer)| { #[cfg(feature = "partial_messages")] { - if partial && peer.partial_only_topics.contains(topic_hash) { + if filter_partial && peer.partial_only_topics.contains(topic_hash) { return false; } } @@ -761,7 +765,7 @@ where &self.connected_peers, topic_hash, needed_extra_peers, - partial, + filter_partial, |peer| { !mesh_peers.contains(peer) && !self.explicit_peers.contains(peer) @@ -795,7 +799,7 @@ where &self.connected_peers, topic_hash, mesh_n, - partial, + filter_partial, |p| { !self.explicit_peers.contains(p) && !self @@ -849,6 +853,7 @@ where let group_id = partial_message.group_id().as_ref().to_vec(); let recipient_peers = self.get_publish_peers(&topic_id, false); + let metadata = partial_message.parts_metadata().as_ref().to_vec(); for peer_id in recipient_peers.iter() { // TODO: this can be optimized, we are going to get the peer again on `send_message` let Some(peer) = &mut self.connected_peers.get_mut(peer_id) else { @@ -889,7 +894,7 @@ where *peer_id, RpcOut::PartialMessage { message: message_data, - metadata: partial_message.parts_metadata().as_ref().to_vec(), + metadata: metadata.clone(), group_id: group_id.clone(), topic_id: topic_id.clone(), }, diff --git a/protocols/gossipsub/src/partial.rs b/protocols/gossipsub/src/partial.rs index 1d6ca5a8476..49937fb3798 100644 --- a/protocols/gossipsub/src/partial.rs +++ b/protocols/gossipsub/src/partial.rs @@ -40,9 +40,8 @@ pub trait Partial { /// during reconstruction. fn group_id(&self) -> impl AsRef<[u8]>; - /// Returns metadata describing which parts of the message are available and which parts we want. - /// - /// This metadata is application-defined and should encode information about + /// Returns application defined metadata describing which parts of the message + /// are available and which parts we want. /// /// The returned bytes will be sent in partsMetadata field to advertise /// available and wanted parts to peers. diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs index ea5183eca8b..37cb9ce39fb 100644 --- a/protocols/gossipsub/src/types.rs +++ b/protocols/gossipsub/src/types.rs @@ -364,7 +364,7 @@ pub struct TestExtension {} /// A Gossipsub RPC message sent. #[derive(Debug)] pub enum RpcOut { - /// PublishV a Gossipsub message on network.`timeout` limits the duration the message + /// Publish a Gossipsub message on network.`timeout` limits the duration the message /// can wait to be sent before it is abandoned. 
Publish { message_id: MessageId, From c3aa3e4d0515a4e62b6c2a9c80b29538b502f6c9 Mon Sep 17 00:00:00 2001 From: Daniel Knopik Date: Fri, 10 Oct 2025 16:43:04 +0200 Subject: [PATCH 29/68] enum return value for `partial_message_bytes_from_metadata` --- protocols/gossipsub/src/behaviour.rs | 38 ++++++++++++++++++---------- protocols/gossipsub/src/partial.rs | 22 +++++++++++++--- 2 files changed, 42 insertions(+), 18 deletions(-) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index c300ab94478..e7a5d3ec685 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -76,7 +76,10 @@ use crate::{ }; #[cfg(feature = "partial_messages")] -use crate::{partial::Partial, types::PartialMessage}; +use crate::{ + partial::{Partial, PublishAction}, + types::PartialMessage, +}; #[cfg(test)] mod tests; @@ -865,9 +868,8 @@ where let peer_partials = peer.partial_messages.entry(topic_id.clone()).or_default(); let peer_partial = peer_partials.entry(group_id.clone()).or_default(); - let Ok((message_data, rest_wanted)) = partial_message - .partial_message_bytes_from_metadata(peer_partial.metadata.as_ref()) - .map(|(m, r)| (m.as_ref().to_vec(), r.map(|r| r.as_ref().to_vec()))) + let Ok(action) = + partial_message.partial_message_bytes_from_metadata(peer_partial.metadata.as_ref()) else { tracing::error!(peer = %peer_id, group_id = ?group_id, "Could not reconstruct message bytes for peer metadata"); @@ -875,25 +877,33 @@ where continue; }; - match rest_wanted { - r @ Some(_) => { + let message = match action { + PublishAction::NothingToSend => { // No new data to send peer. - if r == peer_partial.metadata { - continue; - } - peer_partial.metadata = r; + continue; } - // Peer partial is now complete - // remove it from the list - None => { + PublishAction::PeerHasAllData => { + // Peer partial is now complete + // remove it from the list peer_partials.remove(&group_id); + continue; + } + PublishAction::Send { metadata, message } => { + peer_partial.metadata = Some(metadata); + message + } + PublishAction::SendRemaining { message } => { + // Peer partial is complete after sending this message + // remove it from the list + peer_partials.remove(&group_id); + message } }; self.send_message( *peer_id, RpcOut::PartialMessage { - message: message_data, + message, metadata: metadata.clone(), group_id: group_id.clone(), topic_id: topic_id.clone(), diff --git a/protocols/gossipsub/src/partial.rs b/protocols/gossipsub/src/partial.rs index 49937fb3798..455a811a237 100644 --- a/protocols/gossipsub/src/partial.rs +++ b/protocols/gossipsub/src/partial.rs @@ -53,13 +53,11 @@ pub trait Partial { /// generates the actual message data to send back. The `metadata` parameter /// describes what parts are being requested. /// - /// Returns a tuple of: - /// - The encoded partial message bytes to send over the network - /// - Optional remaining metadata if more parts are still available after this one + /// Returns a [`PublishAction`] for the given metadata, or an error. fn partial_message_bytes_from_metadata( &self, metadata: Option>, - ) -> Result<(impl AsRef<[u8]>, Option>), PartialMessageError>; + ) -> Result; /// Extends this message with received partial message data. /// @@ -74,3 +72,19 @@ pub trait Partial { data: &[u8], ) -> Result<(), PartialMessageError>; } + +/// Indicates the action to take for the given metadata. +pub enum PublishAction { + /// The metadata signals that the peer already has all data. Do not keep track of the peer + /// anymore. 
+    PeerHasAllData,
+    /// While the peer still needs data, we do not have any data it needs, and therefore send
+    /// nothing but keep the metadata.
+    NothingToSend,
+    /// We have something of interest to this peer, but can not send everything it needs. Send a
+    /// message and associate some new metadata to the peer, representing the remaining need.
+    Send { message: Vec, metadata: Vec },
+    /// We can send everything this peer needs. Send message, then do not keep track of the peer
+    /// anymore.
+    SendRemaining { message: Vec },
+}

From 88806ad815c98023db32c4cc239e00a16f542f8f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?=
Date: Mon, 13 Oct 2025 16:26:54 +0100
Subject: [PATCH 30/68] update Partial implementation

Allow updating local data with remote data monotonically, i.e. if we
receive an outdated partial, `Metadata::update()` should not override
previous data.
---
 protocols/gossipsub/src/behaviour.rs | 41 ++++++++++++++--------------
 protocols/gossipsub/src/partial.rs   | 40 +++++++++++++--------------
 protocols/gossipsub/src/types.rs     | 25 +++++++++++++++--
 3 files changed, 62 insertions(+), 44 deletions(-)

diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs
index e7a5d3ec685..2442a2f10d3 100644
--- a/protocols/gossipsub/src/behaviour.rs
+++ b/protocols/gossipsub/src/behaviour.rs
@@ -878,26 +878,15 @@ where
         };
 
         let message = match action {
-            PublishAction::NothingToSend => {
+            PublishAction::SameMetadata => {
                 // No new data to send peer.
                 continue;
             }
-            PublishAction::PeerHasAllData => {
-                // Peer partial is now complete
-                // remove it from the list
-                peer_partials.remove(&group_id);
-                continue;
-            }
             PublishAction::Send { metadata, message } => {
-                peer_partial.metadata = Some(metadata);
-                message
-            }
-            PublishAction::SendRemaining { message } => {
-                // Peer partial is complete after sending this message
-                // remove it from the list
-                peer_partials.remove(&group_id);
-                message
+                peer_partial.metadata = Some(crate::types::PeerMetadata::Local(metadata));
+                Some(message)
             }
+            PublishAction::NothingToSend => None,
         };
 
         self.send_message(
@@ -1694,6 +1683,8 @@ where
     /// Handle incoming partial message from a peer.
     #[cfg(feature = "partial_messages")]
     fn handle_partial_message(&mut self, peer_id: &PeerId, partial_message: PartialMessage) {
+        use crate::types::PeerMetadata;
+
         tracing::debug!(
             peer=%peer_id,
             topic=%partial_message.topic_id,
@@ -1717,13 +1708,23 @@
             .entry(partial_message.group_id.clone())
             .or_default();
 
-        // Noop if the received partial is the same we already have.
- if partial_message.metadata == peer_partial.metadata { - return; + match (&peer_partial.metadata, &partial_message.metadata) { + (None, Some(remote_metadata)) => { + peer_partial.metadata = Some(PeerMetadata::Remote(remote_metadata.clone())) + } + (Some(PeerMetadata::Remote(ref metadata)), Some(remote_metadata)) => { + if metadata != remote_metadata { + peer_partial.metadata = Some(PeerMetadata::Remote(remote_metadata.clone())); + } + } + (Some(PeerMetadata::Local(metadata)), Some(remote_metadata)) => { + if !metadata.update(remote_metadata) { + return; + } + } + (Some(_), None) | (None, None) => {} } - peer_partial.metadata = partial_message.metadata.clone(); - self.events .push_back(ToSwarm::GenerateEvent(Event::Partial { topic_id: partial_message.topic_id, diff --git a/protocols/gossipsub/src/partial.rs b/protocols/gossipsub/src/partial.rs index 455a811a237..ed884c87965 100644 --- a/protocols/gossipsub/src/partial.rs +++ b/protocols/gossipsub/src/partial.rs @@ -18,6 +18,8 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::fmt::Debug; + use crate::error::PartialMessageError; /// PartialMessage is a message that can be broken up into parts. @@ -33,6 +35,8 @@ use crate::error::PartialMessageError; /// 5. Received partial data is integrated using `extend_from_encoded_partial_message()` /// 6. The `group_id()` ties all parts of the same logical message together pub trait Partial { + type Metadata: Metadata; + /// Returns the unique identifier for this message group. /// /// All partial messages belonging to the same logical message should return @@ -58,33 +62,27 @@ pub trait Partial { &self, metadata: Option>, ) -> Result; +} - /// Extends this message with received partial message data. - /// - /// When partial message data is received from a peer, this method integrates - /// it into the current message state. The implementation should validate and - /// store the received data appropriately. - /// - /// Returns `Ok(())` if the data was successfully integrated, or `Err`, - /// if the data was invalid or couldn't be processed. - fn extend_from_encoded_partial_message( - &mut self, - data: &[u8], - ) -> Result<(), PartialMessageError>; +pub trait Metadata: Debug { + /// Return the `Metadata` as a byte slice. + fn as_slice(&self) -> &[u8]; + /// try to Update the `Metadata` with the remote data, + /// return true if it was updated. + fn update(&self, data: &[u8]) -> bool; } /// Indicates the action to take for the given metadata. pub enum PublishAction { - /// The metadata signals that the peer already has all data. Do not keep track of the peer - /// anymore. - PeerHasAllData, - /// While the peer still needs data, we do not have any data it needs, and therefore send - /// nothing but keep the metadata. + /// The provided input metadata is the same as the output, + /// this means we have the same data as the peer. + SameMetadata, + /// We have nothing to send to the peer, but we need parts from the peer. NothingToSend, /// We have something of interest to this peer, but can not send everything it needs. Send a /// message and associate some new metadata to the peer, representing the remaining need. - Send { message: Vec, metadata: Vec }, - /// We can send everything this peer needs. Send message, then do not keep track of the peer - /// anymore. 
- SendRemaining { message: Vec }, + Send { + message: Vec, + metadata: Box, + }, } diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs index 37cb9ce39fb..93632f442e9 100644 --- a/protocols/gossipsub/src/types.rs +++ b/protocols/gossipsub/src/types.rs @@ -113,12 +113,31 @@ pub(crate) struct PeerDetails { pub(crate) messages: Queue, } +/// Stored `Metadata` for a peer. +/// +#[cfg(feature = "partial_messages")] +#[derive(Debug)] +pub(crate) enum PeerMetadata { + Remote(Vec), + Local(Box), +} + +#[cfg(feature = "partial_messages")] +impl AsRef<[u8]> for PeerMetadata { + fn as_ref(&self) -> &[u8] { + match self { + PeerMetadata::Remote(metadata) => metadata, + PeerMetadata::Local(metadata) => metadata.as_slice(), + } + } +} + /// The partial message data the peer has. #[cfg(feature = "partial_messages")] #[derive(Debug)] pub(crate) struct PartialData { /// The current peer partial metadata. - pub(crate) metadata: Option>, + pub(crate) metadata: Option, /// The remaining heartbeats for this message to be deleted. pub(crate) ttl: usize, } @@ -408,7 +427,7 @@ pub enum RpcOut { /// The topic ID this partial message belongs to. topic_id: TopicHash, /// The partial message itself. - message: Vec, + message: Option>, /// The partial metadata we have and want. metadata: Vec, }, @@ -620,7 +639,7 @@ impl From for proto::RPC { partial: Some(proto::PartialMessagesExtension { topicID: Some(topic_id.as_str().as_bytes().to_vec()), groupID: Some(group_id), - partialMessage: Some(message), + partialMessage: message, partsMetadata: Some(metadata), }), }, From 4ea2e45cc2ad807c478270c22127f4b87d5f1601 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Tue, 14 Oct 2025 11:24:42 +0100 Subject: [PATCH 31/68] address Daniel's review --- protocols/gossipsub/src/behaviour.rs | 34 +++++++++++++++++++--------- protocols/gossipsub/src/partial.rs | 4 +--- 2 files changed, 24 insertions(+), 14 deletions(-) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 2442a2f10d3..9a6c8aa7630 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -874,6 +874,7 @@ where tracing::error!(peer = %peer_id, group_id = ?group_id, "Could not reconstruct message bytes for peer metadata"); peer_partials.remove(&group_id); + //TODO: penalize peer. continue; }; @@ -1718,21 +1719,32 @@ where } } (Some(PeerMetadata::Local(metadata)), Some(remote_metadata)) => { - if !metadata.update(remote_metadata) { - return; + match metadata.update(remote_metadata) { + Ok(true) => { + self.events + .push_back(ToSwarm::GenerateEvent(Event::Partial { + topic_id: partial_message.topic_id, + propagation_source: *peer_id, + group_id: partial_message.group_id, + message: partial_message.message, + metadata: partial_message.metadata, + })); + } + Ok(false) => {} + Err(err) => { + tracing::debug!( + peer=%peer_id, + topic=%partial_message.topic_id, + group_id=?partial_message.group_id, + err=%err, + "Error updating Partial metadata" + ); + //TODO: penalize peer. + } } } (Some(_), None) | (None, None) => {} } - - self.events - .push_back(ToSwarm::GenerateEvent(Event::Partial { - topic_id: partial_message.topic_id, - propagation_source: *peer_id, - group_id: partial_message.group_id, - message: partial_message.message, - metadata: partial_message.metadata, - })); } /// Removes the specified peer from the mesh, returning true if it was present. 
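Before the trait diff that follows, here is a minimal sketch of what an application-side metadata type honouring this monotonic update contract could look like. It is illustrative only: `PartsBitmap` and `InvalidMetadata` are hypothetical names, the methods merely mirror the `as_slice`/`update` shape of the `Metadata` trait, and the sketch uses the `&mut self` receiver that the trait settles on a few commits later.

/// Hypothetical stand-in for the crate's `PartialMessageError`.
#[derive(Debug)]
pub struct InvalidMetadata;

/// A toy metadata encoding: one byte per part, non-zero when the part is held.
#[derive(Debug)]
pub struct PartsBitmap {
    have: Vec<u8>,
}

impl PartsBitmap {
    /// The wire representation advertised to peers.
    pub fn as_slice(&self) -> &[u8] {
        &self.have
    }

    /// Merge a remote advertisement monotonically: a part marker can only be
    /// set, never cleared, so an outdated remote metadata cannot override
    /// newer local state. Returns `Ok(true)` only if something changed.
    pub fn update(&mut self, data: &[u8]) -> Result<bool, InvalidMetadata> {
        if data.len() != self.have.len() {
            // Malformed remote metadata; the behaviour treats this as a
            // penalizable offence (see the TODOs in the diff above).
            return Err(InvalidMetadata);
        }
        let mut changed = false;
        for (local, remote) in self.have.iter_mut().zip(data) {
            let merged = *local | *remote;
            changed |= merged != *local;
            *local = merged;
        }
        Ok(changed)
    }
}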
diff --git a/protocols/gossipsub/src/partial.rs b/protocols/gossipsub/src/partial.rs index ed884c87965..0a42a79ce9a 100644 --- a/protocols/gossipsub/src/partial.rs +++ b/protocols/gossipsub/src/partial.rs @@ -35,8 +35,6 @@ use crate::error::PartialMessageError; /// 5. Received partial data is integrated using `extend_from_encoded_partial_message()` /// 6. The `group_id()` ties all parts of the same logical message together pub trait Partial { - type Metadata: Metadata; - /// Returns the unique identifier for this message group. /// /// All partial messages belonging to the same logical message should return @@ -69,7 +67,7 @@ pub trait Metadata: Debug { fn as_slice(&self) -> &[u8]; /// try to Update the `Metadata` with the remote data, /// return true if it was updated. - fn update(&self, data: &[u8]) -> bool; + fn update(&self, data: &[u8]) -> Result; } /// Indicates the action to take for the given metadata. From ea73a7147e74a7dd97d1b1c374aeb429ace76f03 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Tue, 14 Oct 2025 12:35:34 +0100 Subject: [PATCH 32/68] address missing review comment --- protocols/gossipsub/src/behaviour.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 9a6c8aa7630..cfe8a12d5ab 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -603,7 +603,7 @@ where self.leave(&topic_hash); #[cfg(feature = "partial_messages")] { - self.partial_only_topics.insert(topic_hash.clone()); + self.partial_only_topics.remove(&topic_hash.clone()); } tracing::debug!(topic=%topic_hash, "Unsubscribed from topic"); From 0e5245131beed263d8b05bb5f480e5a77f9f6206 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Tue, 14 Oct 2025 17:26:33 +0100 Subject: [PATCH 33/68] fix(libp2p): delegate metrics feature to gossipsub when enabled Pull-Request: #6180. --- Cargo.lock | 2 +- Cargo.toml | 2 +- libp2p/CHANGELOG.md | 5 +++++ libp2p/Cargo.toml | 4 ++-- 4 files changed, 9 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c1e2bd967b0..c85e83ff003 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2414,7 +2414,7 @@ checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" [[package]] name = "libp2p" -version = "0.56.0" +version = "0.56.1" dependencies = [ "bytes", "either", diff --git a/Cargo.toml b/Cargo.toml index e51faa159be..2cc5140e903 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -74,7 +74,7 @@ rust-version = "1.83.0" edition = "2021" [workspace.dependencies] -libp2p = { version = "0.56.0", path = "libp2p" } +libp2p = { version = "0.56.1", path = "libp2p" } libp2p-allow-block-list = { version = "0.6.0", path = "misc/allow-block-list" } libp2p-autonat = { version = "0.15.0", path = "protocols/autonat" } libp2p-connection-limits = { version = "0.6.0", path = "misc/connection-limits" } diff --git a/libp2p/CHANGELOG.md b/libp2p/CHANGELOG.md index cb27ebe68e5..5c6c4a6865c 100644 --- a/libp2p/CHANGELOG.md +++ b/libp2p/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.56.1 + +- Fix `metrics` delegation to gossipsub protocol. + See [PR 6180](https://github.com/libp2p/rust-libp2p/pull/6180) + ## 0.56.0 - Remove `async-std` support. 
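For context, the `libp2p-gossipsub?/metrics` entry in the Cargo.toml diff below uses Cargo's weak-dependency syntax: it enables gossipsub's `metrics` feature only when the optional `libp2p-gossipsub` dependency is itself enabled, so turning on the umbrella `metrics` feature no longer silently leaves gossipsub metrics off. A minimal usage sketch, assuming the `gossipsub` and `metrics` features of `libp2p` are both enabled and relying on the `new_with_metrics` constructor and the `MetricsConfig` re-export that gossipsub already provides:

use libp2p::{gossipsub, identity::Keypair};
use prometheus_client::registry::Registry;

// Build a gossipsub behaviour that records its metrics into `registry`.
// With this fix, enabling `libp2p/metrics` also turns on
// `libp2p-gossipsub/metrics`, so these symbols are available.
fn gossipsub_with_metrics(
    keypair: &Keypair,
    registry: &mut Registry,
) -> Result<gossipsub::Behaviour, &'static str> {
    gossipsub::Behaviour::new_with_metrics(
        gossipsub::MessageAuthenticity::Signed(keypair.clone()),
        gossipsub::Config::default(),
        registry,
        gossipsub::MetricsConfig::default(),
    )
}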
diff --git a/libp2p/Cargo.toml b/libp2p/Cargo.toml index 48f4c9477bd..dd1952fb93e 100644 --- a/libp2p/Cargo.toml +++ b/libp2p/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p" edition.workspace = true rust-version = { workspace = true } description = "Peer-to-peer networking library" -version = "0.56.0" +version = "0.56.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -65,7 +65,7 @@ kad = ["dep:libp2p-kad", "libp2p-metrics?/kad"] macros = ["libp2p-swarm/macros"] mdns = ["dep:libp2p-mdns"] memory-connection-limits = ["dep:libp2p-memory-connection-limits"] -metrics = ["dep:libp2p-metrics"] +metrics = ["dep:libp2p-metrics", "libp2p-gossipsub?/metrics"] noise = ["dep:libp2p-noise"] ping = ["dep:libp2p-ping", "libp2p-metrics?/ping"] plaintext = ["dep:libp2p-plaintext"] From c415f372bf79dc152931b982bc910e35db42243a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Wed, 15 Oct 2025 10:24:13 +0100 Subject: [PATCH 34/68] impl Daniel's suggestions for the public API --- protocols/gossipsub/src/behaviour.rs | 2 +- protocols/gossipsub/src/partial.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index cfe8a12d5ab..2f2cb4022f5 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -849,7 +849,7 @@ where pub fn publish_partial( &mut self, topic: impl Into, - partial_message: P, + partial_message: &P, ) -> Result<(), PublishError> { let topic_id = topic.into(); diff --git a/protocols/gossipsub/src/partial.rs b/protocols/gossipsub/src/partial.rs index 0a42a79ce9a..6a5f66115e7 100644 --- a/protocols/gossipsub/src/partial.rs +++ b/protocols/gossipsub/src/partial.rs @@ -67,7 +67,7 @@ pub trait Metadata: Debug { fn as_slice(&self) -> &[u8]; /// try to Update the `Metadata` with the remote data, /// return true if it was updated. - fn update(&self, data: &[u8]) -> Result; + fn update(&mut self, data: &[u8]) -> Result; } /// Indicates the action to take for the given metadata. 
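The switch to `partial_message: &P` means the behaviour only borrows the application's partial message when publishing. A minimal caller-side sketch of that shape follows, assuming the `partial_messages` feature is enabled; `P` is any application type implementing the re-exported `gossipsub::Partial` trait. Because only a borrow is handed over, the application keeps ownership and can call this again whenever the message gains new parts, letting `publish_partial` compute per-peer deltas from the stored metadata each time.

use libp2p::gossipsub;

// Republish the same partial message object; only a shared borrow is
// handed to the behaviour, so `msg` stays owned (and mutable) by the caller.
fn republish<P: gossipsub::Partial>(
    gs: &mut gossipsub::Behaviour,
    topic: gossipsub::IdentTopic,
    msg: &P,
) -> Result<(), gossipsub::PublishError> {
    gs.publish_partial(topic, msg)
}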
From 3048649f5ae0900d666d5a631e2f0310fee5ecef Mon Sep 17 00:00:00 2001 From: Daniel Knopik Date: Wed, 15 Oct 2025 11:32:32 +0200 Subject: [PATCH 35/68] borrow metadata mutably for `metadata.update` --- protocols/gossipsub/src/behaviour.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 2f2cb4022f5..3fb6dc7dfa0 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -1709,7 +1709,7 @@ where .entry(partial_message.group_id.clone()) .or_default(); - match (&peer_partial.metadata, &partial_message.metadata) { + match (&mut peer_partial.metadata, &partial_message.metadata) { (None, Some(remote_metadata)) => { peer_partial.metadata = Some(PeerMetadata::Remote(remote_metadata.clone())) } From 1685aae00b5e004fa710e6c198a7dbd6294b5200 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Wed, 15 Oct 2025 11:47:12 +0100 Subject: [PATCH 36/68] impl penalization for failed partials --- protocols/gossipsub/src/behaviour.rs | 16 ++++++++++------ protocols/gossipsub/src/peer_score.rs | 17 +++++++++++++++++ 2 files changed, 27 insertions(+), 6 deletions(-) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 3fb6dc7dfa0..7128d6d2643 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -851,11 +851,11 @@ where topic: impl Into, partial_message: &P, ) -> Result<(), PublishError> { - let topic_id = topic.into(); + let topic_hash = topic.into(); let group_id = partial_message.group_id().as_ref().to_vec(); - let recipient_peers = self.get_publish_peers(&topic_id, false); + let recipient_peers = self.get_publish_peers(&topic_hash, false); let metadata = partial_message.parts_metadata().as_ref().to_vec(); for peer_id in recipient_peers.iter() { // TODO: this can be optimized, we are going to get the peer again on `send_message` @@ -865,7 +865,7 @@ where continue; }; - let peer_partials = peer.partial_messages.entry(topic_id.clone()).or_default(); + let peer_partials = peer.partial_messages.entry(topic_hash.clone()).or_default(); let peer_partial = peer_partials.entry(group_id.clone()).or_default(); let Ok(action) = @@ -874,7 +874,9 @@ where tracing::error!(peer = %peer_id, group_id = ?group_id, "Could not reconstruct message bytes for peer metadata"); peer_partials.remove(&group_id); - //TODO: penalize peer. + if let PeerScoreState::Active(peer_score) = &mut self.peer_score { + peer_score.reject_invalid_partial(peer_id, &topic_hash); + } continue; }; @@ -896,7 +898,7 @@ where message, metadata: metadata.clone(), group_id: group_id.clone(), - topic_id: topic_id.clone(), + topic_id: topic_hash.clone(), }, ); } @@ -1739,7 +1741,9 @@ where err=%err, "Error updating Partial metadata" ); - //TODO: penalize peer. + if let PeerScoreState::Active(peer_score) = &mut self.peer_score { + peer_score.reject_invalid_partial(peer_id, &partial_message.topic_id); + } } } } diff --git a/protocols/gossipsub/src/peer_score.rs b/protocols/gossipsub/src/peer_score.rs index 7a30038c48b..b0f4f1a433c 100644 --- a/protocols/gossipsub/src/peer_score.rs +++ b/protocols/gossipsub/src/peer_score.rs @@ -532,6 +532,23 @@ impl PeerScore { } } + /// Indicate that a peer has sent us invalid partial message data. 
+ pub(crate) fn reject_invalid_partial(&mut self, peer_id: &PeerId, topic_hash: &TopicHash) { + if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) { + if let Some(topic_stats) = + peer_stats.stats_or_default_mut(topic_hash.clone(), &self.params) + { + tracing::debug!( + peer=%peer_id, + topic=%topic_hash, + "[Penalty] Peer delivered invalid partial data in topic and gets penalized \ + for it", + ); + topic_stats.invalid_message_deliveries += 1f64; + } + } + } + /// Removes an ip from a peer pub(crate) fn remove_ip(&mut self, peer_id: &PeerId, ip: &IpAddr) { if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) { From 69cde69329c381048a9331596a7b8edfb1f69abf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Wed, 15 Oct 2025 11:54:48 +0100 Subject: [PATCH 37/68] address Marco review --- protocols/gossipsub/src/behaviour.rs | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 7128d6d2643..78d4e1f3fef 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -717,11 +717,11 @@ where } // Get Peers from the mesh or fanout to publish a message to. - // If partial set, filter out peers who only want partial messages for the topic. + // If `exclude_partial_only` set, filter out peers who only want partial messages for the topic. fn get_publish_peers( &mut self, topic_hash: &TopicHash, - filter_partial: bool, + exclude_partial_only: bool, ) -> HashSet { let mesh_n = self.config.mesh_n_for_topic(topic_hash); @@ -731,7 +731,7 @@ where .filter(|(_, peer)| { #[cfg(feature = "partial_messages")] { - if filter_partial && peer.partial_only_topics.contains(topic_hash) { + if exclude_partial_only && peer.partial_only_topics.contains(topic_hash) { return false; } } @@ -768,7 +768,7 @@ where &self.connected_peers, topic_hash, needed_extra_peers, - filter_partial, + exclude_partial_only, |peer| { !mesh_peers.contains(peer) && !self.explicit_peers.contains(peer) @@ -802,7 +802,7 @@ where &self.connected_peers, topic_hash, mesh_n, - filter_partial, + exclude_partial_only, |p| { !self.explicit_peers.contains(p) && !self @@ -2777,7 +2777,7 @@ where for topics in peer.partial_messages.values_mut() { topics.retain(|_, partial| { partial.ttl -= 1; - partial.ttl <= 0 + partial.ttl == 0 }); } } @@ -3795,8 +3795,8 @@ fn peer_removed_from_mesh( fn get_random_peers_dynamic( connected_peers: &HashMap, topic_hash: &TopicHash, - // If we want to filter for partial only peers. - partial: bool, + // If we want to exclude partial only peers. 
+ exclude_partial: bool, // maps the number of total peers to the number of selected peers n_map: impl Fn(usize) -> usize, mut f: impl FnMut(&PeerId) -> bool, @@ -3806,7 +3806,7 @@ fn get_random_peers_dynamic( .filter_map(|(peer_id, peer)| { #[cfg(feature = "partial_messages")] { - if partial && peer.partial_only_topics.contains(topic_hash) { + if exclude_partial && peer.partial_only_topics.contains(topic_hash) { return None; } } @@ -3840,10 +3840,10 @@ fn get_random_peers( connected_peers: &HashMap, topic_hash: &TopicHash, n: usize, - partial: bool, + exclude_partial: bool, f: impl FnMut(&PeerId) -> bool, ) -> BTreeSet { - get_random_peers_dynamic(connected_peers, topic_hash, partial, |_| n, f) + get_random_peers_dynamic(connected_peers, topic_hash, exclude_partial, |_| n, f) } /// Validates the combination of signing, privacy and message validation to ensure the From 19e359dedb3a1f4d3bed9c7e55a2ce12ba8322b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Wed, 15 Oct 2025 11:56:29 +0100 Subject: [PATCH 38/68] cargo fmt --- protocols/gossipsub/src/behaviour.rs | 1 - protocols/gossipsub/src/behaviour/tests.rs | 80 ++++++++++++++++++---- protocols/gossipsub/src/lib.rs | 5 +- protocols/gossipsub/src/types.rs | 5 +- 4 files changed, 69 insertions(+), 22 deletions(-) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 78d4e1f3fef..1e909441fb3 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -74,7 +74,6 @@ use crate::{ }, FailedMessages, PublishError, SubscriptionError, TopicScoreParams, ValidationError, }; - #[cfg(feature = "partial_messages")] use crate::{ partial::{Partial, PublishAction}, diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index 9d8c276798f..10a37160a6c 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -85,7 +85,12 @@ where // subscribe to the topics for t in self.topics { let topic = Topic::new(t); - gs.subscribe(&topic, #[cfg(feature = "partial_messages")] false).unwrap(); + gs.subscribe( + &topic, + #[cfg(feature = "partial_messages")] + false, + ) + .unwrap(); topic_hashes.push(topic.hash().clone()); } @@ -581,7 +586,12 @@ fn test_join() { // re-subscribe - there should be peers associated with the topic assert!( - gs.subscribe(&topics[0], #[cfg(feature = "partial_messages")] false).unwrap(), + gs.subscribe( + &topics[0], + #[cfg(feature = "partial_messages")] + false + ) + .unwrap(), "should be able to subscribe successfully" ); @@ -646,8 +656,8 @@ fn test_join() { extensions: None, #[cfg(feature = "partial_messages")] partial_messages: Default::default(), - #[cfg(feature = "partial_messages")] - partial_only_topics: Default::default(), + #[cfg(feature = "partial_messages")] + partial_only_topics: Default::default(), }, ); queues.insert(random_peer, receiver_queue); @@ -671,7 +681,12 @@ fn test_join() { } // subscribe to topic1 - gs.subscribe(&topics[1], #[cfg(feature = "partial_messages")] false).unwrap(); + gs.subscribe( + &topics[1], + #[cfg(feature = "partial_messages")] + false, + ) + .unwrap(); // the three new peers should have been added, along with 3 more from the pool. 
assert!( @@ -1051,8 +1066,8 @@ fn test_get_random_peers() { extensions: None, #[cfg(feature = "partial_messages")] partial_messages: Default::default(), - #[cfg(feature = "partial_messages")] - partial_only_topics: Default::default(), + #[cfg(feature = "partial_messages")] + partial_only_topics: Default::default(), }, ); } @@ -1738,7 +1753,12 @@ fn explicit_peers_not_added_to_mesh_on_subscribe() { } // subscribe now to topic - gs.subscribe(&topic, #[cfg(feature = "partial_messages")] false).unwrap(); + gs.subscribe( + &topic, + #[cfg(feature = "partial_messages")] + false, + ) + .unwrap(); // only peer 1 is in the mesh not peer 0 (which is an explicit peer) assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect()); @@ -1791,7 +1811,12 @@ fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() { gs.publish(topic.clone(), vec![1, 2, 3]).unwrap(); // subscribe now to topic - gs.subscribe(&topic, #[cfg(feature = "partial_messages")] false).unwrap(); + gs.subscribe( + &topic, + #[cfg(feature = "partial_messages")] + false, + ) + .unwrap(); // only peer 1 is in the mesh not peer 0 (which is an explicit peer) assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect()); @@ -2196,7 +2221,11 @@ fn test_unsubscribe_backoff() { "Peer should be pruned with `unsubscribe_backoff`." ); - let _ = gs.subscribe(&Topic::new(topics[0].to_string()), #[cfg(feature = "partial_messages")] false); + let _ = gs.subscribe( + &Topic::new(topics[0].to_string()), + #[cfg(feature = "partial_messages")] + false, + ); // forget all events until now let queues = flush_events(&mut gs, queues); @@ -5228,8 +5257,20 @@ fn test_subscribe_to_invalid_topic() { .to_subscribe(false) .create_network(); - assert!(gs.subscribe(&t1, #[cfg(feature = "partial_messages")] false).is_ok()); - assert!(gs.subscribe(&t2, #[cfg(feature = "partial_messages")] false).is_err()); + assert!(gs + .subscribe( + &t1, + #[cfg(feature = "partial_messages")] + false + ) + .is_ok()); + assert!(gs + .subscribe( + &t2, + #[cfg(feature = "partial_messages")] + false + ) + .is_err()); } #[test] @@ -5258,7 +5299,12 @@ fn test_subscribe_and_graft_with_negative_score() { let original_score = gs1.as_peer_score_mut().score_report(&p2).score; // subscribe to topic in gs2 - gs2.subscribe(&topic, #[cfg(feature = "partial_messages")] false).unwrap(); + gs2.subscribe( + &topic, + #[cfg(feature = "partial_messages")] + false, + ) + .unwrap(); let forward_messages_to_p1 = |gs1: &mut Behaviour<_, _>, p1: PeerId, @@ -6364,8 +6410,12 @@ fn test_multiple_topics_with_different_configs() { // re-subscribe to topic1 assert!( - gs.subscribe(&Topic::new(topic_hashes[0].to_string()), #[cfg(feature = "partial_messages")] false) - .unwrap(), + gs.subscribe( + &Topic::new(topic_hashes[0].to_string()), + #[cfg(feature = "partial_messages")] + false + ) + .unwrap(), "Should subscribe successfully" ); diff --git a/protocols/gossipsub/src/lib.rs b/protocols/gossipsub/src/lib.rs index a0c2d007b5b..1d68bec7208 100644 --- a/protocols/gossipsub/src/lib.rs +++ b/protocols/gossipsub/src/lib.rs @@ -119,9 +119,6 @@ pub mod partial; #[cfg(feature = "metrics")] pub use metrics::Config as MetricsConfig; -#[cfg(feature = "partial_messages")] -pub use self::{error::PartialMessageError, partial::Partial}; - pub use self::{ behaviour::{Behaviour, Event, MessageAuthenticity}, config::{Config, ConfigBuilder, ValidationMode, Version}, @@ -139,6 +136,8 @@ pub use self::{ transform::{DataTransform, IdentityTransform}, types::{FailedMessages, Message, MessageAcceptance, 
MessageId, RawMessage},
 };
+#[cfg(feature = "partial_messages")]
+pub use self::{error::PartialMessageError, partial::Partial};
 
 pub type IdentTopic = Topic;
 pub type Sha256Topic = Topic;
diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs
index 93632f442e9..ceee594ac90 100644
--- a/protocols/gossipsub/src/types.rs
+++ b/protocols/gossipsub/src/types.rs
@@ -19,6 +19,8 @@
 // DEALINGS IN THE SOFTWARE.
 
 //! A collection of types using the Gossipsub system.
+#[cfg(feature = "partial_messages")]
+use std::collections::HashMap;
 use std::{
     collections::BTreeSet,
     fmt::{self, Debug},
@@ -31,8 +33,6 @@ use libp2p_swarm::ConnectionId;
 use quick_protobuf::MessageWrite;
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
-#[cfg(feature = "partial_messages")]
-use std::collections::HashMap;
 use web_time::Instant;
 
 use crate::{queue::Queue, rpc_proto::proto, TopicHash};
@@ -114,7 +114,6 @@ pub(crate) struct PeerDetails {
 }
 
 /// Stored `Metadata` for a peer.
-///
 #[cfg(feature = "partial_messages")]
 #[derive(Debug)]
 pub(crate) enum PeerMetadata {

From a753ab0a656b5f61fbe30817fd55cad2b3a948f9 Mon Sep 17 00:00:00 2001
From: Age Manning
Date: Sat, 18 Oct 2025 08:26:41 +1100
Subject: [PATCH 39/68] fix(gossipsub): Fix underflow when shuffling peers after pruning.

I noticed two issues during testing.

1. We allow our mesh to grow greater than `mesh_n_high`, intentionally
2. There is a potential underflow in the heartbeat that can cause a panic

For 1.
This looks like it's intentional but I can't recall why we would have added it. I think it's counter-intuitive to allow our mesh to grow larger than the specified parameter. I suspect we added it to prevent our mesh from being filled with inbound peers and potentially being eclipsed. I suspect the best approach here is to remove inbound peers in the mesh maintenance rather than exceeding the mesh_n_high configuration.

For 2.
There is an underflow which this PR prevents. It can be triggered for low mesh_n_high values, i.e. 0. This shouldn't be a concern for regular users, but we shouldn't have code that can panic based on user configuration.

Pull-Request: #6183.
---
 protocols/gossipsub/CHANGELOG.md     | 3 +++
 protocols/gossipsub/src/behaviour.rs | 4 +++-
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/protocols/gossipsub/CHANGELOG.md b/protocols/gossipsub/CHANGELOG.md
index 6e19eaf1be9..940030a632b 100644
--- a/protocols/gossipsub/CHANGELOG.md
+++ b/protocols/gossipsub/CHANGELOG.md
@@ -1,4 +1,7 @@
 ## 0.50.0
+- Fix underflow when shuffling peers after pruning.
+  See [PR 6183](https://github.com/libp2p/rust-libp2p/pull/6183)
+
 - Remove peer penalty for duplicate messages.
  See [PR 6112](https://github.com/libp2p/rust-libp2p/pull/6112)
 
diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs
index a0a3a16f0e7..1b780ffba5e 100644
--- a/protocols/gossipsub/src/behaviour.rs
+++ b/protocols/gossipsub/src/behaviour.rs
@@ -2228,7 +2228,9 @@ where
             score_p1.partial_cmp(&score_p2).unwrap_or(Ordering::Equal)
         });
         // shuffle everything except the last retain_scores many peers (the best ones)
-        shuffled[..peers.len() - self.config.retain_scores()].shuffle(&mut rng);
+        if peers.len() > self.config.retain_scores() {
+            shuffled[..peers.len() - self.config.retain_scores()].shuffle(&mut rng);
+        }
 
         // count total number of outbound peers
         let mut outbound = shuffled

From 0a81a74d5112b219eb5500fc45e96f092ca20d48 Mon Sep 17 00:00:00 2001
From: hanabi1224
Date: Sat, 18 Oct 2025 07:17:43 +0800
Subject: [PATCH 40/68] chore(deps): bump `prometheus-client`

Pull-Request: #6181.
---
 Cargo.lock | 8 ++++----
 Cargo.toml | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index c85e83ff003..421ae65f53f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4259,9 +4259,9 @@ dependencies = [
 
 [[package]]
 name = "prometheus-client"
-version = "0.23.1"
+version = "0.24.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cf41c1a7c32ed72abe5082fb19505b969095c12da9f5732a4bc9878757fd087c"
+checksum = "e4500adecd7af8e0e9f4dbce15cfee07ce913fbf6ad605cc468b83f2d531ee94"
 dependencies = [
  "dtoa",
  "itoa",
@@ -4271,9 +4271,9 @@ dependencies = [
 
 [[package]]
 name = "prometheus-client-derive-encode"
-version = "0.4.2"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8"
+checksum = "9adf1691c04c0a5ff46ff8f262b58beb07b0dbb61f96f9f54f6cbd82106ed87f"
 dependencies = [
  "proc-macro2",
  "quote",
diff --git a/Cargo.toml b/Cargo.toml
index 2cc5140e903..36c148cffee 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -130,7 +130,7 @@ hickory-resolver = { version = "0.25.2", default-features = false }
 multiaddr = "0.18.1"
 multihash = "0.19.1"
 multistream-select = { version = "0.13.0", path = "misc/multistream-select" }
-prometheus-client = "0.23"
+prometheus-client = "0.24"
 quick-protobuf-codec = { version = "0.3.1", path = "misc/quick-protobuf-codec" }
 quickcheck = { package = "quickcheck-ext", path = "misc/quickcheck-ext" }
 rcgen = "0.13"

From 6b3c57a4b50e9562a0fe03326c10ecef313cca0b Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 20 Oct 2025 13:23:15 +0000
Subject: [PATCH 41/68] chore(deps): bump quic-go/quic-go from 0.50.1 to 0.54.1 in /wasm-tests

Pull-Request: #6178.
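Note on the underflow fixed in PATCH 39 above: the failure mode is easy to reproduce in isolation. Below is a minimal, self-contained sketch, assuming the `rand` 0.8 crate; the helper name `shuffle_all_but_best` and its arguments are illustrative stand-ins, not the actual gossipsub internals.

```rust
// Illustrative sketch only; not part of the patch series.
use rand::seq::SliceRandom;

/// Shuffle all peers except the `retain_scores` best ones at the tail.
/// Hypothetical stand-in for the heartbeat code touched by PATCH 39.
fn shuffle_all_but_best(shuffled: &mut [u64], retain_scores: usize) {
    let len = shuffled.len();
    // Without this guard, `len - retain_scores` underflows `usize` whenever
    // `retain_scores > len` (possible with a tiny or zero `mesh_n_high`):
    // a panic in debug builds, an out-of-bounds slice index in release.
    if len > retain_scores {
        shuffled[..len - retain_scores].shuffle(&mut rand::thread_rng());
    }
}

fn main() {
    let mut scored_peers = vec![3, 1, 2];
    // Only 3 peers but asked to retain 50: the unguarded version panics,
    // the guarded version correctly does nothing.
    shuffle_all_but_best(&mut scored_peers, 50);
    println!("{scored_peers:?}");
}
```

The guard turns the shuffle into a no-op whenever `retain_scores` covers the whole list, which is exactly the low `mesh_n_high` case described in PATCH 39.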
--- .../webtransport-tests/echo-server/go.mod | 57 ++++++++------- .../webtransport-tests/echo-server/go.sum | 70 +++++++++++++++---- 2 files changed, 84 insertions(+), 43 deletions(-) diff --git a/wasm-tests/webtransport-tests/echo-server/go.mod b/wasm-tests/webtransport-tests/echo-server/go.mod index 4ffb98f386c..a3576a42489 100644 --- a/wasm-tests/webtransport-tests/echo-server/go.mod +++ b/wasm-tests/webtransport-tests/echo-server/go.mod @@ -1,11 +1,11 @@ module echo-server -go 1.24 +go 1.24.6 require ( - github.com/libp2p/go-libp2p v0.41.0 - github.com/multiformats/go-multiaddr v0.15.0 - github.com/quic-go/quic-go v0.50.1 + github.com/libp2p/go-libp2p v0.44.0 + github.com/multiformats/go-multiaddr v0.16.1 + github.com/quic-go/quic-go v0.55.0 ) require ( @@ -16,46 +16,45 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect github.com/flynn/noise v1.1.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect - github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/google/gopacket v1.1.19 // indirect - github.com/google/pprof v0.0.0-20250302191652-9094ed2288e7 // indirect github.com/ipfs/go-cid v0.5.0 // indirect github.com/ipfs/go-log/v2 v2.5.1 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect - github.com/klauspost/compress v1.18.0 // indirect - github.com/klauspost/cpuid/v2 v2.2.10 // indirect + github.com/klauspost/compress v1.18.1 // indirect + github.com/klauspost/cpuid/v2 v2.3.0 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect - github.com/libp2p/go-netroute v0.2.2 // indirect + github.com/libp2p/go-netroute v0.3.0 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/minio/sha256-simd v1.0.1 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multibase v0.2.0 // indirect - github.com/multiformats/go-multicodec v0.9.0 // indirect + github.com/multiformats/go-multicodec v0.10.0 // indirect github.com/multiformats/go-multihash v0.2.3 // indirect - github.com/multiformats/go-multistream v0.6.0 // indirect - github.com/multiformats/go-varint v0.0.7 // indirect + github.com/multiformats/go-multistream v0.6.1 // indirect + github.com/multiformats/go-varint v0.1.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/onsi/ginkgo/v2 v2.23.0 // indirect - github.com/prometheus/client_golang v1.21.1 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.63.0 // indirect - github.com/prometheus/procfs v0.15.1 // indirect + github.com/prometheus/client_golang v1.23.2 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.67.1 // indirect + github.com/prometheus/procfs v0.17.0 // indirect github.com/quic-go/qpack v0.5.1 // indirect - github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect + github.com/quic-go/webtransport-go v0.9.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect - go.uber.org/mock v0.5.0 // indirect + go.uber.org/mock v0.5.2 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.36.0 // indirect - golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect - golang.org/x/mod v0.24.0 // indirect - golang.org/x/net v0.37.0 // indirect - golang.org/x/sync v0.12.0 // indirect - golang.org/x/sys v0.31.0 // indirect - 
golang.org/x/text v0.23.0 // indirect - golang.org/x/tools v0.31.0 // indirect - google.golang.org/protobuf v1.36.5 // indirect - lukechampine.com/blake3 v1.4.0 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + golang.org/x/crypto v0.43.0 // indirect + golang.org/x/exp v0.0.0-20251017212417-90e834f514db // indirect + golang.org/x/mod v0.29.0 // indirect + golang.org/x/net v0.46.0 // indirect + golang.org/x/sync v0.17.0 // indirect + golang.org/x/sys v0.37.0 // indirect + golang.org/x/text v0.30.0 // indirect + golang.org/x/time v0.14.0 // indirect + golang.org/x/tools v0.38.0 // indirect + google.golang.org/protobuf v1.36.10 // indirect + lukechampine.com/blake3 v1.4.1 // indirect ) diff --git a/wasm-tests/webtransport-tests/echo-server/go.sum b/wasm-tests/webtransport-tests/echo-server/go.sum index 10c8992f9a3..a994475b9f4 100644 --- a/wasm-tests/webtransport-tests/echo-server/go.sum +++ b/wasm-tests/webtransport-tests/echo-server/go.sum @@ -40,10 +40,6 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= -github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= @@ -61,8 +57,6 @@ github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20250302191652-9094ed2288e7 h1:+J3r2e8+RsmN3vKfo75g0YSY61ms37qzPglu4p0sGro= -github.com/google/pprof v0.0.0-20250302191652-9094ed2288e7/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -80,8 +74,12 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co= +github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0= github.com/klauspost/cpuid/v2 v2.2.10 
h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= +github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -95,10 +93,14 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/libp2p/go-libp2p v0.41.0 h1:JRaD39dqf/tBBGapJ0T38N73vOaDCsWgcx3mE6HgXWk= github.com/libp2p/go-libp2p v0.41.0/go.mod h1:Be8QYqC4JW6Xq8buukNeoZJjyT1XUDcGoIooCHm1ye4= +github.com/libp2p/go-libp2p v0.44.0 h1:5Gtt8OrF8yiXmH+Mx4+/iBeFRMK1TY3a8OrEBDEqAvs= +github.com/libp2p/go-libp2p v0.44.0/go.mod h1:NovCojezAt4dnDd4fH048K7PKEqH0UFYYqJRjIIu8zc= github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM= github.com/libp2p/go-netroute v0.2.2 h1:Dejd8cQ47Qx2kRABg6lPwknU7+nBnFRpko45/fFPuZ8= github.com/libp2p/go-netroute v0.2.2/go.mod h1:Rntq6jUAH0l9Gg17w5bFGhcC9a+vk4KNXs6s7IljKYE= +github.com/libp2p/go-netroute v0.3.0 h1:nqPCXHmeNmgTJnktosJ/sIef9hvwYCrsLxXmfNks/oc= +github.com/libp2p/go-netroute v0.3.0/go.mod h1:Nkd5ShYgSMS5MUKy/MU2T57xFoOKvvLR92Lic48LEyA= github.com/libp2p/go-yamux/v5 v5.0.0 h1:2djUh96d3Jiac/JpGkKs4TO49YhsfLopAoryfPmf+Po= github.com/libp2p/go-yamux/v5 v5.0.0/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= @@ -120,24 +122,28 @@ github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9 github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= github.com/multiformats/go-multiaddr v0.15.0 h1:zB/HeaI/apcZiTDwhY5YqMvNVl/oQYvs3XySU+qeAVo= github.com/multiformats/go-multiaddr v0.15.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= +github.com/multiformats/go-multiaddr v0.16.1 h1:fgJ0Pitow+wWXzN9do+1b8Pyjmo8m5WhGfzpL82MpCw= +github.com/multiformats/go-multiaddr v0.16.1/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg= github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= +github.com/multiformats/go-multicodec v0.10.0 h1:UpP223cig/Cx8J76jWt91njpK3GTAO1w02sdcjZDSuc= +github.com/multiformats/go-multicodec v0.10.0/go.mod h1:wg88pM+s2kZJEQfRCKBNU+g32F5aWBEjyFHXvZLTcLI= github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= github.com/multiformats/go-multistream v0.6.0 h1:ZaHKbsL404720283o4c/IHQXiS6gb8qAN5EIJ4PN5EA= github.com/multiformats/go-multistream v0.6.0/go.mod h1:MOyoG5otO24cHIg8kf9QW2/NozURlkP/rvi2FQJyCPg= +github.com/multiformats/go-multistream v0.6.1 h1:4aoX5v6T+yWmc2raBHsTvzmFhOI8WVOer28DeBBEYdQ= 
+github.com/multiformats/go-multistream v0.6.1/go.mod h1:ksQf6kqHAb6zIsyw7Zm+gAuVo57Qbq84E27YlYqavqw= github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= +github.com/multiformats/go-varint v0.1.0 h1:i2wqFp4sdl3IcIxfAonHQV9qU5OsZ4Ts9IOoETFs5dI= +github.com/multiformats/go-varint v0.1.0/go.mod h1:5KVAVXegtfmNQQm/lCY+ATvDzvJJhSkUlGQV9wgObdI= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= -github.com/onsi/ginkgo/v2 v2.23.0 h1:FA1xjp8ieYDzlgS5ABTpdUDB7wtngggONc8a7ku2NqQ= -github.com/onsi/ginkgo/v2 v2.23.0/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= -github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= -github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -145,21 +151,33 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk= github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k= github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18= +github.com/prometheus/common v0.67.1 h1:OTSON1P4DNxzTg4hmKCc37o4ZAZDv0cfXLkOt0oEowI= +github.com/prometheus/common v0.67.1/go.mod h1:RpmT9v35q2Y+lsieQsdOh5sXZ6ajUGC8NjZAmr8vb0Q= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod 
h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= -github.com/quic-go/quic-go v0.50.1 h1:unsgjFIUqW8a2oopkY7YNONpV1gYND6Nt9hnt1PN94Q= -github.com/quic-go/quic-go v0.50.1/go.mod h1:Vim6OmUvlYdwBhXP9ZVrtGmCMWa3wEqhq3NgYrI8b4E= +github.com/quic-go/quic-go v0.54.1 h1:4ZAWm0AhCb6+hE+l5Q1NAL0iRn/ZrMwqHRGQiFwj2eg= +github.com/quic-go/quic-go v0.54.1/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY= +github.com/quic-go/quic-go v0.55.0 h1:zccPQIqYCXDt5NmcEabyYvOnomjs8Tlwl7tISjJh9Mk= +github.com/quic-go/quic-go v0.55.0/go.mod h1:DR51ilwU1uE164KuWXhinFcKWGlEjzys2l8zUl5Ss1U= github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 h1:4WFk6u3sOT6pLa1kQ50ZVdm8BQFgJNA117cepZxtLIg= github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66/go.mod h1:Vp72IJajgeOL6ddqrAhmp7IM9zbTcgkQxD/YdxrVwMw= +github.com/quic-go/webtransport-go v0.9.0 h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssklE09bA70= +github.com/quic-go/webtransport-go v0.9.0/go.mod h1:4FUYIiUc75XSsF6HShcLeXXYZJ9AGwo/xh3L8M/P1ao= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= @@ -196,6 +214,7 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= @@ -207,12 +226,15 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= +go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ 
-223,9 +245,13 @@ golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= +golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw= golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM= +golang.org/x/exp v0.0.0-20251017212417-90e834f514db h1:by6IehL4BH5k3e3SJmcoNbOobMey2SLpAF79iPOEBvw= +golang.org/x/exp v0.0.0-20251017212417-90e834f514db/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -235,6 +261,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= +golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -250,6 +278,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4= +golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -263,6 +293,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= 
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -277,16 +309,20 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= +golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= +golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= +golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -298,6 +334,8 @@ golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= +golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= +golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -319,6 +357,8 @@ google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= 
+google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -337,5 +377,7 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= lukechampine.com/blake3 v1.4.0 h1:xDbKOZCVbnZsfzM6mHSYcGRHZ3YrLDzqz8XnV4uaD5w= lukechampine.com/blake3 v1.4.0/go.mod h1:MQJNQCTnR+kwOP/JEZSxj3MaQjp80FOFSNMMHXcSeX0= +lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= +lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= From dd3fb5d7f9c7a88571e72a76b84f14a30f049746 Mon Sep 17 00:00:00 2001 From: hoijui Date: Mon, 20 Oct 2025 17:21:24 +0200 Subject: [PATCH 42/68] chore(README.md): Add Links to mentioned tech Adds links to related tech mentioned in examples/README. This is useful to people new to the field, like me. Pull-Request: #6016. --- examples/README.md | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/examples/README.md b/examples/README.md index b1fb9f1f104..2e397960b76 100644 --- a/examples/README.md +++ b/examples/README.md @@ -24,19 +24,26 @@ Each example includes its own README.md file with specific instructions on how t ## Individual libp2p features -- [Chat](./chat) A basic chat application demonstrating libp2p and the mDNS and Gossipsub protocols. -- [Distributed key-value store](./distributed-key-value-store) A basic key value store demonstrating libp2p and the mDNS and Kademlia protocol. +- [Chat](./chat) A basic chat application demonstrating libp2p and the [mDNS] and [Gossipsub] protocols. +- [Distributed key-value store](./distributed-key-value-store) A basic key value store demonstrating libp2p and the [mDNS] and [Kademlia] protocol. - [File sharing application](./file-sharing) Basic file sharing application with peers either providing or locating and getting files by name. - While obviously showcasing how to build a basic file sharing application with the Kademlia and - Request-Response protocol, the actual goal of this example is **to show how to integrate + While obviously showcasing how to build a basic file sharing application with the [Kademlia] and + [Request-Response] protocol, the actual goal of this example is **to show how to integrate rust-libp2p into a larger application**. -- [IPFS Kademlia](./ipfs-kad) Demonstrates how to perform Kademlia queries on the IPFS network. +- [IPFS Kademlia](./ipfs-kad) Demonstrates how to perform [Kademlia] queries on the [IPFS] network. -- [IPFS Private](./ipfs-private) Implementation using the gossipsub, ping and identify protocols to implement the ipfs private swarms feature. +- [IPFS Private](./ipfs-private) Implementation using the [Gossipsub], ping and identify protocols to implement the IPFS private swarms feature. 
- [Ping](./ping) Small `ping` clone, sending a ping to a peer, expecting a pong as a response.
   See [tutorial](../libp2p/src/tutorials/ping.rs) for a step-by-step guide building the example.
 
-- [Rendezvous](./rendezvous) Rendezvous Protocol. See [specs](https://github.com/libp2p/specs/blob/master/rendezvous/README.md).
+- [Rendezvous](./rendezvous) [Rendezvous] Protocol. See [specs](https://github.com/libp2p/specs/blob/master/rendezvous/README.md).
+
+[mDNS]: https://github.com/libp2p/specs/blob/master/discovery/mdns.md
+[Gossipsub]: https://github.com/libp2p/specs/tree/master/pubsub/gossipsub
+[Kademlia]: https://github.com/libp2p/specs/blob/master/kad-dht/README.md
+[Request-Response]: https://en.wikipedia.org/wiki/Request%E2%80%93response
+[IPFS]: https://ipfs.tech/
+[Rendezvous]: https://github.com/libp2p/specs/blob/master/rendezvous/README.md

From 54228bfaedd6a4f6196a89e8d69864fe0d74a87e Mon Sep 17 00:00:00 2001
From: Daniel Knopik
Date: Tue, 21 Oct 2025 10:47:57 +0200
Subject: [PATCH 43/68] Require Metadata to be Send + Sync

---
 protocols/gossipsub/src/partial.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/protocols/gossipsub/src/partial.rs b/protocols/gossipsub/src/partial.rs
index 6a5f66115e7..fe6555d7a54 100644
--- a/protocols/gossipsub/src/partial.rs
+++ b/protocols/gossipsub/src/partial.rs
@@ -62,7 +62,7 @@ pub trait Partial {
     ) -> Result;
 }
 
-pub trait Metadata: Debug {
+pub trait Metadata: Debug + Send + Sync {
     /// Return the `Metadata` as a byte slice.
     fn as_slice(&self) -> &[u8];
     /// try to Update the `Metadata` with the remote data,

From e0e603751563bead62f01a898e4c7056587f750a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?=
Date: Fri, 24 Oct 2025 12:12:42 +0100
Subject: [PATCH 44/68] fix(gossipsub): prevent mesh exceeding mesh_n_high

Split off from #6183, to quote:

>I noticed two issues during testing.
>
>We allow our mesh to grow greater than mesh_n_high, intentionally
>This looks like it's intentional but I can't recall why we would have added it. I think it's counter-intuitive to allow our mesh to grow larger than the specified parameter. I suspect we added it to prevent our mesh from being filled with inbound peers and potentially being eclipsed. I suspect the best approach here is to remove inbound peers in the mesh maintenance rather than exceeding the mesh_n_high configuration.

Pull-Request: #6184.
---
 protocols/gossipsub/CHANGELOG.md           |   3 +
 protocols/gossipsub/src/behaviour.rs       |   9 +-
 protocols/gossipsub/src/behaviour/tests.rs | 178 +++++++++------------
 3 files changed, 85 insertions(+), 105 deletions(-)

diff --git a/protocols/gossipsub/CHANGELOG.md b/protocols/gossipsub/CHANGELOG.md
index 940030a632b..b143b72c15c 100644
--- a/protocols/gossipsub/CHANGELOG.md
+++ b/protocols/gossipsub/CHANGELOG.md
@@ -1,4 +1,7 @@
 ## 0.50.0
+- Prevent mesh exceeding mesh_n_high.
+  See [PR 6184](https://github.com/libp2p/rust-libp2p/pull/6184)
+
 - Fix underflow when shuffling peers after pruning.
   See [PR 6183](https://github.com/libp2p/rust-libp2p/pull/6183)
 
diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs
index 1b780ffba5e..3d516098a7c 100644
--- a/protocols/gossipsub/src/behaviour.rs
+++ b/protocols/gossipsub/src/behaviour.rs
@@ -1366,8 +1366,6 @@ where
             tracing::error!(peer_id = %peer_id, "Peer non-existent when handling graft");
             return;
         };
-        // Needs to be here to comply with the borrow checker.
-        let is_outbound = connected_peer.outbound;
 
         // For each topic, if a peer has grafted us, then we necessarily must be in their mesh
         // and they must be subscribed to the topic. Ensure we have recorded the mapping.
@@ -1419,8 +1417,6 @@ where
                         peer_score.add_penalty(peer_id, 1);
 
                         // check the flood cutoff
-                        // See: https://github.com/rust-lang/rust-clippy/issues/10061
-                        #[allow(unknown_lints, clippy::unchecked_duration_subtraction)]
                         let flood_cutoff = (backoff_time + self.config.graft_flood_threshold())
                             - self.config.prune_backoff();
@@ -1455,10 +1451,9 @@ where
                     }
 
                     // check mesh upper bound and only allow graft if the upper bound is not reached
-                    // or if it is an outbound peer
                     let mesh_n_high = self.config.mesh_n_high_for_topic(&topic_hash);
 
-                    if peers.len() >= mesh_n_high && !is_outbound {
+                    if peers.len() >= mesh_n_high {
                         to_prune_topics.insert(topic_hash.clone());
                         continue;
                     }
@@ -2208,7 +2203,7 @@ where
         }
 
         // too many peers - remove some
-        if peers.len() > mesh_n_high {
+        if peers.len() >= mesh_n_high {
             tracing::debug!(
                 topic=%topic_hash,
                 "HEARTBEAT: Mesh high. Topic contains: {} will reduce to: {}",
diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs
index 7dd6c4f3689..debc05d9d2b 100644
--- a/protocols/gossipsub/src/behaviour/tests.rs
+++ b/protocols/gossipsub/src/behaviour/tests.rs
@@ -2340,59 +2340,23 @@ fn test_gossip_to_at_most_gossip_factor_peers() {
     );
 }
 
-#[test]
-fn test_accept_only_outbound_peer_grafts_when_mesh_full() {
-    let config: Config = Config::default();
-
-    // enough peers to fill the mesh
-    let (mut gs, peers, _, topics) = inject_nodes1()
-        .peer_no(config.mesh_n_high())
-        .topics(vec!["test".into()])
-        .to_subscribe(true)
-        .create_network();
-
-    // graft all the peers => this will fill the mesh
-    for peer in peers {
-        gs.handle_graft(&peer, topics.clone());
-    }
-
-    // assert current mesh size
-    assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high());
-
-    // create an outbound and an inbound peer
-    let (inbound, _in_queue) = add_peer(&mut gs, &topics, false, false);
-    let (outbound, _out_queue) = add_peer(&mut gs, &topics, true, false);
-
-    // send grafts
-    gs.handle_graft(&inbound, vec![topics[0].clone()]);
-    gs.handle_graft(&outbound, vec![topics[0].clone()]);
-
-    // assert mesh size
-    assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high() + 1);
-
-    // inbound is not in mesh
-    assert!(!gs.mesh[&topics[0]].contains(&inbound));
-
-    // outbound is in mesh
-    assert!(gs.mesh[&topics[0]].contains(&outbound));
-}
-
 #[test]
 fn test_do_not_remove_too_many_outbound_peers() {
     // use an extreme case to catch errors with high probability
-    let m = 50;
-    let n = 2 * m;
+    let mesh_n = 50;
+    let mesh_n_high = 2 * mesh_n;
     let config = ConfigBuilder::default()
-        .mesh_n_high(n)
-        .mesh_n(n)
-        .mesh_n_low(n)
-        .mesh_outbound_min(m)
+        .mesh_n_high(mesh_n_high)
+        .mesh_n(mesh_n)
+        // Irrelevant for this test.
+        .mesh_n_low(mesh_n)
+        .mesh_outbound_min(mesh_n)
         .build()
         .unwrap();
 
     // fill the mesh with inbound connections
     let (mut gs, peers, _queues, topics) = inject_nodes1()
-        .peer_no(n)
+        .peer_no(mesh_n)
         .topics(vec!["test".into()])
         .to_subscribe(true)
         .gs_config(config)
@@ -2405,60 +2369,26 @@ fn test_do_not_remove_too_many_outbound_peers() {
 
     // create m outbound connections and graft (we will accept the graft)
     let mut outbound = HashSet::new();
-    for _ in 0..m {
+    // Go from 50 (mesh_n) to 100 (mesh_n_high) to trigger pruning.
+ for _ in 0..mesh_n { let (peer, _) = add_peer(&mut gs, &topics, true, false); outbound.insert(peer); gs.handle_graft(&peer, topics.clone()); } // mesh is overly full - assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), n + m); + assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), mesh_n_high); // run a heartbeat gs.heartbeat(); - // Peers should be removed to reach n - assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), n); + // Peers should be removed to reach `mesh_n` + assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), mesh_n); // all outbound peers are still in the mesh assert!(outbound.iter().all(|p| gs.mesh[&topics[0]].contains(p))); } -#[test] -fn test_add_outbound_peers_if_min_is_not_satisfied() { - let config: Config = Config::default(); - - // Fill full mesh with inbound peers - let (mut gs, peers, _, topics) = inject_nodes1() - .peer_no(config.mesh_n_high()) - .topics(vec!["test".into()]) - .to_subscribe(true) - .create_network(); - - // graft all the peers - for peer in peers { - gs.handle_graft(&peer, topics.clone()); - } - - // create config.mesh_outbound_min() many outbound connections without grafting - let mut peers = vec![]; - for _ in 0..config.mesh_outbound_min() { - peers.push(add_peer(&mut gs, &topics, true, false)); - } - - // Nothing changed in the mesh yet - assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high()); - - // run a heartbeat - gs.heartbeat(); - - // The outbound peers got additionally added - assert_eq!( - gs.mesh[&topics[0]].len(), - config.mesh_n_high() + config.mesh_outbound_min() - ); -} - #[test] fn test_prune_negative_scored_peers() { let config = Config::default(); @@ -3205,22 +3135,20 @@ fn test_keep_best_scoring_peers_on_oversubscription() { .build() .unwrap(); - // build mesh with more peers than mesh can hold - let n = config.mesh_n_high() + 1; + let mesh_n_high = config.mesh_n_high(); + let (mut gs, peers, _queues, topics) = inject_nodes1() - .peer_no(n) + .peer_no(mesh_n_high) .topics(vec!["test".into()]) .to_subscribe(true) .gs_config(config.clone()) .explicit(0) - .outbound(n) .scoring(Some(( PeerScoreParams::default(), PeerScoreThresholds::default(), ))) .create_network(); - // graft all, will be accepted since the are outbound for peer in &peers { gs.handle_graft(peer, topics.clone()); } @@ -3232,7 +3160,7 @@ fn test_keep_best_scoring_peers_on_oversubscription() { gs.set_application_score(peer, index as f64); } - assert_eq!(gs.mesh[&topics[0]].len(), n); + assert_eq!(gs.mesh[&topics[0]].len(), mesh_n_high); // heartbeat to prune some peers gs.heartbeat(); @@ -3241,7 +3169,7 @@ fn test_keep_best_scoring_peers_on_oversubscription() { // mesh contains retain_scores best peers assert!(gs.mesh[&topics[0]].is_superset( - &peers[(n - config.retain_scores())..] + &peers[(mesh_n_high - config.retain_scores())..] 
.iter() .cloned() .collect() @@ -6118,10 +6046,13 @@ fn test_mesh_subtraction_with_topic_config() { let topic = String::from("topic1"); let topic_hash = TopicHash::from_raw(topic.clone()); + let mesh_n = 5; + let mesh_n_high = 7; + let topic_config = TopicMeshConfig { - mesh_n: 5, + mesh_n, + mesh_n_high, mesh_n_low: 3, - mesh_n_high: 7, mesh_outbound_min: 2, }; @@ -6130,15 +6061,12 @@ fn test_mesh_subtraction_with_topic_config() { .build() .unwrap(); - let peer_no = 12; - - // make all outbound connections so grafting to all will be allowed let (mut gs, peers, _, topics) = inject_nodes1() - .peer_no(peer_no) + .peer_no(mesh_n_high) .topics(vec![topic]) .to_subscribe(true) .gs_config(config.clone()) - .outbound(peer_no) + .outbound(mesh_n_high) .create_network(); // graft all peers @@ -6148,7 +6076,7 @@ fn test_mesh_subtraction_with_topic_config() { assert_eq!( gs.mesh.get(&topics[0]).unwrap().len(), - peer_no, + mesh_n_high, "Initially all peers should be in the mesh" ); @@ -6163,6 +6091,60 @@ fn test_mesh_subtraction_with_topic_config() { ); } +/// Tests that if a mesh reaches `mesh_n_high`, +/// but is only composed of outbound peers, it is not reduced to `mesh_n`. +#[test] +fn test_mesh_subtraction_with_topic_config_min_outbound() { + let topic = String::from("topic1"); + let topic_hash = TopicHash::from_raw(topic.clone()); + + let mesh_n = 5; + let mesh_n_high = 7; + + let topic_config = TopicMeshConfig { + mesh_n, + mesh_n_high, + mesh_n_low: 3, + mesh_outbound_min: 7, + }; + + let config = ConfigBuilder::default() + .set_topic_config(topic_hash.clone(), topic_config) + .build() + .unwrap(); + + let peer_no = 12; + + // make all outbound connections. + let (mut gs, peers, _, topics) = inject_nodes1() + .peer_no(peer_no) + .topics(vec![topic]) + .to_subscribe(true) + .gs_config(config.clone()) + .outbound(peer_no) + .create_network(); + + // graft all peers + for peer in peers { + gs.handle_graft(&peer, topics.clone()); + } + + assert_eq!( + gs.mesh.get(&topics[0]).unwrap().len(), + mesh_n_high, + "Initially mesh should be {mesh_n_high}" + ); + + // run a heartbeat + gs.heartbeat(); + + assert_eq!( + gs.mesh.get(&topics[0]).unwrap().len(), + mesh_n_high, + "After heartbeat, mesh should still be {mesh_n_high} as these are all outbound peers" + ); +} + /// Test behavior with multiple topics having different configs #[test] fn test_multiple_topics_with_different_configs() { From 61d436a1fca80d01007eb8a9b8aeb781fa5463ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Mon, 27 Oct 2025 15:46:26 +0000 Subject: [PATCH 45/68] chore: prepare libp2p-upnp v0.6.0 (#6188) ## Description Changelog was incorrect as we didn't release version `0.5.1` cc @RolandSherwin --- protocols/upnp/CHANGELOG.md | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/protocols/upnp/CHANGELOG.md b/protocols/upnp/CHANGELOG.md index 5d451d187d0..ed693bcf93f 100644 --- a/protocols/upnp/CHANGELOG.md +++ b/protocols/upnp/CHANGELOG.md @@ -1,17 +1,15 @@ ## 0.6.0 -- Change `Event::NewExternalAddr` and `Event::ExpiredExternalAddr` from tuple variants to struct variants - that include both local and external addresses. This allows users to correlate which local listen +- Change `Event::NewExternalAddr` and `Event::ExpiredExternalAddr` from tuple variants to struct variants + that include both local and external addresses. This allows users to correlate which local listen address was mapped to which external address. 
- `Event::NewExternalAddr` now contains `local_addr` and `external_addr` fields - `Event::ExpiredExternalAddr` now contains `local_addr` and `external_addr` fields See [PR 6121](https://github.com/libp2p/rust-libp2p/pull/6121). -## 0.5.1 - - Skip port mapping when an active port mapping is present. - Previously, the behavior would skip creating new mappings if any mapping - (active or inactive or pending) existed for the same port. Now it correctly only + Previously, the behavior would skip creating new mappings if any mapping + (active or inactive or pending) existed for the same port. Now it correctly only checks active mappings on the gateway. See [PR 6127](https://github.com/libp2p/rust-libp2p/pull/6127). From 954514071c660133a96efa616498d835fda3114d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Tue, 28 Oct 2025 09:41:14 +0000 Subject: [PATCH 46/68] fix sending partial messages to peers not supporting them --- protocols/gossipsub/src/behaviour.rs | 285 +++++++++------------ protocols/gossipsub/src/behaviour/tests.rs | 14 +- protocols/gossipsub/src/peer_score.rs | 1 + 3 files changed, 133 insertions(+), 167 deletions(-) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index a2267751408..90dcc3a33ad 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -658,7 +658,13 @@ where let topic_hash = raw_message.topic.clone(); - let recipient_peers = self.get_publish_peers(&topic_hash, true); + #[cfg(feature = "partial_messages")] + let recipient_peers = self.get_publish_peers(&topic_hash, |_, peer| { + !peer.partial_only_topics.contains(&topic_hash) + }); + + #[cfg(not(feature = "partial_messages"))] + let recipient_peers = self.get_publish_peers(&topic_hash, |_, _| true); // If the message isn't a duplicate and we have sent it to some peers add it to the // duplicate cache and memcache. @@ -715,129 +721,99 @@ where Ok(msg_id) } - // Get Peers from the mesh or fanout to publish a message to. - // If `exclude_partial_only` set, filter out peers who only want partial messages for the topic. + // Get Peers from the mesh or fanout to publish a message to + // filtered out further by the provided `f` callback. fn get_publish_peers( &mut self, topic_hash: &TopicHash, - exclude_partial_only: bool, + f: impl Fn(&PeerId, &PeerDetails) -> bool, ) -> HashSet { - let mesh_n = self.config.mesh_n_for_topic(topic_hash); - let peers_on_topic = self .connected_peers .iter() - .filter(|(_, peer)| { - #[cfg(feature = "partial_messages")] - { - if exclude_partial_only && peer.partial_only_topics.contains(topic_hash) { - return false; - } - } - let _ = peer; - true - }) - .map(|(peer_id, _)| peer_id) - .peekable(); - - let mut recipient_peers = HashSet::new(); - if self.config.flood_publish() { - // Forward to all peers above score and all explicit peers - recipient_peers.extend(peers_on_topic.filter(|p| { - self.explicit_peers.contains(*p) + .filter(|(_, peer)| peer.topics.contains(topic_hash)) + .filter(|(peer_id, _)| { + self.explicit_peers.contains(*peer_id) || !self .peer_score - .below_threshold(p, |ts| ts.publish_threshold) + .below_threshold(peer_id, |ts| ts.publish_threshold) .0 - })); - } else { - match self.mesh.get(topic_hash) { - // Mesh peers - Some(mesh_peers) => { - // We have a mesh set. We want to make sure to publish to at least `mesh_n` - // peers (if possible). 
- let needed_extra_peers = mesh_n.saturating_sub(mesh_peers.len()); - - if needed_extra_peers > 0 { - // We don't have `mesh_n` peers in our mesh, we will randomly select extras - // and publish to them. - - // Get a random set of peers that are appropriate to send messages too. - let peer_list = get_random_peers( - &self.connected_peers, - topic_hash, - needed_extra_peers, - exclude_partial_only, - |peer| { - !mesh_peers.contains(peer) - && !self.explicit_peers.contains(peer) - && !self - .peer_score - .below_threshold(peer, |ts| ts.publish_threshold) - .0 - }, - ); - recipient_peers.extend(peer_list); - } + }) + .filter(|(peer_id, peer_details)| f(peer_id, peer_details)); - recipient_peers.extend(mesh_peers); - } - // Gossipsub peers - None => { - tracing::debug!(topic=%topic_hash, "Topic not in the mesh"); - // `fanout_peers` is always non-empty if it's `Some`. - let fanout_peers = self - .fanout - .get(topic_hash) - .filter(|peers| !peers.is_empty()); - // If we have fanout peers add them to the map. - if let Some(peers) = fanout_peers { - for peer in peers { - recipient_peers.insert(*peer); - } - } else { - // We have no fanout peers, select mesh_n of them and add them to the fanout - let new_peers = get_random_peers( - &self.connected_peers, - topic_hash, - mesh_n, - exclude_partial_only, - |p| { - !self.explicit_peers.contains(p) - && !self - .peer_score - .below_threshold(p, |ts| ts.publish_threshold) - .0 - }, - ); - // Add the new peers to the fanout and recipient peers - self.fanout.insert(topic_hash.clone(), new_peers.clone()); - for peer in new_peers { - tracing::debug!(%peer, "Peer added to fanout"); - recipient_peers.insert(peer); - } - } - // We are publishing to fanout peers - update the time we published - self.fanout_last_pub - .insert(topic_hash.clone(), Instant::now()); + // Forward to all peers above score and all explicit peers + if self.config.flood_publish() { + return peers_on_topic.map(|(peer_id, _)| *peer_id).collect(); + } + + let mesh_n = self.config.mesh_n_for_topic(topic_hash); + let mut recipient_peers = HashSet::new(); + // Explicit peers that are part of the topic and Floodsub peers. + recipient_peers.extend( + peers_on_topic + .clone() + .filter(|(peer_id, peer)| { + self.explicit_peers.contains(peer_id) || peer.kind == PeerKind::Floodsub + }) + .map(|(peer_id, _)| *peer_id), + ); + + match self.mesh.get(topic_hash) { + // Mesh peers + Some(mesh_peers) => { + // We have a mesh set. We want to make sure to publish to at least `mesh_n` + // peers (if possible). + let mesh_peers = peers_on_topic + .clone() + .filter_map(|(peer_id, _)| mesh_peers.get(peer_id)) + .copied() + .collect::>(); + + let needed_extra_peers = mesh_n.saturating_sub(mesh_peers.len()); + if needed_extra_peers > 0 { + // We don't have `mesh_n` peers in our mesh, we will randomly select extras + // and publish to them. + + // Get a random set of peers that are appropriate to send messages too. 
+                let peer_list =
+                    get_random_peers(peers_on_topic, topic_hash, needed_extra_peers, |_, _| {
+                        true
+                    });
+                recipient_peers.extend(peer_list);
             }
-        }

-        // Explicit peers that are part of the topic
-        recipient_peers
-            .extend(peers_on_topic.filter(|peer_id| self.explicit_peers.contains(peer_id)));
+                recipient_peers.extend(mesh_peers);
+            }
+            // Gossipsub peers
+            None => {
+                tracing::debug!(topic=%topic_hash, "Topic not in the mesh");
+                let fanout_peers = peers_on_topic
+                    .clone()
+                    .filter_map(|(peer_id, _)| {
+                        self.fanout
+                            .get(topic_hash)
+                            .and_then(|fanout| fanout.get(peer_id))
+                    })
+                    .copied()
+                    .collect::<Vec<_>>();

-        // Floodsub peers
-        for (peer, connections) in &self.connected_peers {
-            if connections.kind == PeerKind::Floodsub
-                && connections.topics.contains(topic_hash)
-                && !self
-                    .peer_score
-                    .below_threshold(peer, |ts| ts.publish_threshold)
-                    .0
-            {
-                recipient_peers.insert(*peer);
+                // If we have fanout peers add them to the map.
+                if !fanout_peers.is_empty() {
+                    recipient_peers.extend(fanout_peers);
+                } else {
+                    // We have no fanout peers, select mesh_n of them and add them to the fanout
+                    let new_peers =
+                        get_random_peers(peers_on_topic, topic_hash, mesh_n, |_, _| true);
+                    // Add the new peers to the fanout and recipient peers
+                    self.fanout.insert(topic_hash.clone(), new_peers.clone());
+                    for peer in new_peers {
+                        tracing::debug!(%peer, "Peer added to fanout");
+                        recipient_peers.insert(peer);
+                    }
                 }
+                // We are publishing to fanout peers - update the time we published
+                self.fanout_last_pub
+                    .insert(topic_hash.clone(), Instant::now());
             }
         }
@@ -854,7 +830,9 @@ where
         let group_id = partial_message.group_id().as_ref().to_vec();

-        let recipient_peers = self.get_publish_peers(&topic_hash, false);
+        let recipient_peers = self.get_publish_peers(&topic_hash, |_, peer| {
+            peer.partial_only_topics.contains(&topic_hash)
+        });

         let metadata = partial_message.parts_metadata().as_ref().to_vec();
         for peer_id in recipient_peers.iter() {
             // TODO: this can be optimized, we are going to get the peer again on `send_message`
@@ -1151,12 +1129,11 @@ where
                 &self.connected_peers,
                 topic_hash,
                 mesh_n - added_peers.len(),
-                true,
-                |peer| {
-                    !added_peers.contains(peer)
-                        && !self.explicit_peers.contains(peer)
-                        && !self.peer_score.below_threshold(peer, |_| 0.0).0
-                        && !self.backoffs.is_backoff_with_slack(topic_hash, peer)
+                |peer_id, _| {
+                    !added_peers.contains(peer_id)
+                        && !self.explicit_peers.contains(peer_id)
+                        && !self.peer_score.below_threshold(peer_id, |_| 0.0).0
+                        && !self.backoffs.is_backoff_with_slack(topic_hash, peer_id)
                 },
             );
@@ -1246,8 +1223,9 @@ where
                     &self.connected_peers,
                     topic_hash,
                     self.config.prune_peers(),
-                    true,
-                    |p| p != peer && !self.peer_score.below_threshold(p, |_| 0.0).0,
+                    |peer_id, _| {
+                        peer_id != peer && !self.peer_score.below_threshold(peer_id, |_| 0.0).0
+                    },
                 )
                 .into_iter()
                 .map(|p| PeerInfo { peer_id: Some(p) })
@@ -2419,12 +2397,11 @@ where
                     &self.connected_peers,
                     topic_hash,
                     desired_peers,
-                    true,
-                    |peer| {
-                        !peers.contains(peer)
-                            && !explicit_peers.contains(peer)
-                            && !backoffs.is_backoff_with_slack(topic_hash, peer)
-                            && scores.get(peer).map(|r| r.score).unwrap_or_default() >= 0.0
+                    |peer_id, _| {
+                        !peers.contains(peer_id)
+                            && !explicit_peers.contains(peer_id)
+                            && !backoffs.is_backoff_with_slack(topic_hash, peer_id)
+                            && scores.get(peer_id).map(|r| r.score).unwrap_or_default() >= 0.0
                     },
                 );
                 for peer in &peer_list {
@@ -2527,8 +2504,7 @@ where
                     &self.connected_peers,
                     topic_hash,
                     needed,
-                    false,
-                    |peer_id| {
+                    |peer_id, _| {
                         !peers.contains(peer_id)
                             && !explicit_peers.contains(peer_id)
                             && !backoffs.is_backoff_with_slack(topic_hash, peer_id)
@@ -2604,8 +2580,7 @@ where
                     &self.connected_peers,
                     topic_hash,
                     self.config.opportunistic_graft_peers(),
-                    false,
-                    |peer_id| {
+                    |peer_id, _| {
                         !peers.contains(peer_id)
                             && !explicit_peers.contains(peer_id)
                             && !backoffs.is_backoff_with_slack(topic_hash, peer_id)
@@ -2701,8 +2676,7 @@ where
                     &self.connected_peers,
                     topic_hash,
                     needed_peers,
-                    false,
-                    |peer_id| {
+                    |peer_id, _| {
                         !peers.contains(peer_id)
                             && !explicit_peers.contains(peer_id)
                             && !self
@@ -2816,15 +2790,19 @@ where
                 )
             };
             // get gossip_lazy random peers
-            let to_msg_peers =
-                get_random_peers_dynamic(&self.connected_peers, topic_hash, false, n_map, |peer| {
-                    !peers.contains(peer)
-                        && !self.explicit_peers.contains(peer)
+            let to_msg_peers = get_random_peers_dynamic(
+                self.connected_peers.iter(),
+                topic_hash,
+                n_map,
+                |peer_id, _| {
+                    !peers.contains(peer_id)
+                        && !self.explicit_peers.contains(peer_id)
                         && !self
                             .peer_score
-                            .below_threshold(peer, |ts| ts.gossip_threshold)
+                            .below_threshold(peer_id, |ts| ts.gossip_threshold)
                             .0
-                });
+                },
+            );

             tracing::debug!("Gossiping IHAVE to {} peers", to_msg_peers.len());
@@ -3787,28 +3765,17 @@ fn peer_removed_from_mesh(
 /// Helper function to get a subset of random gossipsub peers for a `topic_hash`
 /// filtered by the function `f`. The number of peers to get equals the output of `n_map`
 /// that gets as input the number of filtered peers.
-#[allow(unused, reason = "partial is used with partial_messages feature")]
-fn get_random_peers_dynamic(
-    connected_peers: &HashMap<PeerId, PeerDetails>,
+fn get_random_peers_dynamic<'a>(
+    peers: impl IntoIterator<Item = (&'a PeerId, &'a PeerDetails)>,
     topic_hash: &TopicHash,
-    // If we want to exclude partial only peers.
-    exclude_partial: bool,
     // maps the number of total peers to the number of selected peers
     n_map: impl Fn(usize) -> usize,
-    mut f: impl FnMut(&PeerId) -> bool,
+    f: impl Fn(&PeerId, &PeerDetails) -> bool,
 ) -> BTreeSet<PeerId> {
-    let mut gossip_peers = connected_peers
-        .iter()
-        .filter_map(|(peer_id, peer)| {
-            #[cfg(feature = "partial_messages")]
-            {
-                if exclude_partial && peer.partial_only_topics.contains(topic_hash) {
-                    return None;
-                }
-            }
-            Some((peer_id, peer))
-        })
-        .filter(|(peer_id, _)| f(peer_id))
+    let mut gossip_peers = peers
+        .into_iter()
+        .filter(|(_, p)| p.topics.contains(topic_hash))
+        .filter(|(peer_id, peer_details)| f(peer_id, peer_details))
         .filter(|(_, p)| p.kind.is_gossipsub())
         .map(|(peer_id, _)| *peer_id)
         .collect::<Vec<PeerId>>();
@@ -3831,15 +3798,13 @@ fn get_random_peers_dynamic(
 /// Helper function to get a set of `n` random gossipsub peers for a `topic_hash`
 /// filtered by the function `f`.
-#[allow(unused, reason = "partial is used with partial_messages feature")]
-fn get_random_peers(
-    connected_peers: &HashMap<PeerId, PeerDetails>,
+fn get_random_peers<'a>(
+    peers: impl IntoIterator<Item = (&'a PeerId, &'a PeerDetails)>,
     topic_hash: &TopicHash,
     n: usize,
-    exclude_partial: bool,
-    f: impl FnMut(&PeerId) -> bool,
+    f: impl Fn(&PeerId, &PeerDetails) -> bool,
 ) -> BTreeSet<PeerId> {
-    get_random_peers_dynamic(connected_peers, topic_hash, exclude_partial, |_| n, f)
+    get_random_peers_dynamic(peers, topic_hash, |_| n, f)
 }

 /// Validates the combination of signing, privacy and message validation to ensure the
diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs
index 48de42de76f..25fae82b6b8 100644
--- a/protocols/gossipsub/src/behaviour/tests.rs
+++ b/protocols/gossipsub/src/behaviour/tests.rs
@@ -1072,27 +1072,27 @@ fn test_get_random_peers() {
         );
     }

-    let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 5, false, |_| true);
+    let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 5, |_, _| true);
     assert_eq!(random_peers.len(), 5, "Expected 5 peers to be returned");
-    let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 30, false, |_| true);
+    let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 30, |_, _| true);
     assert!(random_peers.len() == 20, "Expected 20 peers to be returned");
     assert!(
         random_peers == peers.iter().cloned().collect(),
         "Expected no shuffling"
     );
-    let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 20, false, |_| true);
+    let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 20, |_, _| true);
     assert!(random_peers.len() == 20, "Expected 20 peers to be returned");
     assert!(
         random_peers == peers.iter().cloned().collect(),
         "Expected no shuffling"
     );
-    let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 0, false, |_| true);
+    let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 0, |_, _| true);
     assert!(random_peers.is_empty(), "Expected 0 peers to be returned");
     // test the filter
-    let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 5, false, |_| false);
+    let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 5, |_, _| false);
     assert!(random_peers.is_empty(), "Expected 0 peers to be returned");
-    let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 10, false, {
-        |peer| peers.contains(peer)
+    let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 10, {
+        |peer_id, _| peers.contains(peer_id)
     });
     assert!(random_peers.len() == 10, "Expected 10 peers to be returned");
 }
diff --git a/protocols/gossipsub/src/peer_score.rs b/protocols/gossipsub/src/peer_score.rs
index b0f4f1a433c..06d9563a850 100644
--- a/protocols/gossipsub/src/peer_score.rs
+++ b/protocols/gossipsub/src/peer_score.rs
@@ -533,6 +533,7 @@ impl PeerScore {
     }

     /// Indicate that a peer has sent us invalid partial message data.
+ #[cfg(feature = "partial_messages")] pub(crate) fn reject_invalid_partial(&mut self, peer_id: &PeerId, topic_hash: &TopicHash) { if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) { if let Some(topic_stats) = From dd21893d23073295dfcea66c5496e92d3a000e73 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Wed, 29 Oct 2025 15:06:28 +0000 Subject: [PATCH 47/68] update to latest spec changes --- protocols/gossipsub/src/behaviour.rs | 76 +- protocols/gossipsub/src/behaviour/tests.rs | 66 +- .../gossipsub/src/generated/gossipsub/pb.rs | 12 +- protocols/gossipsub/src/generated/rpc.proto | 10 +- protocols/gossipsub/src/protocol.rs | 7 +- .../gossipsub/src/subscription_filter.rs | 46 +- protocols/gossipsub/src/types.rs | 31 +- protocols/gossipsub/tests/smoke.rs | 2 +- types.rs | 710 ++++++++++++++++++ 9 files changed, 876 insertions(+), 84 deletions(-) create mode 100644 types.rs diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 90dcc3a33ad..3b8e4da2c74 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -77,7 +77,7 @@ use crate::{ #[cfg(feature = "partial_messages")] use crate::{ partial::{Partial, PublishAction}, - types::PartialMessage, + types::{PartialMessage, PartialSubOpts}, }; #[cfg(test)] @@ -306,9 +306,9 @@ pub struct Behaviour { /// Overlay network of connected peers - Maps topics to connected gossipsub peers. mesh: HashMap>, - /// Partial only subscribed topics. + /// Partial options when subscribing topics. #[cfg(feature = "partial_messages")] - partial_only_topics: BTreeSet, + partial_opts: HashMap, /// Map of topics to list of peers that we publish to, but don't subscribe to. fanout: HashMap>, @@ -474,7 +474,7 @@ where failed_messages: Default::default(), gossip_promises: Default::default(), #[cfg(feature = "partial_messages")] - partial_only_topics: Default::default(), + partial_opts: Default::default(), }) } @@ -541,7 +541,8 @@ where pub fn subscribe( &mut self, topic: &Topic, - #[cfg(feature = "partial_messages")] partial_only: bool, + #[cfg(feature = "partial_messages")] requests_partial: bool, + #[cfg(feature = "partial_messages")] supports_partial: bool, ) -> Result { let topic_hash = topic.hash(); if !self.subscription_filter.can_subscribe(&topic_hash) { @@ -559,7 +560,10 @@ where let event = RpcOut::Subscribe { topic: topic_hash.clone(), #[cfg(feature = "partial_messages")] - partial_only, + partial_opts: PartialSubOpts { + requests_partial, + supports_partial, + }, }; self.send_message(peer_id, event); } @@ -569,9 +573,13 @@ where self.join(&topic_hash); #[cfg(feature = "partial_messages")] { - if partial_only { - self.partial_only_topics.insert(topic_hash.clone()); - } + self.partial_opts.insert( + topic_hash.clone(), + PartialSubOpts { + requests_partial, + supports_partial, + }, + ); } tracing::debug!(%topic, "Subscribed to topic"); @@ -602,7 +610,7 @@ where self.leave(&topic_hash); #[cfg(feature = "partial_messages")] { - self.partial_only_topics.remove(&topic_hash.clone()); + self.partial_opts.remove(&topic_hash.clone()); } tracing::debug!(topic=%topic_hash, "Unsubscribed from topic"); @@ -660,7 +668,10 @@ where #[cfg(feature = "partial_messages")] let recipient_peers = self.get_publish_peers(&topic_hash, |_, peer| { - !peer.partial_only_topics.contains(&topic_hash) + peer.partial_opts + .get(&topic_hash) + .map(|opts| !opts.requests_partial) + .unwrap_or(true) }); #[cfg(not(feature = "partial_messages"))] @@ -831,20 +842,42 @@ where let group_id = 
                partial_message.group_id().as_ref().to_vec();

         let recipient_peers = self.get_publish_peers(&topic_hash, |_, peer| {
-            peer.partial_only_topics.contains(&topic_hash)
+            peer.partial_opts
+                .get(&topic_hash)
+                .map(|opts| opts.supports_partial)
+                .unwrap_or_default()
         });

         let metadata = partial_message.parts_metadata().as_ref().to_vec();
         for peer_id in recipient_peers.iter() {
             // TODO: this can be optimized, we are going to get the peer again on `send_message`
             let Some(peer) = &mut self.connected_peers.get_mut(peer_id) else {
                 tracing::error!(peer = %peer_id,
-                    "Could not send rpc to connection handler, peer doesn't exist in connected peer list");
+                    "Could not get peer from connected peers, peer doesn't exist in connected peer list");
+                continue;
+            };
+            let Some(partial_opts) = peer.partial_opts.get(&topic_hash) else {
+                tracing::error!(peer = %peer_id,
+                    "Could not get partial subscription options from a peer that subscribed to partial messages");
                 continue;
             };
             let peer_partials = peer.partial_messages.entry(topic_hash.clone()).or_default();
             let peer_partial = peer_partials.entry(group_id.clone()).or_default();

+            // Peer set `supports_partial` but not `requests_partial`.
+            if !partial_opts.requests_partial {
+                self.send_message(
+                    *peer_id,
+                    RpcOut::PartialMessage {
+                        message: None,
+                        metadata: metadata.clone(),
+                        group_id: group_id.clone(),
+                        topic_id: topic_hash.clone(),
+                    },
+                );
+                continue;
+            }
+
             let Ok(action) =
                 partial_message.partial_message_bytes_from_metadata(peer_partial.metadata.as_ref())
             else {
@@ -856,7 +889,6 @@ where
                 }
                 continue;
             };
-
             let message = match action {
                 PublishAction::SameMetadata => {
                     // No new data to send peer.
@@ -2140,9 +2172,8 @@ where
                 SubscriptionAction::Subscribe => {
                     #[cfg(feature = "partial_messages")]
                     {
-                        if subscription.partial {
-                            peer.partial_only_topics.insert(topic_hash.clone());
-                        }
+                        peer.partial_opts
+                            .insert(topic_hash.clone(), subscription.partial_opts);
                     }
                     if peer.topics.insert(topic_hash.clone()) {
                         tracing::debug!(
@@ -3178,12 +3209,17 @@ where
         tracing::debug!(peer=%peer_id, "New peer connected");
         // We need to send our subscriptions to the newly-connected node.
         for topic_hash in self.mesh.clone().into_keys() {
for topic_hash in self.mesh.clone().into_keys() { + #[cfg(feature = "partial_messages")] + let Some(partial_opts) = self.partial_opts.get(&topic_hash).copied() else { + tracing::error!("Partial subscription options should exist for subscribed topic"); + return; + }; self.send_message( peer_id, RpcOut::Subscribe { topic: topic_hash.clone(), #[cfg(feature = "partial_messages")] - partial_only: self.partial_only_topics.contains(&topic_hash), + partial_opts, }, ); } @@ -3369,7 +3405,7 @@ where #[cfg(feature = "partial_messages")] partial_messages: Default::default(), #[cfg(feature = "partial_messages")] - partial_only_topics: Default::default(), + partial_opts: Default::default(), }); // Add the new connection connected_peer.connections.push(connection_id); @@ -3416,7 +3452,7 @@ where #[cfg(feature = "partial_messages")] partial_messages: Default::default(), #[cfg(feature = "partial_messages")] - partial_only_topics: Default::default(), + partial_opts: Default::default(), }); // Add the new connection connected_peer.connections.push(connection_id); diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index 25fae82b6b8..6a671e7f8d0 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -89,6 +89,8 @@ where &topic, #[cfg(feature = "partial_messages")] false, + #[cfg(feature = "partial_messages")] + false, ) .unwrap(); topic_hashes.push(topic.hash().clone()); @@ -254,7 +256,7 @@ where #[cfg(feature = "partial_messages")] partial_messages: Default::default(), #[cfg(feature = "partial_messages")] - partial_only_topics: Default::default(), + partial_opts: Default::default(), }, ); @@ -284,7 +286,7 @@ where action: SubscriptionAction::Subscribe, topic_hash: t, #[cfg(feature = "partial_messages")] - partial: false, + partial_opts: Default::default(), }) .collect::>(), &peer, @@ -425,7 +427,7 @@ fn proto_to_message(rpc: &proto::RPC) -> RpcIn { }, topic_hash: TopicHash::from_raw(sub.topic_id.unwrap_or_default()), #[cfg(feature = "partial_messages")] - partial: false, + partial_opts: Default::default(), }) .collect(), control_msgs, @@ -589,6 +591,8 @@ fn test_join() { gs.subscribe( &topics[0], #[cfg(feature = "partial_messages")] + false, + #[cfg(feature = "partial_messages")] false ) .unwrap(), @@ -657,7 +661,7 @@ fn test_join() { #[cfg(feature = "partial_messages")] partial_messages: Default::default(), #[cfg(feature = "partial_messages")] - partial_only_topics: Default::default(), + partial_opts: Default::default(), }, ); queues.insert(random_peer, receiver_queue); @@ -685,6 +689,8 @@ fn test_join() { &topics[1], #[cfg(feature = "partial_messages")] false, + #[cfg(feature = "partial_messages")] + false, ) .unwrap(); @@ -942,7 +948,7 @@ fn test_handle_received_subscriptions() { action: SubscriptionAction::Subscribe, topic_hash: topic_hash.clone(), #[cfg(feature = "partial_messages")] - partial: false, + partial_opts: Default::default(), }) .collect::>(); @@ -950,7 +956,7 @@ fn test_handle_received_subscriptions() { action: SubscriptionAction::Unsubscribe, topic_hash: topic_hashes[topic_hashes.len() - 1].clone(), #[cfg(feature = "partial_messages")] - partial: false, + partial_opts: Default::default(), }); let unknown_peer = PeerId::random(); @@ -1009,7 +1015,7 @@ fn test_handle_received_subscriptions() { action: SubscriptionAction::Unsubscribe, topic_hash: topic_hashes[0].clone(), #[cfg(feature = "partial_messages")] - partial: false, + partial_opts: Default::default(), }], &peers[0], ); @@ -1067,7 
+1073,7 @@ fn test_get_random_peers() { #[cfg(feature = "partial_messages")] partial_messages: Default::default(), #[cfg(feature = "partial_messages")] - partial_only_topics: Default::default(), + partial_opts: Default::default(), }, ); } @@ -1746,7 +1752,7 @@ fn explicit_peers_not_added_to_mesh_on_subscribe() { action: SubscriptionAction::Subscribe, topic_hash: topic_hash.clone(), #[cfg(feature = "partial_messages")] - partial: false, + partial_opts: Default::default(), }], peer, ); @@ -1757,6 +1763,8 @@ fn explicit_peers_not_added_to_mesh_on_subscribe() { &topic, #[cfg(feature = "partial_messages")] false, + #[cfg(feature = "partial_messages")] + false, ) .unwrap(); @@ -1801,7 +1809,7 @@ fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() { action: SubscriptionAction::Subscribe, topic_hash: topic_hash.clone(), #[cfg(feature = "partial_messages")] - partial: false, + partial_opts: PartialSubOpts::default(), }], peer, ); @@ -1815,6 +1823,8 @@ fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() { &topic, #[cfg(feature = "partial_messages")] false, + #[cfg(feature = "partial_messages")] + false, ) .unwrap(); @@ -2225,6 +2235,8 @@ fn test_unsubscribe_backoff() { &Topic::new(topics[0].to_string()), #[cfg(feature = "partial_messages")] false, + #[cfg(feature = "partial_messages")] + false, ); // forget all events until now @@ -3066,7 +3078,7 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() { action: SubscriptionAction::Subscribe, topic_hash: topics[0].clone(), #[cfg(feature = "partial_messages")] - partial: false, + partial_opts: PartialSubOpts::default(), }; let control_action = ControlAction::IHave(IHave { @@ -5189,6 +5201,8 @@ fn test_subscribe_to_invalid_topic() { .subscribe( &t1, #[cfg(feature = "partial_messages")] + false, + #[cfg(feature = "partial_messages")] false ) .is_ok()); @@ -5196,6 +5210,8 @@ fn test_subscribe_to_invalid_topic() { .subscribe( &t2, #[cfg(feature = "partial_messages")] + false, + #[cfg(feature = "partial_messages")] false ) .is_err()); @@ -5231,6 +5247,8 @@ fn test_subscribe_and_graft_with_negative_score() { &topic, #[cfg(feature = "partial_messages")] false, + #[cfg(feature = "partial_messages")] + false, ) .unwrap(); @@ -5566,7 +5584,7 @@ fn test_all_queues_full() { #[cfg(feature = "partial_messages")] partial_messages: Default::default(), #[cfg(feature = "partial_messages")] - partial_only_topics: Default::default(), + partial_opts: Default::default(), }, ); @@ -5608,7 +5626,7 @@ fn test_slow_peer_returns_failed_publish() { #[cfg(feature = "partial_messages")] partial_messages: Default::default(), #[cfg(feature = "partial_messages")] - partial_only_topics: Default::default(), + partial_opts: Default::default(), }, ); let peer_id = PeerId::random(); @@ -5626,7 +5644,7 @@ fn test_slow_peer_returns_failed_publish() { #[cfg(feature = "partial_messages")] partial_messages: Default::default(), #[cfg(feature = "partial_messages")] - partial_only_topics: Default::default(), + partial_opts: Default::default(), }, ); @@ -5683,7 +5701,7 @@ fn test_slow_peer_returns_failed_ihave_handling() { #[cfg(feature = "partial_messages")] partial_messages: Default::default(), #[cfg(feature = "partial_messages")] - partial_only_topics: Default::default(), + partial_opts: Default::default(), }, ); peers.push(slow_peer_id); @@ -5705,7 +5723,7 @@ fn test_slow_peer_returns_failed_ihave_handling() { #[cfg(feature = "partial_messages")] partial_messages: Default::default(), #[cfg(feature = "partial_messages")] - partial_only_topics: Default::default(), + 
partial_opts: Default::default(), }, ); @@ -5798,7 +5816,7 @@ fn test_slow_peer_returns_failed_iwant_handling() { #[cfg(feature = "partial_messages")] partial_messages: Default::default(), #[cfg(feature = "partial_messages")] - partial_only_topics: Default::default(), + partial_opts: Default::default(), }, ); peers.push(slow_peer_id); @@ -5820,7 +5838,7 @@ fn test_slow_peer_returns_failed_iwant_handling() { #[cfg(feature = "partial_messages")] partial_messages: Default::default(), #[cfg(feature = "partial_messages")] - partial_only_topics: Default::default(), + partial_opts: Default::default(), }, ); @@ -5893,7 +5911,7 @@ fn test_slow_peer_returns_failed_forward() { #[cfg(feature = "partial_messages")] partial_messages: Default::default(), #[cfg(feature = "partial_messages")] - partial_only_topics: Default::default(), + partial_opts: Default::default(), }, ); peers.push(slow_peer_id); @@ -5915,7 +5933,7 @@ fn test_slow_peer_returns_failed_forward() { #[cfg(feature = "partial_messages")] partial_messages: Default::default(), #[cfg(feature = "partial_messages")] - partial_only_topics: Default::default(), + partial_opts: Default::default(), }, ); @@ -5993,7 +6011,7 @@ fn test_slow_peer_is_downscored_on_publish() { #[cfg(feature = "partial_messages")] partial_messages: Default::default(), #[cfg(feature = "partial_messages")] - partial_only_topics: Default::default(), + partial_opts: Default::default(), }, ); gs.as_peer_score_mut().add_peer(slow_peer_id); @@ -6012,7 +6030,7 @@ fn test_slow_peer_is_downscored_on_publish() { #[cfg(feature = "partial_messages")] partial_messages: Default::default(), #[cfg(feature = "partial_messages")] - partial_only_topics: Default::default(), + partial_opts: Default::default(), }, ); @@ -6395,6 +6413,8 @@ fn test_multiple_topics_with_different_configs() { gs.subscribe( &Topic::new(topic_hashes[0].to_string()), #[cfg(feature = "partial_messages")] + false, + #[cfg(feature = "partial_messages")] false ) .unwrap(), @@ -6852,7 +6872,7 @@ fn test_handle_extensions_message() { #[cfg(feature = "partial_messages")] partial_messages: Default::default(), #[cfg(feature = "partial_messages")] - partial_only_topics: Default::default(), + partial_opts: Default::default(), }, ); diff --git a/protocols/gossipsub/src/generated/gossipsub/pb.rs b/protocols/gossipsub/src/generated/gossipsub/pb.rs index 72c674f9fdf..6aa7fd4eeeb 100644 --- a/protocols/gossipsub/src/generated/gossipsub/pb.rs +++ b/protocols/gossipsub/src/generated/gossipsub/pb.rs @@ -70,7 +70,8 @@ use super::*; pub struct SubOpts { pub subscribe: Option, pub topic_id: Option, - pub partial: Option, + pub requestsPartial: Option, + pub supportsPartial: Option, } impl<'a> MessageRead<'a> for SubOpts { @@ -80,7 +81,8 @@ impl<'a> MessageRead<'a> for SubOpts { match r.next_tag(bytes) { Ok(8) => msg.subscribe = Some(r.read_bool(bytes)?), Ok(18) => msg.topic_id = Some(r.read_string(bytes)?.to_owned()), - Ok(24) => msg.partial = Some(r.read_bool(bytes)?), + Ok(24) => msg.requestsPartial = Some(r.read_bool(bytes)?), + Ok(32) => msg.supportsPartial = Some(r.read_bool(bytes)?), Ok(t) => { r.read_unknown(bytes, t)?; } Err(e) => return Err(e), } @@ -94,13 +96,15 @@ impl MessageWrite for SubOpts { 0 + self.subscribe.as_ref().map_or(0, |m| 1 + sizeof_varint(*(m) as u64)) + self.topic_id.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) - + self.partial.as_ref().map_or(0, |m| 1 + sizeof_varint(*(m) as u64)) + + self.requestsPartial.as_ref().map_or(0, |m| 1 + sizeof_varint(*(m) as u64)) + + self.supportsPartial.as_ref().map_or(0, |m| 
1 + sizeof_varint(*(m) as u64)) } fn write_message(&self, w: &mut Writer) -> Result<()> { if let Some(ref s) = self.subscribe { w.write_with_tag(8, |w| w.write_bool(*s))?; } if let Some(ref s) = self.topic_id { w.write_with_tag(18, |w| w.write_string(&**s))?; } - if let Some(ref s) = self.partial { w.write_with_tag(24, |w| w.write_bool(*s))?; } + if let Some(ref s) = self.requestsPartial { w.write_with_tag(24, |w| w.write_bool(*s))?; } + if let Some(ref s) = self.supportsPartial { w.write_with_tag(32, |w| w.write_bool(*s))?; } Ok(()) } } diff --git a/protocols/gossipsub/src/generated/rpc.proto b/protocols/gossipsub/src/generated/rpc.proto index bb908db2756..664211373fb 100644 --- a/protocols/gossipsub/src/generated/rpc.proto +++ b/protocols/gossipsub/src/generated/rpc.proto @@ -10,9 +10,13 @@ message RPC { optional bool subscribe = 1; // subscribe or unsubscribe optional string topic_id = 2; // Used with Partial Messages extension. - // If set, the receiver of this message MUST send partial messages to the - // sender instead of full messages. - optional bool partial = 3; + // If set to true, signals to the receiver that the sender prefers partial + // messages. + optional bool requestsPartial = 3; + // If set to true, signals to the receiver that the sender supports sending + // partial messages on this topic. If requestsPartial is true, this is + // assumed to be true. + optional bool supportsPartial = 4; } optional ControlMessage control = 3; diff --git a/protocols/gossipsub/src/protocol.rs b/protocols/gossipsub/src/protocol.rs index 335ba1c307a..54fe553e7f2 100644 --- a/protocols/gossipsub/src/protocol.rs +++ b/protocols/gossipsub/src/protocol.rs @@ -30,7 +30,7 @@ use libp2p_swarm::StreamProtocol; use quick_protobuf::{MessageWrite, Writer}; #[cfg(feature = "partial_messages")] -use crate::types::PartialMessage; +use crate::types::{PartialMessage, PartialSubOpts}; use crate::{ config::ValidationMode, handler::HandlerEvent, @@ -612,7 +612,10 @@ impl Decoder for GossipsubCodec { }, topic_hash: TopicHash::from_raw(sub.topic_id.unwrap_or_default()), #[cfg(feature = "partial_messages")] - partial: sub.partial.unwrap_or_default(), + partial_opts: PartialSubOpts { + requests_partial: sub.requestsPartial.unwrap_or_default(), + supports_partial: sub.supportsPartial.unwrap_or_default(), + }, }) .collect(), control_msgs, diff --git a/protocols/gossipsub/src/subscription_filter.rs b/protocols/gossipsub/src/subscription_filter.rs index 893c5eefe21..224de864935 100644 --- a/protocols/gossipsub/src/subscription_filter.rs +++ b/protocols/gossipsub/src/subscription_filter.rs @@ -211,6 +211,8 @@ impl TopicSubscriptionFilter for RegexSubscriptionFilter { #[cfg(test)] mod test { use super::*; + #[cfg(feature = "partial_messages")] + use crate::types::PartialSubOpts; use crate::types::SubscriptionAction::*; #[test] @@ -226,31 +228,31 @@ mod test { action: Unsubscribe, topic_hash: t1.clone(), #[cfg(feature = "partial_messages")] - partial: false, + partial_opts: PartialSubOpts::default(), }, Subscription { action: Unsubscribe, topic_hash: t2.clone(), #[cfg(feature = "partial_messages")] - partial: false, + partial_opts: PartialSubOpts::default(), }, Subscription { action: Subscribe, topic_hash: t2, #[cfg(feature = "partial_messages")] - partial: false, + partial_opts: PartialSubOpts::default(), }, Subscription { action: Subscribe, topic_hash: t1.clone(), #[cfg(feature = "partial_messages")] - partial: false, + partial_opts: PartialSubOpts::default(), }, Subscription { action: Unsubscribe, topic_hash: t1, 
            #[cfg(feature = "partial_messages")]
-            partial: false,
+            partial_opts: PartialSubOpts::default(),
         },
     ];
@@ -273,13 +275,13 @@ mod test {
             action: Subscribe,
             topic_hash: t1,
             #[cfg(feature = "partial_messages")]
-            partial: false,
+            partial_opts: PartialSubOpts::default(),
         },
         Subscription {
             action: Subscribe,
             topic_hash: t2,
             #[cfg(feature = "partial_messages")]
-            partial: false,
+            partial_opts: PartialSubOpts::default(),
         },
     ];
@@ -306,19 +308,19 @@ mod test {
             action: Subscribe,
             topic_hash: t1.clone(),
             #[cfg(feature = "partial_messages")]
-            partial: false,
+            partial_opts: PartialSubOpts::default(),
         },
         Subscription {
             action: Unsubscribe,
             topic_hash: t1.clone(),
             #[cfg(feature = "partial_messages")]
-            partial: false,
+            partial_opts: PartialSubOpts::default(),
         },
         Subscription {
             action: Subscribe,
             topic_hash: t1,
             #[cfg(feature = "partial_messages")]
-            partial: false,
+            partial_opts: PartialSubOpts::default(),
         },
     ];
@@ -345,13 +347,13 @@ mod test {
             action: Subscribe,
             topic_hash: t[2].clone(),
             #[cfg(feature = "partial_messages")]
-            partial: false,
+            partial_opts: PartialSubOpts::default(),
         },
         Subscription {
             action: Subscribe,
             topic_hash: t[3].clone(),
             #[cfg(feature = "partial_messages")]
-            partial: false,
+            partial_opts: PartialSubOpts::default(),
         },
     ];
@@ -378,31 +380,31 @@ mod test {
             action: Subscribe,
             topic_hash: t[4].clone(),
             #[cfg(feature = "partial_messages")]
-            partial: false,
+            partial_opts: PartialSubOpts::default(),
         },
         Subscription {
             action: Subscribe,
             topic_hash: t[2].clone(),
             #[cfg(feature = "partial_messages")]
-            partial: false,
+            partial_opts: PartialSubOpts::default(),
         },
         Subscription {
             action: Subscribe,
             topic_hash: t[3].clone(),
             #[cfg(feature = "partial_messages")]
-            partial: false,
+            partial_opts: PartialSubOpts::default(),
         },
         Subscription {
             action: Unsubscribe,
             topic_hash: t[0].clone(),
             #[cfg(feature = "partial_messages")]
-            partial: false,
+            partial_opts: PartialSubOpts::default(),
         },
         Subscription {
             action: Unsubscribe,
             topic_hash: t[1].clone(),
             #[cfg(feature = "partial_messages")]
-            partial: false,
+            partial_opts: PartialSubOpts::default(),
         },
     ];
@@ -425,13 +427,13 @@ mod test {
             action: Subscribe,
             topic_hash: t1,
             #[cfg(feature = "partial_messages")]
-            partial: false,
+            partial_opts: PartialSubOpts::default(),
         },
         Subscription {
             action: Subscribe,
             topic_hash: t2,
             #[cfg(feature = "partial_messages")]
-            partial: false,
+            partial_opts: PartialSubOpts::default(),
         },
     ];
@@ -455,19 +457,19 @@ mod test {
             action: Subscribe,
             topic_hash: t1,
             #[cfg(feature = "partial_messages")]
-            partial: false,
+            partial_opts: PartialSubOpts::default(),
         },
         Subscription {
             action: Subscribe,
             topic_hash: t2,
             #[cfg(feature = "partial_messages")]
-            partial: false,
+            partial_opts: PartialSubOpts::default(),
         },
         Subscription {
             action: Subscribe,
             topic_hash: t3,
             #[cfg(feature = "partial_messages")]
-            partial: false,
+            partial_opts: PartialSubOpts::default(),
         },
     ];
diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs
index d22179ea1a9..12f939d6e91 100644
--- a/protocols/gossipsub/src/types.rs
+++ b/protocols/gossipsub/src/types.rs
@@ -111,9 +111,17 @@ pub(crate) struct PeerDetails {
     #[cfg(feature = "partial_messages")]
     pub(crate) partial_messages: HashMap<TopicHash, HashMap<Vec<u8>, PartialData>>,

-    /// Partial only subscribed topics.
+    /// Partial options for subscribed topics.
     #[cfg(feature = "partial_messages")]
-    pub(crate) partial_only_topics: BTreeSet<TopicHash>,
+    pub(crate) partial_opts: HashMap<TopicHash, PartialSubOpts>,
+}
+
+/// Partial options when subscribing to a topic.
+#[cfg(feature = "partial_messages")] +#[derive(Debug, Clone, Copy, Default, Eq, Hash, PartialEq)] +pub struct PartialSubOpts { + pub(crate) requests_partial: bool, + pub(crate) supports_partial: bool, } /// Stored `Metadata` for a peer. @@ -276,9 +284,9 @@ pub struct Subscription { pub action: SubscriptionAction, /// The topic from which to subscribe or unsubscribe. pub topic_hash: TopicHash, - /// Peer only wants to receive partial messages instead of full messages. + /// Partial options. #[cfg(feature = "partial_messages")] - pub partial: bool, + pub partial_opts: PartialSubOpts, } /// Action that a subscription wants to perform. @@ -403,7 +411,7 @@ pub enum RpcOut { Subscribe { topic: TopicHash, #[cfg(feature = "partial_messages")] - partial_only: bool, + partial_opts: PartialSubOpts, }, /// Unsubscribe a topic. Unsubscribe(TopicHash), @@ -476,16 +484,20 @@ impl From for proto::RPC { RpcOut::Subscribe { topic, #[cfg(feature = "partial_messages")] - partial_only, + partial_opts, } => proto::RPC { publish: Vec::new(), subscriptions: vec![proto::SubOpts { subscribe: Some(true), topic_id: Some(topic.into_string()), + #[cfg(feature = "partial_messages")] + requestsPartial: Some(partial_opts.requests_partial), #[cfg(not(feature = "partial_messages"))] - partial: None, + requestsPartial: None, #[cfg(feature = "partial_messages")] - partial: Some(partial_only), + supportsPartial: Some(partial_opts.supports_partial), + #[cfg(not(feature = "partial_messages"))] + supportsPartial: None, }], control: None, testExtension: None, @@ -496,7 +508,8 @@ impl From for proto::RPC { subscriptions: vec![proto::SubOpts { subscribe: Some(false), topic_id: Some(topic.into_string()), - partial: None, + requestsPartial: None, + supportsPartial: None, }], control: None, testExtension: None, diff --git a/protocols/gossipsub/tests/smoke.rs b/protocols/gossipsub/tests/smoke.rs index d519d8203e5..b474eb4f0aa 100644 --- a/protocols/gossipsub/tests/smoke.rs +++ b/protocols/gossipsub/tests/smoke.rs @@ -152,7 +152,7 @@ fn multi_hop_propagation() { // Subscribe each node to the same topic. let topic = gossipsub::IdentTopic::new("test-net"); for node in &mut graph.nodes { - node.behaviour_mut().subscribe(&topic, #[cfg(feature = "partial_messages")] false).unwrap(); + node.behaviour_mut().subscribe(&topic, #[cfg(feature = "partial_messages")] false, #[cfg(feature = "partial_messages")] false).unwrap(); } // Wait for all nodes to be subscribed. diff --git a/types.rs b/types.rs new file mode 100644 index 00000000000..5f7c0618c33 --- /dev/null +++ b/types.rs @@ -0,0 +1,710 @@ +// Copyright 2020 Sigma Prime Pty Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +//! A collection of types using the Gossipsub system. +#[cfg(feature = "partial_messages")] +use std::collections::HashMap; +use std::{ + collections::BTreeSet, + fmt::{self, Debug}, +}; + +use futures_timer::Delay; +use hashlink::LinkedHashMap; +use libp2p_identity::PeerId; +use libp2p_swarm::ConnectionId; +use quick_protobuf::MessageWrite; +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; +use web_time::Instant; + +use crate::{queue::Queue, rpc_proto::proto, TopicHash}; + +/// Messages that have expired while attempting to be sent to a peer. +#[derive(Clone, Debug, Default, PartialEq, Eq)] +pub struct FailedMessages { + /// The number of messages that were failed to be sent to the priority queue as it was + /// full. + pub priority: usize, + /// The number of messages that were failed to be sent to the non priority queue as it was + /// full. + pub non_priority: usize, +} + +#[derive(Debug)] +/// Validation kinds from the application for received messages. +pub enum MessageAcceptance { + /// The message is considered valid, and it should be delivered and forwarded to the network. + Accept, + /// The message is considered invalid, and it should be rejected and trigger the Pâ‚„ penalty. + Reject, + /// The message is neither delivered nor forwarded to the network, but the router does not + /// trigger the Pâ‚„ penalty. + Ignore, +} + +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub struct MessageId(pub Vec); + +impl MessageId { + pub fn new(value: &[u8]) -> Self { + Self(value.to_vec()) + } +} + +impl>> From for MessageId { + fn from(value: T) -> Self { + Self(value.into()) + } +} + +impl std::fmt::Display for MessageId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", hex_fmt::HexFmt(&self.0)) + } +} + +impl std::fmt::Debug for MessageId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "MessageId({})", hex_fmt::HexFmt(&self.0)) + } +} + +#[derive(Debug)] +/// Connected peer details. +pub(crate) struct PeerDetails { + /// The kind of protocol the peer supports. + pub(crate) kind: PeerKind, + /// The Extensions supported by the peer if any. + pub(crate) extensions: Option, + /// If the peer is an outbound connection. + pub(crate) outbound: bool, + /// Its current connections. + pub(crate) connections: Vec, + /// Subscribed topics. + pub(crate) topics: BTreeSet, + /// Don't send messages. + pub(crate) dont_send: LinkedHashMap, + + /// Message queue consumed by the connection handler. + pub(crate) messages: Queue, + + /// Peer Partial messages. + #[cfg(feature = "partial_messages")] + pub(crate) partial_messages: HashMap, PartialData>>, + + /// Partial only subscribed topics. + #[cfg(feature = "partial_messages")] + pub(crate) partial_only_topics: BTreeSet, +} + +/// Stored `Metadata` for a peer. 
+#[cfg(feature = "partial_messages")] +#[derive(Debug)] +pub(crate) enum PeerMetadata { + Remote(Vec), + Local(Box), +} + +#[cfg(feature = "partial_messages")] +impl AsRef<[u8]> for PeerMetadata { + fn as_ref(&self) -> &[u8] { + match self { + PeerMetadata::Remote(metadata) => metadata, + PeerMetadata::Local(metadata) => metadata.as_slice(), + } + } +} + +/// The partial message data the peer has. +#[cfg(feature = "partial_messages")] +#[derive(Debug)] +pub(crate) struct PartialData { + /// The current peer partial metadata. + pub(crate) metadata: Option, + /// The remaining heartbeats for this message to be deleted. + pub(crate) ttl: usize, +} + +#[cfg(feature = "partial_messages")] +impl Default for PartialData { + fn default() -> Self { + Self { + metadata: Default::default(), + ttl: 5, + } + } +} + +/// Describes the types of peers that can exist in the gossipsub context. +#[derive(Debug, Clone, Copy, PartialEq, Hash, Eq)] +#[cfg_attr( + feature = "metrics", + derive(prometheus_client::encoding::EncodeLabelValue) +)] +pub enum PeerKind { + /// A gossipsub 1.3 peer. + Gossipsubv1_3, + /// A gossipsub 1.2 peer. + Gossipsubv1_2, + /// A gossipsub 1.1 peer. + Gossipsubv1_1, + /// A gossipsub 1.0 peer. + Gossipsub, + /// A floodsub peer. + Floodsub, + /// The peer doesn't support any of the protocols. + NotSupported, +} + +/// A message received by the gossipsub system and stored locally in caches.. +#[derive(Clone, PartialEq, Eq, Hash, Debug)] +pub struct RawMessage { + /// Id of the peer that published this message. + pub source: Option, + + /// Content of the message. Its meaning is out of scope of this library. + pub data: Vec, + + /// A random sequence number. + pub sequence_number: Option, + + /// The topic this message belongs to + pub topic: TopicHash, + + /// The signature of the message if it's signed. + pub signature: Option>, + + /// The public key of the message if it is signed and the source [`PeerId`] cannot be inlined. + pub key: Option>, + + /// Flag indicating if this message has been validated by the application or not. + pub validated: bool, +} + +impl PeerKind { + /// Returns true if peer speaks any gossipsub version. + pub(crate) fn is_gossipsub(&self) -> bool { + matches!( + self, + Self::Gossipsubv1_2 | Self::Gossipsubv1_1 | Self::Gossipsub + ) + } +} + +impl RawMessage { + /// Calculates the encoded length of this message (used for calculating metrics). + pub fn raw_protobuf_len(&self) -> usize { + let message = proto::Message { + from: self.source.map(|m| m.to_bytes()), + data: Some(self.data.clone()), + seqno: self.sequence_number.map(|s| s.to_be_bytes().to_vec()), + topic: TopicHash::into_string(self.topic.clone()), + signature: self.signature.clone(), + key: self.key.clone(), + }; + message.get_size() + } +} + +impl From for proto::Message { + fn from(raw: RawMessage) -> Self { + proto::Message { + from: raw.source.map(|m| m.to_bytes()), + data: Some(raw.data), + seqno: raw.sequence_number.map(|s| s.to_be_bytes().to_vec()), + topic: TopicHash::into_string(raw.topic), + signature: raw.signature, + key: raw.key, + } + } +} + +/// The message sent to the user after a [`RawMessage`] has been transformed by a +/// [`crate::DataTransform`]. +#[derive(Clone, PartialEq, Eq, Hash)] +pub struct Message { + /// Id of the peer that published this message. + pub source: Option, + + /// Content of the message. + pub data: Vec, + + /// A random sequence number. 
+ pub sequence_number: Option, + + /// The topic this message belongs to + pub topic: TopicHash, +} + +impl fmt::Debug for Message { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Message") + .field( + "data", + &format_args!("{:<20}", &hex_fmt::HexFmt(&self.data)), + ) + .field("source", &self.source) + .field("sequence_number", &self.sequence_number) + .field("topic", &self.topic) + .finish() + } +} + +/// A subscription received by the gossipsub system. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct Subscription { + /// Action to perform. + pub action: SubscriptionAction, + /// The topic from which to subscribe or unsubscribe. + pub topic_hash: TopicHash, + /// Peer only wants to receive partial messages instead of full messages. + #[cfg(feature = "partial_messages")] + pub partial: bool, +} + +/// Action that a subscription wants to perform. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub enum SubscriptionAction { + /// The remote wants to subscribe to the given topic. + Subscribe, + /// The remote wants to unsubscribe from the given topic. + Unsubscribe, +} + +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub(crate) struct PeerInfo { + pub(crate) peer_id: Option, + // TODO add this when RFC: Signed Address Records got added to the spec (see pull request + // https://github.com/libp2p/specs/pull/217) + // pub signed_peer_record: ?, +} + +/// A Control message received by the gossipsub system. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub enum ControlAction { + /// Node broadcasts known messages per topic - IHave control message. + IHave(IHave), + /// The node requests specific message ids (peer_id + sequence _number) - IWant control + /// message. + IWant(IWant), + /// The node has been added to the mesh - Graft control message. + Graft(Graft), + /// The node has been removed from the mesh - Prune control message. + Prune(Prune), + /// The node requests us to not forward message ids (peer_id + sequence _number) - IDontWant + /// control message. + IDontWant(IDontWant), + /// The Node has sent us its supported extensions. + Extensions(Option), +} + +/// Node broadcasts known messages per topic - IHave control message. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct IHave { + /// The topic of the messages. + pub(crate) topic_hash: TopicHash, + /// A list of known message ids (peer_id + sequence _number) as a string. + pub(crate) message_ids: Vec, +} + +/// The node requests specific message ids (peer_id + sequence _number) - IWant control message. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct IWant { + /// A list of known message ids (peer_id + sequence _number) as a string. + pub(crate) message_ids: Vec, +} + +/// The node has been added to the mesh - Graft control message. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct Graft { + /// The mesh topic the peer should be added to. + pub(crate) topic_hash: TopicHash, +} + +/// The node has been removed from the mesh - Prune control message. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct Prune { + /// The mesh topic the peer should be removed from. + pub(crate) topic_hash: TopicHash, + /// A list of peers to be proposed to the removed peer as peer exchange + pub(crate) peers: Vec, + /// The backoff time in seconds before we allow to reconnect + pub(crate) backoff: Option, +} + +/// The node requests us to not forward message ids - IDontWant control message. 
+#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct IDontWant { + /// A list of known message ids. + pub(crate) message_ids: Vec, +} + +/// A received partial message. +#[cfg(feature = "partial_messages")] +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct PartialMessage { + /// The topic ID this partial message belongs to. + pub topic_id: TopicHash, + /// The group ID that identifies the complete logical message. + pub group_id: Vec, + /// The partial metadata we have and we want. + pub metadata: Option>, + /// The partial message itself. + pub message: Option>, +} + +/// The node has sent us the supported Gossipsub Extensions. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub struct Extensions { + pub(crate) test_extension: Option, + pub(crate) partial_messages: Option, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub struct TestExtension {} + +/// A Gossipsub RPC message sent. +#[derive(Debug)] +pub enum RpcOut { + /// Publish a Gossipsub message on network.`timeout` limits the duration the message + /// can wait to be sent before it is abandoned. + Publish { + message_id: MessageId, + message: RawMessage, + timeout: Delay, + }, + /// Forward a Gossipsub message on network. `timeout` limits the duration the message + /// can wait to be sent before it is abandoned. + Forward { + message_id: MessageId, + message: RawMessage, + timeout: Delay, + }, + /// Subscribe a topic. + Subscribe { + topic: TopicHash, + #[cfg(feature = "partial_messages")] + requests_partials: bool, + }, + /// Unsubscribe a topic. + Unsubscribe(TopicHash), + /// Send a GRAFT control message. + Graft(Graft), + /// Send a PRUNE control message. + Prune(Prune), + /// Send a IHave control message. + IHave(IHave), + /// Send a IWant control message. + IWant(IWant), + /// The node requests us to not forward message ids (peer_id + sequence _number) - IDontWant + /// control message. + IDontWant(IDontWant), + /// Send a Extensions control message. + Extensions(Extensions), + /// Send a test extension message. + TestExtension, + /// Send a partial messages extension. + PartialMessage { + /// The group ID that identifies the complete logical message. + group_id: Vec, + /// The topic ID this partial message belongs to. + topic_id: TopicHash, + /// The partial message itself. + message: Option>, + /// The partial metadata we have and want. + metadata: Vec, + }, +} + +impl RpcOut { + /// Converts the GossipsubRPC into its protobuf format. + // A convenience function to avoid explicitly specifying types. + pub fn into_protobuf(self) -> proto::RPC { + self.into() + } + + /// Returns true if the `RpcOut` is priority. + pub(crate) fn priority(&self) -> bool { + matches!( + self, + RpcOut::Subscribe { .. } + | RpcOut::Unsubscribe(_) + | RpcOut::Graft(_) + | RpcOut::Prune(_) + | RpcOut::IDontWant(_) + ) + } +} + +impl From for proto::RPC { + /// Converts the RPC into protobuf format. + fn from(rpc: RpcOut) -> Self { + match rpc { + RpcOut::Publish { message, .. } => proto::RPC { + subscriptions: Vec::new(), + publish: vec![message.into()], + control: None, + testExtension: None, + partial: None, + }, + RpcOut::Forward { message, .. 
} => proto::RPC { + publish: vec![message.into()], + subscriptions: Vec::new(), + control: None, + testExtension: None, + partial: None, + }, + RpcOut::Subscribe { + topic, + #[cfg(feature = "partial_messages")] + requests_partials: partial_only, + } => proto::RPC { + publish: Vec::new(), + subscriptions: vec![proto::SubOpts { + subscribe: Some(true), + topic_id: Some(topic.into_string()), + #[cfg(not(feature = "partial_messages"))] + partial: None, + #[cfg(feature = "partial_messages")] + requestsPartial: Some(partial_only), + }], + control: None, + testExtension: None, + partial: None, + }, + RpcOut::Unsubscribe(topic) => proto::RPC { + publish: Vec::new(), + subscriptions: vec![proto::SubOpts { + subscribe: Some(false), + topic_id: Some(topic.into_string()), + requestsPartial: None, + }], + control: None, + testExtension: None, + partial: None, + }, + RpcOut::IHave(IHave { + topic_hash, + message_ids, + }) => proto::RPC { + publish: Vec::new(), + subscriptions: Vec::new(), + control: Some(proto::ControlMessage { + ihave: vec![proto::ControlIHave { + topic_id: Some(topic_hash.into_string()), + message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(), + }], + iwant: vec![], + graft: vec![], + prune: vec![], + idontwant: vec![], + extensions: None, + }), + testExtension: None, + partial: None, + }, + RpcOut::IWant(IWant { message_ids }) => proto::RPC { + publish: Vec::new(), + subscriptions: Vec::new(), + control: Some(proto::ControlMessage { + ihave: vec![], + iwant: vec![proto::ControlIWant { + message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(), + }], + graft: vec![], + prune: vec![], + idontwant: vec![], + extensions: None, + }), + testExtension: None, + partial: None, + }, + RpcOut::Graft(Graft { topic_hash }) => proto::RPC { + publish: Vec::new(), + subscriptions: vec![], + control: Some(proto::ControlMessage { + ihave: vec![], + iwant: vec![], + graft: vec![proto::ControlGraft { + topic_id: Some(topic_hash.into_string()), + }], + prune: vec![], + idontwant: vec![], + extensions: None, + }), + testExtension: None, + partial: None, + }, + RpcOut::Prune(Prune { + topic_hash, + peers, + backoff, + }) => { + proto::RPC { + publish: Vec::new(), + subscriptions: vec![], + control: Some(proto::ControlMessage { + ihave: vec![], + iwant: vec![], + graft: vec![], + prune: vec![proto::ControlPrune { + topic_id: Some(topic_hash.into_string()), + peers: peers + .into_iter() + .map(|info| proto::PeerInfo { + peer_id: info.peer_id.map(|id| id.to_bytes()), + // TODO, see https://github.com/libp2p/specs/pull/217 + signed_peer_record: None, + }) + .collect(), + backoff, + }], + idontwant: vec![], + extensions: None, + }), + testExtension: None, + partial: None, + } + } + RpcOut::IDontWant(IDontWant { message_ids }) => proto::RPC { + publish: Vec::new(), + subscriptions: Vec::new(), + control: Some(proto::ControlMessage { + ihave: vec![], + iwant: vec![], + graft: vec![], + prune: vec![], + idontwant: vec![proto::ControlIDontWant { + message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(), + }], + extensions: None, + }), + testExtension: None, + partial: None, + }, + RpcOut::Extensions(Extensions { + partial_messages, + test_extension, + }) => proto::RPC { + publish: Vec::new(), + subscriptions: Vec::new(), + control: Some(proto::ControlMessage { + ihave: vec![], + iwant: vec![], + graft: vec![], + prune: vec![], + idontwant: vec![], + extensions: Some(proto::ControlExtensions { + testExtension: test_extension, + partialMessages: partial_messages, + }), + 
}),
+            testExtension: None,
+            partial: None,
+        },
+        RpcOut::TestExtension => proto::RPC {
+            subscriptions: vec![],
+            publish: vec![],
+            control: None,
+            testExtension: Some(proto::TestExtension {}),
+            partial: None,
+        },
+        RpcOut::PartialMessage {
+            topic_id,
+            group_id,
+            metadata,
+            message,
+        } => proto::RPC {
+            subscriptions: vec![],
+            publish: vec![],
+            control: None,
+            testExtension: None,
+            partial: Some(proto::PartialMessagesExtension {
+                topicID: Some(topic_id.as_str().as_bytes().to_vec()),
+                groupID: Some(group_id),
+                partialMessage: message,
+                partsMetadata: Some(metadata),
+            }),
+        },
+        }
+    }
+}
+
+/// A Gossipsub RPC message received.
+#[derive(Clone, PartialEq, Eq, Hash)]
+pub struct RpcIn {
+    /// List of messages that were part of this RPC query.
+    pub messages: Vec<RawMessage>,
+    /// List of subscriptions.
+    pub subscriptions: Vec<Subscription>,
+    /// List of Gossipsub control messages.
+    pub control_msgs: Vec<ControlAction>,
+    /// Gossipsub test extension.
+    pub test_extension: Option<TestExtension>,
+    /// Partial messages extension.
+    #[cfg(feature = "partial_messages")]
+    pub partial_message: Option<PartialMessage>,
+}
+
+impl fmt::Debug for RpcIn {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let mut b = f.debug_struct("GossipsubRpc");
+        if !self.messages.is_empty() {
+            b.field("messages", &self.messages);
+        }
+        if !self.subscriptions.is_empty() {
+            b.field("subscriptions", &self.subscriptions);
+        }
+        if !self.control_msgs.is_empty() {
+            b.field("control_msgs", &self.control_msgs);
+        }
+        #[cfg(feature = "partial_messages")]
+        b.field("partial_messages", &self.partial_message);
+
+        b.finish()
+    }
+}
+
+impl PeerKind {
+    pub fn as_static_ref(&self) -> &'static str {
+        match self {
+            Self::NotSupported => "Not Supported",
+            Self::Floodsub => "Floodsub",
+            Self::Gossipsub => "Gossipsub v1.0",
+            Self::Gossipsubv1_1 => "Gossipsub v1.1",
+            Self::Gossipsubv1_2 => "Gossipsub v1.2",
+            Self::Gossipsubv1_3 => "Gossipsub v1.3",
+        }
+    }
+}
+
+impl AsRef<str> for PeerKind {
+    fn as_ref(&self) -> &str {
+        self.as_static_ref()
+    }
+}
+
+impl fmt::Display for PeerKind {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str(self.as_ref())
+    }
+}

From d97ae1f50865647d6bb2fda1c59e9f7ec0b8a4f6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?=
Date: Fri, 31 Oct 2025 16:14:00 +0000
Subject: [PATCH 48/68] emit gossipsub event whenever partial data is updated

---
 protocols/gossipsub/src/behaviour.rs | 34 ++++++++++++++++------------
 1 file changed, 20 insertions(+), 14 deletions(-)

diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs
index 3b8e4da2c74..799107e4f1c 100644
--- a/protocols/gossipsub/src/behaviour.rs
+++ b/protocols/gossipsub/src/behaviour.rs
@@ -1715,28 +1715,22 @@ where
             .entry(partial_message.group_id.clone())
             .or_default();

-        match (&mut peer_partial.metadata, &partial_message.metadata) {
+        let metadata_updated = match (&mut peer_partial.metadata, &partial_message.metadata) {
             (None, Some(remote_metadata)) => {
-                peer_partial.metadata = Some(PeerMetadata::Remote(remote_metadata.clone()))
+                peer_partial.metadata = Some(PeerMetadata::Remote(remote_metadata.clone()));
+                true
             }
             (Some(PeerMetadata::Remote(ref metadata)), Some(remote_metadata)) => {
                 if metadata != remote_metadata {
                     peer_partial.metadata = Some(PeerMetadata::Remote(remote_metadata.clone()));
+                    true
+                } else {
+                    false
                 }
             }
             (Some(PeerMetadata::Local(metadata)), Some(remote_metadata)) => {
                 match metadata.update(remote_metadata) {
-                    Ok(true) => {
-                        self.events
-                            .push_back(ToSwarm::GenerateEvent(Event::Partial {
topic_id: partial_message.topic_id, - propagation_source: *peer_id, - group_id: partial_message.group_id, - message: partial_message.message, - metadata: partial_message.metadata, - })); - } - Ok(false) => {} + Ok(updated) => updated, Err(err) => { tracing::debug!( peer=%peer_id, @@ -1748,10 +1742,22 @@ where if let PeerScoreState::Active(peer_score) = &mut self.peer_score { peer_score.reject_invalid_partial(peer_id, &partial_message.topic_id); } + false } } } - (Some(_), None) | (None, None) => {} + (Some(_), None) | (None, None) => false, + }; + + if metadata_updated { + self.events + .push_back(ToSwarm::GenerateEvent(Event::Partial { + topic_id: partial_message.topic_id, + propagation_source: *peer_id, + group_id: partial_message.group_id, + message: partial_message.message, + metadata: partial_message.metadata, + })); } } From 0c250045eef4a9ce58d029134509a8c8ea1507b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Tue, 4 Nov 2025 18:54:41 +0000 Subject: [PATCH 49/68] chore(ci): fix intra doc links job Pull-Request: #6198. --- protocols/gossipsub/src/behaviour.rs | 4 ++-- protocols/kad/src/behaviour.rs | 17 +++++++++-------- swarm/src/handler.rs | 4 ++-- 3 files changed, 13 insertions(+), 12 deletions(-) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 3d516098a7c..ee451904d01 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -511,8 +511,8 @@ where /// Subscribe to a topic. /// - /// Returns [`Ok(true)`] if the subscription worked. Returns [`Ok(false)`] if we were already - /// subscribed. + /// Returns [`Ok(true)`](Ok) if the subscription worked. Returns [`Ok(false)`](Ok) if we were + /// already subscribed. pub fn subscribe(&mut self, topic: &Topic) -> Result { let topic_hash = topic.hash(); if !self.subscription_filter.can_subscribe(&topic_hash) { diff --git a/protocols/kad/src/behaviour.rs b/protocols/kad/src/behaviour.rs index f5f44baec74..812f13e9110 100644 --- a/protocols/kad/src/behaviour.rs +++ b/protocols/kad/src/behaviour.rs @@ -724,7 +724,7 @@ where /// Initiates an iterative query for the closest peers to the given key. /// /// The result of the query is delivered in a - /// [`Event::OutboundQueryProgressed{QueryResult::GetClosestPeers}`]. + /// [`Event::OutboundQueryProgressed`] with `result` [`QueryResult::GetClosestPeers`]. pub fn get_closest_peers(&mut self, key: K) -> QueryId where K: Into> + Into> + Clone, @@ -737,7 +737,7 @@ where /// Note that the result is capped after exceeds K_VALUE /// /// The result of the query is delivered in a - /// [`Event::OutboundQueryProgressed{QueryResult::GetClosestPeers}`]. + /// [`Event::OutboundQueryProgressed`] with `result` [`QueryResult::GetClosestPeers`]. pub fn get_n_closest_peers(&mut self, key: K, num_results: NonZeroUsize) -> QueryId where K: Into> + Into> + Clone, @@ -794,7 +794,7 @@ where /// Performs a lookup for a record in the DHT. /// /// The result of this operation is delivered in a - /// [`Event::OutboundQueryProgressed{QueryResult::GetRecord}`]. + /// [`Event::OutboundQueryProgressed`] with `result` [`QueryResult::GetRecord`]. pub fn get_record(&mut self, key: record::Key) -> QueryId { let record = if let Some(record) = self.store.get(&key) { if record.is_expired(Instant::now()) { @@ -853,7 +853,7 @@ where /// Returns `Ok` if a record has been stored locally, providing the /// `QueryId` of the initial query that replicates the record in the DHT. 
/// The result of the query is eventually reported as a
-    /// [`Event::OutboundQueryProgressed{QueryResult::PutRecord}`].
+    /// [`Event::OutboundQueryProgressed`] with `result` [`QueryResult::PutRecord`].
     ///
     /// The record is always stored locally with the given expiration. If the record's
     /// expiration is `None`, the common case, it does not expire in local storage
@@ -967,8 +967,8 @@ where
     ///
     /// Returns `Ok` if bootstrapping has been initiated with a self-lookup, providing the
     /// `QueryId` for the entire bootstrapping process. The progress of bootstrapping is
-    /// reported via [`Event::OutboundQueryProgressed{QueryResult::Bootstrap}`] events,
-    /// with one such event per bootstrapping query.
+    /// reported via [`Event::OutboundQueryProgressed`] with `result` [`QueryResult::Bootstrap`]
+    /// events, with one such event per bootstrapping query.
     ///
     /// Returns `Err` if bootstrapping is impossible due an empty routing table.
     ///
@@ -1020,7 +1020,8 @@ where
     /// of the libp2p Kademlia provider API.
     ///
     /// The results of the (repeated) provider announcements sent by this node are
-    /// reported via [`Event::OutboundQueryProgressed{QueryResult::StartProviding}`].
+    /// reported via [`Event::OutboundQueryProgressed`] with `result`
+    /// [`QueryResult::StartProviding`].
     pub fn start_providing(&mut self, key: record::Key) -> Result<QueryId, store::Error> {
         // Note: We store our own provider records locally without local addresses
         // to avoid redundant storage and outdated addresses. Instead these are
@@ -1056,7 +1057,7 @@ where
     /// Performs a lookup for providers of a value to the given key.
     ///
     /// The result of this operation is delivered in a
-    /// reported via [`Event::OutboundQueryProgressed{QueryResult::GetProviders}`].
+    /// reported via [`Event::OutboundQueryProgressed`] with `result` [`QueryResult::GetProviders`].
     pub fn get_providers(&mut self, key: record::Key) -> QueryId {
         let providers: HashSet<_> = self
             .store
diff --git a/swarm/src/handler.rs b/swarm/src/handler.rs
index 0e4d1f5a325..633f75304c0 100644
--- a/swarm/src/handler.rs
+++ b/swarm/src/handler.rs
@@ -171,10 +171,10 @@ pub trait ConnectionHandler: Send + 'static {
     /// This is also called when the shutdown was initiated due to an error on the connection.
     /// We therefore cannot guarantee that performing IO within here will succeed.
     ///
-    /// To signal completion, [`Poll::Ready(None)`] should be returned.
+    /// To signal completion, [`Poll::Ready(None)`](Poll::Ready) should be returned.
     ///
     /// Implementations MUST have a [`fuse`](futures::StreamExt::fuse)-like behaviour.
-    /// That is, [`Poll::Ready(None)`] MUST be returned on repeated calls to
+    /// That is, [`Poll::Ready(None)`](Poll::Ready) MUST be returned on repeated calls to
     /// [`ConnectionHandler::poll_close`].
     fn poll_close(&mut self, _: &mut Context<'_>) -> Poll<Option<Self::ToBehaviour>> {
         Poll::Ready(None)

From 3954cfddb898b010e334598ecaff39c88c2c92d8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?=
Date: Wed, 5 Nov 2025 12:46:40 +0000
Subject: [PATCH 50/68] chore: fix clippy lints

Will address the intra-doc links issue in a subsequent PR; the remaining
[issue](https://github.com/libp2p/rust-libp2p/actions/runs/19040315461/job/54375441096?pr=6195#step:6:760)
seems like a bug, so https://github.com/rust-lang/rust-clippy/issues/16023 was
opened.

Pull-Request: #6195.
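A note on the dcutr change below: when a connection handler's inbound upgrade cannot fail, its error type is uninhabited, so any code consuming that error is provably dead, which is what the lint objects to. A minimal, illustrative sketch of the idea using std's `Infallible` (names and setup here are assumptions for illustration, not code from this patch):

```rust
use std::convert::Infallible;

// An operation that can never fail: its error type has no values.
fn parse_always_ok(input: &str) -> Result<&str, Infallible> {
    Ok(input)
}

// Consuming an uninhabited error is statically unreachable; the
// empty match is the canonical way to discharge such a type.
fn handle_error(error: Infallible) -> ! {
    match error {}
}

fn main() {
    match parse_always_ok("ping") {
        Ok(v) => println!("got {v}"),
        // This arm can never execute, which is why a linter may
        // flag handler code shaped like this as dead.
        Err(e) => handle_error(e),
    }
}
```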
--- protocols/dcutr/src/handler/relayed.rs | 6 +----- protocols/identify/src/handler.rs | 1 + swarm/tests/swarm_derive.rs | 12 ++++++------ 3 files changed, 8 insertions(+), 11 deletions(-) diff --git a/protocols/dcutr/src/handler/relayed.rs b/protocols/dcutr/src/handler/relayed.rs index af84d8fe9b1..43e433a5268 100644 --- a/protocols/dcutr/src/handler/relayed.rs +++ b/protocols/dcutr/src/handler/relayed.rs @@ -148,12 +148,8 @@ impl Handler { fn on_listen_upgrade_error( &mut self, - ListenUpgradeError { error, .. }: ListenUpgradeError< - (), - ::InboundProtocol, - >, + _: ListenUpgradeError<(), ::InboundProtocol>, ) { - libp2p_core::util::unreachable(error.into_inner()); } fn on_dial_upgrade_error( diff --git a/protocols/identify/src/handler.rs b/protocols/identify/src/handler.rs index c2e31ae95f6..b77450a617d 100644 --- a/protocols/identify/src/handler.rs +++ b/protocols/identify/src/handler.rs @@ -430,6 +430,7 @@ impl ConnectionHandler for Handler { ConnectionEvent::DialUpgradeError(DialUpgradeError { error, .. }) => { self.events.push(ConnectionHandlerEvent::NotifyBehaviour( Event::IdentificationError( + #[allow(unused)] error.map_upgrade_err(|e| libp2p_core::util::unreachable(e.into_inner())), ), )); diff --git a/swarm/tests/swarm_derive.rs b/swarm/tests/swarm_derive.rs index e297f2f4afa..0906aa80723 100644 --- a/swarm/tests/swarm_derive.rs +++ b/swarm/tests/swarm_derive.rs @@ -84,8 +84,8 @@ fn two_fields() { let _out_event: ::ToSwarm = unimplemented!(); match _out_event { FooEvent::Ping(ping::Event { .. }) => {} - FooEvent::Identify(event) => { - let _: identify::Event = event; + FooEvent::Identify(_event) => { + let _: identify::Event = _event; } } } @@ -112,11 +112,11 @@ fn three_fields() { let _out_event: ::ToSwarm = unimplemented!(); match _out_event { FooEvent::Ping(ping::Event { .. }) => {} - FooEvent::Identify(event) => { - let _: identify::Event = event; + FooEvent::Identify(_event) => { + let _: identify::Event = _event; } - FooEvent::Kad(event) => { - let _: libp2p_kad::Event = event; + FooEvent::Kad(_event) => { + let _: libp2p_kad::Event = _event; } } } From b3d31673a0ed3f45f4c95cf4865c8381dba6f471 Mon Sep 17 00:00:00 2001 From: mattyg Date: Wed, 5 Nov 2025 08:24:34 -0700 Subject: [PATCH 51/68] chore: updated rustwasm links to new org home The rustwasm organization was archived this year. Projects have moved to new URLs to reflect that. This PR just updates those URLs. See https://blog.rust-lang.org/inside-rust/2025/07/21/sunsetting-the-rustwasm-github-org/ Pull-Request: #6199. --- examples/browser-webrtc/README.md | 4 ++-- interop-tests/Dockerfile.chromium | 2 +- transports/websocket-websys/src/lib.rs | 2 +- transports/webtransport-websys/src/lib.rs | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/examples/browser-webrtc/README.md b/examples/browser-webrtc/README.md index eec2c9c0494..2b53cd4c1d8 100644 --- a/examples/browser-webrtc/README.md +++ b/examples/browser-webrtc/README.md @@ -1,11 +1,11 @@ # Rust-libp2p Browser-Server WebRTC Example This example demonstrates how to use the `libp2p-webrtc-websys` transport library in a browser to ping the WebRTC Server. -It uses [wasm-pack](https://rustwasm.github.io/docs/wasm-pack/) to build the project for use in the browser. +It uses [wasm-pack](https://drager.github.io/wasm-pack/) to build the project for use in the browser. ## Running the example -Ensure you have `wasm-pack` [installed](https://rustwasm.github.io/wasm-pack/). +Ensure you have `wasm-pack` [installed](https://drager.github.io/wasm-pack/). 
1. Build the client library:
```shell

diff --git a/interop-tests/Dockerfile.chromium b/interop-tests/Dockerfile.chromium
index 73a9ab82ee7..ff9284f6da2 100644
--- a/interop-tests/Dockerfile.chromium
+++ b/interop-tests/Dockerfile.chromium
@@ -1,7 +1,7 @@
 # syntax=docker/dockerfile:1.5-labs
 FROM rust:1.83 as chef
 RUN rustup target add wasm32-unknown-unknown
-RUN wget -q -O- https://github.com/rustwasm/wasm-pack/releases/download/v0.12.1/wasm-pack-v0.12.1-x86_64-unknown-linux-musl.tar.gz | tar -zx -C /usr/local/bin --strip-components 1 --wildcards "wasm-pack-*/wasm-pack"
+RUN wget -q -O- https://github.com/drager/wasm-pack/releases/download/v0.12.1/wasm-pack-v0.12.1-x86_64-unknown-linux-musl.tar.gz | tar -zx -C /usr/local/bin --strip-components 1 --wildcards "wasm-pack-*/wasm-pack"
 RUN wget -q -O- https://github.com/WebAssembly/binaryen/releases/download/version_115/binaryen-version_115-x86_64-linux.tar.gz | tar -zx -C /usr/local/bin --strip-components 2 --wildcards "binaryen-version_*/bin/wasm-opt"
 RUN wget -q -O- https://github.com/LukeMathWalker/cargo-chef/releases/download/v0.1.62/cargo-chef-x86_64-unknown-linux-gnu.tar.gz | tar -zx -C /usr/local/bin
 WORKDIR /app
diff --git a/transports/websocket-websys/src/lib.rs b/transports/websocket-websys/src/lib.rs
index 72f4068610d..aad50877506 100644
--- a/transports/websocket-websys/src/lib.rs
+++ b/transports/websocket-websys/src/lib.rs
@@ -18,7 +18,7 @@
 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 // SOFTWARE.
 
-//! Libp2p websocket transports built on [web-sys](https://rustwasm.github.io/wasm-bindgen/web-sys/index.html).
+//! Libp2p websocket transports built on [web-sys](https://wasm-bindgen.github.io/wasm-bindgen/contributing/web-sys/index.html).
 
 #![allow(unexpected_cfgs)]
 
diff --git a/transports/webtransport-websys/src/lib.rs b/transports/webtransport-websys/src/lib.rs
index 126adc054a9..8381467737d 100644
--- a/transports/webtransport-websys/src/lib.rs
+++ b/transports/webtransport-websys/src/lib.rs
@@ -1,4 +1,4 @@
-//! Libp2p WebTransport built on [web-sys](https://rustwasm.github.io/wasm-bindgen/web-sys/index.html)
+//! Libp2p WebTransport built on [web-sys](https://wasm-bindgen.github.io/wasm-bindgen/contributing/web-sys/index.html)
 
 #![allow(unexpected_cfgs)]
 

From c89444fbb7f5d46d982284f03420cab8a2cd3816 Mon Sep 17 00:00:00 2001
From: Momcilo Miladinovic <93727411+momoshell@users.noreply.github.com>
Date: Thu, 6 Nov 2025 14:40:41 +0100
Subject: [PATCH 52/68] chore(uds): Renamed UDS Config to follow re-export
 convention

From #2217.

Renames the UDS config symbol: both `UdsConfig` in the doc comments and
`TokioUdsConfig` in code become `Config`, for better referencing when
re-exported. A deprecated alias is left for backward compatibility.

Pull-Request: #6190.
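As a usage-level illustration of the rename (a minimal sketch, assuming the `tokio` feature of `libp2p-uds` is enabled; not part of the patch):

```rust
// Sketch: the re-export-friendly name after this patch.
// `TokioUdsConfig` remains available as a deprecated alias, so existing
// code keeps compiling, with a deprecation warning.
use libp2p_uds::Config;

fn uds_transport() -> Config {
    // Previously written as: libp2p_uds::TokioUdsConfig::new()
    Config::new()
}
```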
--- Cargo.lock | 2 +- Cargo.toml | 2 +- transports/uds/CHANGELOG.md | 5 +++++ transports/uds/Cargo.toml | 2 +- transports/uds/src/lib.rs | 34 ++++++++++++++++++++-------------- 5 files changed, 28 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 421ae65f53f..cf9c09a3138 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3161,7 +3161,7 @@ dependencies = [ [[package]] name = "libp2p-uds" -version = "0.43.0" +version = "0.43.1" dependencies = [ "futures", "libp2p-core", diff --git a/Cargo.toml b/Cargo.toml index 36c148cffee..6557a33b025 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -107,7 +107,7 @@ libp2p-swarm-derive = { version = "=0.35.1", path = "swarm-derive" } # `libp2p-s libp2p-swarm-test = { version = "0.6.0", path = "swarm-test" } libp2p-tcp = { version = "0.44.0", path = "transports/tcp" } libp2p-tls = { version = "0.6.2", path = "transports/tls" } -libp2p-uds = { version = "0.43.0", path = "transports/uds" } +libp2p-uds = { version = "0.43.1", path = "transports/uds" } libp2p-upnp = { version = "0.6.0", path = "protocols/upnp" } libp2p-webrtc = { version = "0.9.0-alpha.2", path = "transports/webrtc" } libp2p-webrtc-utils = { version = "0.4.0", path = "misc/webrtc-utils" } diff --git a/transports/uds/CHANGELOG.md b/transports/uds/CHANGELOG.md index 92d2f9a4c76..911dab70eca 100644 --- a/transports/uds/CHANGELOG.md +++ b/transports/uds/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.43.1 + +- Rename Config to match naming convention in [discussion 2174](https://github.com/libp2p/rust-libp2p/discussions/2174). + See [PR 6190](https://github.com/libp2p/rust-libp2p/pull/6190). + ## 0.43.0 - Remove `async-std` support. diff --git a/transports/uds/Cargo.toml b/transports/uds/Cargo.toml index c5f748d933a..82356512f8f 100644 --- a/transports/uds/Cargo.toml +++ b/transports/uds/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-uds" edition.workspace = true rust-version = { workspace = true } description = "Unix domain sockets transport for libp2p" -version = "0.43.0" +version = "0.43.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" diff --git a/transports/uds/src/lib.rs b/transports/uds/src/lib.rs index b69aa9d6243..082bc831bc9 100644 --- a/transports/uds/src/lib.rs +++ b/transports/uds/src/lib.rs @@ -26,9 +26,9 @@ //! //! # Usage //! -//! The `UdsConfig` transport supports multiaddresses of the form `/unix//tmp/foo`. +//! The `Config` transport supports multiaddresses of the form `/unix//tmp/foo`. //! -//! The `UdsConfig` structs implements the `Transport` trait of the `core` library. See the +//! The `Config` structs implements the `Transport` trait of the `core` library. See the //! documentation of `core` and of libp2p in general to learn how to use the `Transport` trait. #![cfg(all(unix, not(target_os = "emscripten"), feature = "tokio"))] @@ -62,28 +62,28 @@ pub type Listener = BoxStream< >; macro_rules! codegen { - ($feature_name:expr, $uds_config:ident, $build_listener:expr, $unix_stream:ty, $($mut_or_not:tt)*) => { + ($feature_name:expr, $config:ident, $build_listener:expr, $unix_stream:ty, $($mut_or_not:tt)*) => { /// Represents the configuration for a Unix domain sockets transport capability for libp2p. - pub struct $uds_config { + pub struct $config { listeners: VecDeque<(ListenerId, Listener)>, } - impl $uds_config { + impl $config { /// Creates a new configuration object for Unix domain sockets. 
- pub fn new() -> $uds_config { - $uds_config { + pub fn new() -> $config { + $config { listeners: VecDeque::new(), } } } - impl Default for $uds_config { + impl Default for $config { fn default() -> Self { Self::new() } } - impl Transport for $uds_config { + impl Transport for $config { type Output = $unix_stream; type Error = io::Error; type ListenerUpgrade = Ready>; @@ -204,11 +204,16 @@ macro_rules! codegen { #[cfg(feature = "tokio")] codegen!( "tokio", - TokioUdsConfig, + Config, |addr| async move { tokio::net::UnixListener::bind(&addr) }, tokio::net::UnixStream, ); +// Deprecated type alias for backward compatibility +#[cfg(feature = "tokio")] +#[deprecated(since = "0.43.1", note = "Use `libp2p::uds::Config` instead")] +pub type TokioUdsConfig = Config; + /// Turns a `Multiaddr` containing a single `Unix` component into a path. /// /// Also returns an error if the path is not absolute, as we don't want to dial/listen on relative @@ -243,7 +248,8 @@ mod tests { }; use tokio::io::{AsyncReadExt, AsyncWriteExt}; - use super::{multiaddr_to_path, TokioUdsConfig}; + use super::multiaddr_to_path; + use crate::Config; #[test] fn multiaddr_to_path_conversion() { @@ -272,7 +278,7 @@ mod tests { let (tx, rx) = oneshot::channel(); let listener = async move { - let mut transport = TokioUdsConfig::new().boxed(); + let mut transport = Config::new().boxed(); transport.listen_on(ListenerId::next(), addr).unwrap(); let listen_addr = transport @@ -296,7 +302,7 @@ mod tests { }; let dialer = async move { - let mut uds = TokioUdsConfig::new(); + let mut uds = Config::new(); let addr = rx.await.unwrap(); let mut socket = uds .dial( @@ -318,7 +324,7 @@ mod tests { #[test] #[ignore] // TODO: for the moment unix addresses fail to parse fn larger_addr_denied() { - let mut uds = TokioUdsConfig::new(); + let mut uds = Config::new(); let addr = "/unix//foo/bar".parse::().unwrap(); assert!(uds.listen_on(ListenerId::next(), addr).is_err()); From 246fe262041468ce093e6838ebd21ce4b8709040 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Sun, 9 Nov 2025 00:59:35 +0000 Subject: [PATCH 53/68] update behaviour to cache partials --- protocols/gossipsub/src/behaviour.rs | 61 ++++++++++++++++++++-------- protocols/gossipsub/src/partial.rs | 6 +-- protocols/gossipsub/src/types.rs | 5 ++- 3 files changed, 52 insertions(+), 20 deletions(-) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 799107e4f1c..41305a48d25 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -310,6 +310,10 @@ pub struct Behaviour { #[cfg(feature = "partial_messages")] partial_opts: HashMap, + /// Cached partial messages. + #[cfg(feature = "partial_messages")] + cached_partials: HashMap, Box>>, + /// Map of topics to list of peers that we publish to, but don't subscribe to. 
fanout: HashMap>, @@ -475,6 +479,8 @@ where gossip_promises: Default::default(), #[cfg(feature = "partial_messages")] partial_opts: Default::default(), + #[cfg(feature = "partial_messages")] + cached_partials: Default::default(), }) } @@ -832,14 +838,14 @@ where } #[cfg(feature = "partial_messages")] - pub fn publish_partial( + pub fn publish_partial( &mut self, topic: impl Into, - partial_message: &P, + partial_message: P, ) -> Result<(), PublishError> { let topic_hash = topic.into(); - let group_id = partial_message.group_id().as_ref().to_vec(); + let group_id = partial_message.group_id(); let recipient_peers = self.get_publish_peers(&topic_hash, |_, peer| { peer.partial_opts @@ -847,7 +853,7 @@ where .map(|opts| opts.supports_partial) .unwrap_or_default() }); - let metadata = partial_message.parts_metadata().as_ref().to_vec(); + let metadata = partial_message.parts_metadata(); for peer_id in recipient_peers.iter() { // TODO: this can be optimized, we are going to get the peer again on `send_message` let Some(peer) = &mut self.connected_peers.get_mut(peer_id) else { @@ -878,9 +884,9 @@ where continue; } - let Ok(action) = - partial_message.partial_message_bytes_from_metadata(peer_partial.metadata.as_ref()) - else { + let Ok(action) = partial_message.partial_message_bytes_from_metadata( + peer_partial.metadata.as_ref().map(|p| p.as_ref()), + ) else { tracing::error!(peer = %peer_id, group_id = ?group_id, "Could not reconstruct message bytes for peer metadata"); peer_partials.remove(&group_id); @@ -916,6 +922,9 @@ where return Err(PublishError::NoPeersSubscribedToTopic); } + let cached_topic = self.cached_partials.entry(topic_hash).or_default(); + + cached_topic.insert(partial_message.group_id(), Box::new(partial_message)); Ok(()) } @@ -1715,6 +1724,7 @@ where .entry(partial_message.group_id.clone()) .or_default(); + // Check if the local partial data we have from the peer is oudated. let metadata_updated = match (&mut peer_partial.metadata, &partial_message.metadata) { (None, Some(remote_metadata)) => { peer_partial.metadata = Some(PeerMetadata::Remote(remote_metadata.clone())); @@ -1749,16 +1759,35 @@ where (Some(_), None) | (None, None) => false, }; - if metadata_updated { - self.events - .push_back(ToSwarm::GenerateEvent(Event::Partial { - topic_id: partial_message.topic_id, - propagation_source: *peer_id, - group_id: partial_message.group_id, - message: partial_message.message, - metadata: partial_message.metadata, - })); + if !metadata_updated { + return; } + + let Some(local_partial) = self + .cached_partials + .get_mut(&partial_message.topic_id) + .and_then(|t| t.get(&partial_message.group_id)) + else { + // Partial should exist in our cache as it exists in the peer details. + tracing::debug!( + peer=%peer_id, + topic=%partial_message.topic_id, + group_id=?partial_message.group_id, + "partial doesn't exist in the cache" + ); + return; + }; + + // let local_updated = match local_partial + + self.events + .push_back(ToSwarm::GenerateEvent(Event::Partial { + topic_id: partial_message.topic_id, + propagation_source: *peer_id, + group_id: partial_message.group_id, + message: partial_message.message, + metadata: partial_message.metadata, + })); } /// Removes the specified peer from the mesh, returning true if it was present. 
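The `publish_partial` flow above drives implementations of the byte-oriented `Partial` trait, whose updated signatures appear in the partial.rs diff that follows. Below is a minimal sketch of an implementing type under stated assumptions: the elided return type is `Result<PublishAction, PartialMessageError>`, the items are reachable from the crate root, error handling uses the `InvalidFormat` variant seen elsewhere in the series, and the chunk-bitmap scheme itself is hypothetical:

```rust
use libp2p_gossipsub::{Metadata, Partial, PartialMessageError, PublishAction};

/// Hypothetical parts metadata: one bit per chunk we hold (at most 8 chunks).
#[derive(Debug)]
struct ChunkBitmap(u8);

impl Metadata for ChunkBitmap {
    fn as_slice(&self) -> &[u8] {
        std::slice::from_ref(&self.0)
    }

    /// Merge a remote bitmap into ours; report whether anything changed.
    fn update(&mut self, data: &[u8]) -> Result<bool, PartialMessageError> {
        let incoming = *data.first().ok_or(PartialMessageError::InvalidFormat)?;
        let before = self.0;
        self.0 |= incoming;
        Ok(self.0 != before)
    }
}

/// Hypothetical message split into at most 8 chunks.
struct ChunkedMessage {
    group: Vec<u8>,
    chunks: Vec<Option<Vec<u8>>>,
}

impl Partial for ChunkedMessage {
    fn group_id(&self) -> Vec<u8> {
        self.group.clone()
    }

    fn parts_metadata(&self) -> Vec<u8> {
        let mut have = 0u8;
        for (i, chunk) in self.chunks.iter().enumerate() {
            if chunk.is_some() {
                have |= 1 << i;
            }
        }
        vec![have]
    }

    fn partial_message_bytes_from_metadata(
        &self,
        metadata: Option<&[u8]>,
    ) -> Result<PublishAction, PartialMessageError> {
        let ours = self.parts_metadata()[0];
        let theirs = match metadata {
            Some(m) => *m.first().ok_or(PartialMessageError::InvalidFormat)?,
            None => 0, // nothing known about the peer yet
        };
        let missing = ours & !theirs;
        if missing == 0 {
            // The peer holds everything we hold; either the part sets are
            // equal or the peer has chunks we still want.
            return Ok(if theirs & !ours == 0 {
                PublishAction::SameMetadata
            } else {
                PublishAction::NothingToSend
            });
        }
        // Send one chunk the peer lacks; the new metadata records what the
        // peer will hold afterwards.
        let idx = missing.trailing_zeros() as usize;
        let message = self.chunks[idx]
            .clone()
            .ok_or(PartialMessageError::InvalidFormat)?;
        Ok(PublishAction::Send {
            message,
            metadata: Box::new(ChunkBitmap(theirs | (1 << idx))),
        })
    }
}
```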
diff --git a/protocols/gossipsub/src/partial.rs b/protocols/gossipsub/src/partial.rs index fe6555d7a54..7c14fecc3cc 100644 --- a/protocols/gossipsub/src/partial.rs +++ b/protocols/gossipsub/src/partial.rs @@ -40,14 +40,14 @@ pub trait Partial { /// All partial messages belonging to the same logical message should return /// the same group ID. This is used to associate partial messages together /// during reconstruction. - fn group_id(&self) -> impl AsRef<[u8]>; + fn group_id(&self) -> Vec; /// Returns application defined metadata describing which parts of the message /// are available and which parts we want. /// /// The returned bytes will be sent in partsMetadata field to advertise /// available and wanted parts to peers. - fn parts_metadata(&self) -> impl AsRef<[u8]>; + fn parts_metadata(&self) -> Vec; /// Generates partial message bytes from the given metadata. /// @@ -58,7 +58,7 @@ pub trait Partial { /// Returns a [`PublishAction`] for the given metadata, or an error. fn partial_message_bytes_from_metadata( &self, - metadata: Option>, + metadata: Option<&[u8]>, ) -> Result; } diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs index 12f939d6e91..430f0fdface 100644 --- a/protocols/gossipsub/src/types.rs +++ b/protocols/gossipsub/src/types.rs @@ -124,11 +124,14 @@ pub struct PartialSubOpts { pub(crate) supports_partial: bool, } -/// Stored `Metadata` for a peer. +/// Stored `Metadata` for a peer, +/// `Remote` or `Local` depends on who last updated it. #[cfg(feature = "partial_messages")] #[derive(Debug)] pub(crate) enum PeerMetadata { + /// The metadata was updated with data from a remote peer. Remote(Vec), + /// The metadata was updated by us when publishing a partial message. Local(Box), } From a3ba2278f4ca617e820a79aba0159f982553a1fc Mon Sep 17 00:00:00 2001 From: Daniel Knopik Date: Mon, 10 Nov 2025 16:30:36 +0100 Subject: [PATCH 54/68] experimental change --- protocols/gossipsub/src/behaviour.rs | 164 ++++++++++++++++++--------- protocols/gossipsub/src/error.rs | 6 + protocols/gossipsub/src/partial.rs | 102 +++++++++++++---- protocols/gossipsub/src/types.rs | 12 +- 4 files changed, 203 insertions(+), 81 deletions(-) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 41305a48d25..587cdbfcde3 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -18,18 +18,8 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
-use std::{ - cmp::{ - max, - Ordering::{self, Equal}, - }, - collections::{BTreeSet, HashMap, HashSet, VecDeque}, - fmt::{self, Debug}, - net::IpAddr, - task::{Context, Poll}, - time::Duration, -}; - +use crate::partial::DynamicPartial; +use crate::partial::Metadata; use futures::FutureExt; use futures_timer::Delay; use hashlink::LinkedHashMap; @@ -49,10 +39,24 @@ use libp2p_swarm::{ use prometheus_client::registry::Registry; use quick_protobuf::{MessageWrite, Writer}; use rand::{seq::SliceRandom, thread_rng}; +use std::any::Any; +use std::{ + cmp::{ + max, + Ordering::{self, Equal}, + }, + collections::{BTreeSet, HashMap, HashSet, VecDeque}, + fmt::{self, Debug}, + mem, + net::IpAddr, + task::{Context, Poll}, + time::Duration, +}; use web_time::{Instant, SystemTime}; #[cfg(feature = "metrics")] use crate::metrics::{Churn, Config as MetricsConfig, Inclusion, Metrics, Penalty}; +use crate::types::PeerMetadata; use crate::{ backoff::BackoffStorage, config::{Config, ValidationMode}, @@ -72,11 +76,12 @@ use crate::{ MessageId, PeerDetails, PeerInfo, PeerKind, Prune, RawMessage, RpcOut, Subscription, SubscriptionAction, }, - FailedMessages, PublishError, SubscriptionError, TopicScoreParams, ValidationError, + FailedMessages, PartialMessageError, PublishError, SubscriptionError, TopicScoreParams, + ValidationError, }; #[cfg(feature = "partial_messages")] use crate::{ - partial::{Partial, PublishAction}, + partial::Partial, types::{PartialMessage, PartialSubOpts}, }; @@ -312,7 +317,7 @@ pub struct Behaviour { /// Cached partial messages. #[cfg(feature = "partial_messages")] - cached_partials: HashMap, Box>>, + cached_partials: HashMap, Box>>, /// Map of topics to list of peers that we publish to, but don't subscribe to. fanout: HashMap>, @@ -853,7 +858,9 @@ where .map(|opts| opts.supports_partial) .unwrap_or_default() }); - let metadata = partial_message.parts_metadata(); + let metadata = partial_message.metadata(); + let metadata_bytes = metadata.encode(); + for peer_id in recipient_peers.iter() { // TODO: this can be optimized, we are going to get the peer again on `send_message` let Some(peer) = &mut self.connected_peers.get_mut(peer_id) else { @@ -876,7 +883,7 @@ where *peer_id, RpcOut::PartialMessage { message: None, - metadata: metadata.clone(), + metadata: metadata_bytes.clone(), group_id: group_id.clone(), topic_id: topic_hash.clone(), }, @@ -884,34 +891,73 @@ where continue; } - let Ok(action) = partial_message.partial_message_bytes_from_metadata( - peer_partial.metadata.as_ref().map(|p| p.as_ref()), - ) else { - tracing::error!(peer = %peer_id, group_id = ?group_id, - "Could not reconstruct message bytes for peer metadata"); - peer_partials.remove(&group_id); - if let PeerScoreState::Active(peer_score) = &mut self.peer_score { - peer_score.reject_invalid_partial(peer_id, &topic_hash); + let peer_metadata = match mem::take(&mut peer_partial.metadata) { + None => None, + Some(PeerMetadata::Local(metadata)) => { + let metadata: Box = metadata; + Some( + metadata + .downcast::() + .map_err(|_| PartialMessageError::InvalidFormat)?, + ) } - continue; + Some(PeerMetadata::Remote(metadata)) => match P::Metadata::decode(&metadata) { + Ok(metadata) => Some(Box::new(metadata)), + Err(err) => { + tracing::error!(peer = %peer_id, group_id = ?group_id, ?err, + "Could not reconstruct message bytes for peer metadata"); + peer_partials.remove(&group_id); + if let PeerScoreState::Active(peer_score) = &mut self.peer_score { + peer_score.reject_invalid_partial(peer_id, &topic_hash); + } + continue; + } + 
}, }; - let message = match action { - PublishAction::SameMetadata => { - // No new data to send peer. - continue; + + let message = if let Some(mut peer_metadata) = peer_metadata { + let order = peer_metadata.compare(&metadata); + + match order { + Some(Ordering::Less) => { + // We have strictly less than the peer. Send them our metadata to request data. + peer_partial.metadata = Some(PeerMetadata::Local(peer_metadata)); + None + } + Some(Ordering::Equal) => { + // We know the exact same thing as the peer. Send nothing. + peer_partial.metadata = Some(PeerMetadata::Local(peer_metadata)); + continue; + } + Some(Ordering::Greater) | None => { + // We know something that the peer does not. Send some data. + let Some(data) = + partial_message.partial_message_bytes_from_metadata(&peer_metadata)? else { + // todo if we are here somethings wrong + continue; + }; + if !peer_metadata.update(&metadata)? { + // todo if we are here somethings wrong + } + peer_partial.metadata = Some(PeerMetadata::Local(peer_metadata)); + Some(data) + } } - PublishAction::Send { metadata, message } => { - peer_partial.metadata = Some(crate::types::PeerMetadata::Local(metadata)); + } else { + // Eager push + if let Some((message, metadata)) = partial_message.data_for_eager_push()? { + peer_partial.metadata = Some(PeerMetadata::Local(Box::new(metadata))); Some(message) + } else { + None } - PublishAction::NothingToSend => None, }; self.send_message( *peer_id, RpcOut::PartialMessage { message, - metadata: metadata.clone(), + metadata: metadata_bytes.clone(), group_id: group_id.clone(), topic_id: topic_hash.clone(), }, @@ -1759,26 +1805,42 @@ where (Some(_), None) | (None, None) => false, }; - if !metadata_updated { - return; - } - - let Some(local_partial) = self + if let Some(local_partial) = self .cached_partials - .get_mut(&partial_message.topic_id) + .get(&partial_message.topic_id) .and_then(|t| t.get(&partial_message.group_id)) - else { - // Partial should exist in our cache as it exists in the peer details. 
- tracing::debug!( - peer=%peer_id, - topic=%partial_message.topic_id, - group_id=?partial_message.group_id, - "partial doesn't exist in the cache" - ); - return; - }; + { + if let Some(peer_metadata) = &mut peer_partial.metadata { + if let PeerMetadata::Remote(data) = &peer_metadata { + let Ok(metadata) = local_partial.decode_metadata(data) else { + // todo punish + return; + }; + *peer_metadata = PeerMetadata::Local(metadata); + } - // let local_updated = match local_partial + if let PeerMetadata::Local(peer_metadata) = peer_metadata { + if let Ok(message) = local_partial.partial_message_bytes_from_metadata(&**peer_metadata) { + let local_metadata = local_partial.metadata(); + + if message.is_some() { + let _ = peer_metadata.update_dynamic(local_metadata); + } + + self.send_message(*peer_id, RpcOut::PartialMessage { + group_id: partial_message.group_id.clone(), + topic_id: partial_message.topic_id.clone(), + message, + metadata: local_metadata.encode(), + }); + } + } + } + } + + if !metadata_updated { + return; + } self.events .push_back(ToSwarm::GenerateEvent(Event::Partial { diff --git a/protocols/gossipsub/src/error.rs b/protocols/gossipsub/src/error.rs index fac93210e7d..18e988d5559 100644 --- a/protocols/gossipsub/src/error.rs +++ b/protocols/gossipsub/src/error.rs @@ -91,6 +91,12 @@ impl From for PublishError { } } +impl From for PublishError { + fn from(error: PartialMessageError) -> Self { + PublishError::Partial(error) + } +} + #[derive(Debug, Clone, Copy, PartialEq)] pub enum ValidationError { /// The message has an invalid signature, diff --git a/protocols/gossipsub/src/partial.rs b/protocols/gossipsub/src/partial.rs index 7c14fecc3cc..322938591b4 100644 --- a/protocols/gossipsub/src/partial.rs +++ b/protocols/gossipsub/src/partial.rs @@ -18,6 +18,8 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::any::Any; +use std::cmp::Ordering; use std::fmt::Debug; use crate::error::PartialMessageError; @@ -35,6 +37,8 @@ use crate::error::PartialMessageError; /// 5. Received partial data is integrated using `extend_from_encoded_partial_message()` /// 6. The `group_id()` ties all parts of the same logical message together pub trait Partial { + type Metadata: Metadata; + /// Returns the unique identifier for this message group. /// /// All partial messages belonging to the same logical message should return @@ -47,7 +51,28 @@ pub trait Partial { /// /// The returned bytes will be sent in partsMetadata field to advertise /// available and wanted parts to peers. - fn parts_metadata(&self) -> Vec; + fn metadata(&self) -> &Self::Metadata; + + /// Generates partial message bytes from the given metadata. + /// + /// When a peer requests specific parts (via PartialIWANT), this method + /// generates the actual message data to send back. The `metadata` parameter + /// describes what parts are being requested. + /// + /// Returns a [`PublishAction`] for the given metadata, or an error. + fn partial_message_bytes_from_metadata( + &self, + metadata: &Self::Metadata, + ) -> Result>, PartialMessageError>; + + fn data_for_eager_push(&self) + -> Result, Self::Metadata)>, PartialMessageError>; +} + +pub(crate) trait DynamicPartial { + fn decode_metadata(&self, metadata: &[u8]) -> Result, PartialMessageError>; + + fn metadata(&self) -> &dyn DynamicMetadata; /// Generates partial message bytes from the given metadata. /// @@ -58,29 +83,68 @@ pub trait Partial { /// Returns a [`PublishAction`] for the given metadata, or an error. 
fn partial_message_bytes_from_metadata( &self, - metadata: Option<&[u8]>, - ) -> Result; + metadata: &dyn DynamicMetadata, + ) -> Result>, PartialMessageError>; } -pub trait Metadata: Debug + Send + Sync { - /// Return the `Metadata` as a byte slice. - fn as_slice(&self) -> &[u8]; +impl DynamicPartial for P { + fn decode_metadata(&self, metadata: &[u8]) -> Result, PartialMessageError> { + Ok(Box::new(P::Metadata::decode(metadata)?)) + } + + fn metadata(&self) -> &dyn DynamicMetadata { + self.metadata() + } + + fn partial_message_bytes_from_metadata( + &self, + metadata: &dyn DynamicMetadata, + ) -> Result>, PartialMessageError> { + let metadata: &dyn Any = metadata; + let Some(metadata) = metadata.downcast_ref::() else { + return Err(PartialMessageError::InvalidFormat) + }; + self.partial_message_bytes_from_metadata(metadata) + } +} + +pub trait Metadata: Debug + Send + Sync + Any { + fn decode(bytes: &[u8]) -> Result + where + Self: Sized; + + fn compare(&self, other: &Self) -> Option; + // Return the `Metadata` as a byte slice. + fn encode(&self) -> Vec; + /// try to Update the `Metadata` with the remote data, + /// return true if it was updated. + fn update(&mut self, data: &Self) -> Result; +} + +pub(crate) trait DynamicMetadata: Debug + Send + Sync + Any { /// try to Update the `Metadata` with the remote data, /// return true if it was updated. fn update(&mut self, data: &[u8]) -> Result; + /// try to Update the `Metadata` with the remote data, + /// return true if it was updated. + fn update_dynamic(&mut self, data: &dyn DynamicMetadata) -> Result; + fn encode(&self) -> Vec; } -/// Indicates the action to take for the given metadata. -pub enum PublishAction { - /// The provided input metadata is the same as the output, - /// this means we have the same data as the peer. - SameMetadata, - /// We have nothing to send to the peer, but we need parts from the peer. - NothingToSend, - /// We have something of interest to this peer, but can not send everything it needs. Send a - /// message and associate some new metadata to the peer, representing the remaining need. - Send { - message: Vec, - metadata: Box, - }, +impl DynamicMetadata for M { + fn update(&mut self, metadata: &[u8]) -> Result { + self.update(&M::decode(metadata)?) + } + + fn update_dynamic(&mut self, metadata: &dyn DynamicMetadata) -> Result { + let metadata: &dyn Any = metadata; + let Some(metadata) = metadata.downcast_ref::() else { + return Err(PartialMessageError::InvalidFormat) + }; + self.update(metadata) + } + + fn encode(&self) -> Vec { + self.encode() + } } diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs index 430f0fdface..261f12b879d 100644 --- a/protocols/gossipsub/src/types.rs +++ b/protocols/gossipsub/src/types.rs @@ -132,17 +132,7 @@ pub(crate) enum PeerMetadata { /// The metadata was updated with data from a remote peer. Remote(Vec), /// The metadata was updated by us when publishing a partial message. - Local(Box), -} - -#[cfg(feature = "partial_messages")] -impl AsRef<[u8]> for PeerMetadata { - fn as_ref(&self) -> &[u8] { - match self { - PeerMetadata::Remote(metadata) => metadata, - PeerMetadata::Local(metadata) => metadata.as_slice(), - } - } + Local(Box), } /// The partial message data the peer has. 
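The publish logic in this experimental change keys off `Metadata::compare` returning a *partial* order: `Less` means the peer holds a strict superset (send them our metadata to request data), `Equal` means nothing to do, and `Greater` or `None` means we hold parts the peer lacks (send data). A minimal sketch of such an implementation against the experimental trait, assuming the elided generics are `Result<Self, PartialMessageError>`, `Option<Ordering>`, `Vec<u8>` and `Result<bool, PartialMessageError>` (the bitmap scheme itself is hypothetical):

```rust
use std::cmp::Ordering;

// Assumed re-exports; the series does not show where these land publicly.
use libp2p_gossipsub::{Metadata, PartialMessageError};

/// Hypothetical metadata: a bitmap of the chunks this side holds.
#[derive(Debug)]
struct ChunkSet(u8);

impl Metadata for ChunkSet {
    fn decode(bytes: &[u8]) -> Result<Self, PartialMessageError> {
        bytes
            .first()
            .copied()
            .map(ChunkSet)
            .ok_or(PartialMessageError::InvalidFormat)
    }

    fn compare(&self, other: &Self) -> Option<Ordering> {
        let we_have_extra = self.0 & !other.0 != 0;
        let they_have_extra = other.0 & !self.0 != 0;
        match (we_have_extra, they_have_extra) {
            (false, false) => Some(Ordering::Equal), // identical part sets
            (true, false) => Some(Ordering::Greater), // strict superset: send data
            (false, true) => Some(Ordering::Less), // strict subset: request data
            (true, true) => None, // incomparable: each side has parts the other lacks
        }
    }

    fn encode(&self) -> Vec<u8> {
        vec![self.0]
    }

    fn update(&mut self, data: &Self) -> Result<bool, PartialMessageError> {
        let before = self.0;
        self.0 |= data.0;
        Ok(self.0 != before)
    }
}
```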
From aa943174438429695c2766dddeadac1af0115b4a Mon Sep 17 00:00:00 2001 From: Daniel Knopik Date: Mon, 10 Nov 2025 17:02:53 +0100 Subject: [PATCH 55/68] add send and sync --- protocols/gossipsub/src/partial.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/protocols/gossipsub/src/partial.rs b/protocols/gossipsub/src/partial.rs index 322938591b4..94faf1f425b 100644 --- a/protocols/gossipsub/src/partial.rs +++ b/protocols/gossipsub/src/partial.rs @@ -36,7 +36,7 @@ use crate::error::PartialMessageError; /// 4. When requests are received, `partial_message_bytes_from_metadata()` generates the response /// 5. Received partial data is integrated using `extend_from_encoded_partial_message()` /// 6. The `group_id()` ties all parts of the same logical message together -pub trait Partial { +pub trait Partial: Send + Sync { type Metadata: Metadata; /// Returns the unique identifier for this message group. @@ -69,7 +69,7 @@ pub trait Partial { -> Result, Self::Metadata)>, PartialMessageError>; } -pub(crate) trait DynamicPartial { +pub(crate) trait DynamicPartial: Send + Sync { fn decode_metadata(&self, metadata: &[u8]) -> Result, PartialMessageError>; fn metadata(&self) -> &dyn DynamicMetadata; From 4e59700079fa4abfe4bbdf46bfad877152de61eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Tue, 11 Nov 2025 15:52:59 +0000 Subject: [PATCH 56/68] automatically reply to partial messages --- protocols/gossipsub/src/behaviour.rs | 117 +++++++++++++++++---------- protocols/gossipsub/src/partial.rs | 21 ++--- 2 files changed, 83 insertions(+), 55 deletions(-) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 41305a48d25..5eead334f38 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -76,7 +76,7 @@ use crate::{ }; #[cfg(feature = "partial_messages")] use crate::{ - partial::{Partial, PublishAction}, + partial::Partial, types::{PartialMessage, PartialSubOpts}, }; @@ -853,7 +853,7 @@ where .map(|opts| opts.supports_partial) .unwrap_or_default() }); - let metadata = partial_message.parts_metadata(); + let publish_metadata = partial_message.metadata(); for peer_id in recipient_peers.iter() { // TODO: this can be optimized, we are going to get the peer again on `send_message` let Some(peer) = &mut self.connected_peers.get_mut(peer_id) else { @@ -867,8 +867,8 @@ where continue; }; - let peer_partials = peer.partial_messages.entry(topic_hash.clone()).or_default(); - let peer_partial = peer_partials.entry(group_id.clone()).or_default(); + let topic_partials = peer.partial_messages.entry(topic_hash.clone()).or_default(); + let group_partials = topic_partials.entry(group_id.clone()).or_default(); // Peer `supports_partial` but doesn't `requests_partial`. 
if !partial_opts.requests_partial { @@ -876,7 +876,7 @@ where *peer_id, RpcOut::PartialMessage { message: None, - metadata: metadata.clone(), + metadata: publish_metadata.clone(), group_id: group_id.clone(), topic_id: topic_hash.clone(), }, @@ -885,33 +885,30 @@ where } let Ok(action) = partial_message.partial_message_bytes_from_metadata( - peer_partial.metadata.as_ref().map(|p| p.as_ref()), + group_partials.metadata.as_ref().map(|p| p.as_ref()), ) else { tracing::error!(peer = %peer_id, group_id = ?group_id, "Could not reconstruct message bytes for peer metadata"); - peer_partials.remove(&group_id); + topic_partials.remove(&group_id); if let PeerScoreState::Active(peer_score) = &mut self.peer_score { peer_score.reject_invalid_partial(peer_id, &topic_hash); } continue; }; - let message = match action { - PublishAction::SameMetadata => { - // No new data to send peer. - continue; - } - PublishAction::Send { metadata, message } => { - peer_partial.metadata = Some(crate::types::PeerMetadata::Local(metadata)); - Some(message) - } - PublishAction::NothingToSend => None, + + // Check if we have new data for the peer. + let Some((message, peer_updated_metadata)) = action.send else { + continue; }; + group_partials.metadata = + Some(crate::types::PeerMetadata::Local(peer_updated_metadata)); + self.send_message( *peer_id, RpcOut::PartialMessage { - message, - metadata: metadata.clone(), + message: Some(message), + metadata: publish_metadata.clone(), group_id: group_id.clone(), topic_id: topic_hash.clone(), }, @@ -923,7 +920,6 @@ where } let cached_topic = self.cached_partials.entry(topic_hash).or_default(); - cached_topic.insert(partial_message.group_id(), Box::new(partial_message)); Ok(()) } @@ -1717,22 +1713,24 @@ where return; }; - let peer_partial = peer + let topic_partials = peer .partial_messages .entry(partial_message.topic_id.clone()) - .or_default() + .or_default(); + + let group_partials = topic_partials .entry(partial_message.group_id.clone()) .or_default(); // Check if the local partial data we have from the peer is oudated. - let metadata_updated = match (&mut peer_partial.metadata, &partial_message.metadata) { + let metadata_updated = match (&mut group_partials.metadata, &partial_message.metadata) { (None, Some(remote_metadata)) => { - peer_partial.metadata = Some(PeerMetadata::Remote(remote_metadata.clone())); + group_partials.metadata = Some(PeerMetadata::Remote(remote_metadata.clone())); true } (Some(PeerMetadata::Remote(ref metadata)), Some(remote_metadata)) => { if metadata != remote_metadata { - peer_partial.metadata = Some(PeerMetadata::Remote(remote_metadata.clone())); + group_partials.metadata = Some(PeerMetadata::Remote(remote_metadata.clone())); true } else { false @@ -1763,31 +1761,68 @@ where return; } + // We may have already received other partials from this and other peers, + // but haven't responded to them yet, in those situations just return + // the partial to the application layer. let Some(local_partial) = self .cached_partials .get_mut(&partial_message.topic_id) .and_then(|t| t.get(&partial_message.group_id)) else { - // Partial should exist in our cache as it exists in the peer details. 
- tracing::debug!( - peer=%peer_id, - topic=%partial_message.topic_id, - group_id=?partial_message.group_id, - "partial doesn't exist in the cache" - ); + self.events + .push_back(ToSwarm::GenerateEvent(Event::Partial { + topic_id: partial_message.topic_id, + propagation_source: *peer_id, + group_id: partial_message.group_id, + message: partial_message.message, + metadata: partial_message.metadata, + })); return; }; - // let local_updated = match local_partial + let action = match local_partial + .partial_message_bytes_from_metadata(partial_message.metadata.as_deref()) + { + Ok(action) => action, + Err(err) => { + tracing::debug!(peer = %peer_id, group_id = ?partial_message.group_id,err = %err, + "Could not reconstruct message bytes for peer metadata from a received partial"); + // Should we remove the partial from the peer? + topic_partials.remove(&partial_message.group_id); + if let PeerScoreState::Active(peer_score) = &mut self.peer_score { + peer_score.reject_invalid_partial(peer_id, &partial_message.topic_id); + } + return; + } + }; + + // We have new data for that peer. + if let Some((message, peer_updated_metadata)) = action.send { + group_partials.metadata = + Some(crate::types::PeerMetadata::Local(peer_updated_metadata)); + + let cached_metadata = local_partial.metadata().as_slice().to_vec(); + self.send_message( + *peer_id, + RpcOut::PartialMessage { + message: Some(message), + metadata: cached_metadata, + group_id: partial_message.group_id.clone(), + topic_id: partial_message.topic_id.clone(), + }, + ); + } - self.events - .push_back(ToSwarm::GenerateEvent(Event::Partial { - topic_id: partial_message.topic_id, - propagation_source: *peer_id, - group_id: partial_message.group_id, - message: partial_message.message, - metadata: partial_message.metadata, - })); + if action.need { + self.events + .push_back(ToSwarm::GenerateEvent(Event::Partial { + topic_id: partial_message.topic_id, + propagation_source: *peer_id, + group_id: partial_message.group_id, + message: partial_message.message, + metadata: partial_message.metadata, + })); + } } /// Removes the specified peer from the mesh, returning true if it was present. diff --git a/protocols/gossipsub/src/partial.rs b/protocols/gossipsub/src/partial.rs index 7c14fecc3cc..5e6aad54648 100644 --- a/protocols/gossipsub/src/partial.rs +++ b/protocols/gossipsub/src/partial.rs @@ -34,7 +34,7 @@ use crate::error::PartialMessageError; /// 4. When requests are received, `partial_message_bytes_from_metadata()` generates the response /// 5. Received partial data is integrated using `extend_from_encoded_partial_message()` /// 6. The `group_id()` ties all parts of the same logical message together -pub trait Partial { +pub trait Partial: Send + Sync { /// Returns the unique identifier for this message group. /// /// All partial messages belonging to the same logical message should return @@ -47,7 +47,7 @@ pub trait Partial { /// /// The returned bytes will be sent in partsMetadata field to advertise /// available and wanted parts to peers. - fn parts_metadata(&self) -> Vec; + fn metadata(&self) -> Vec; /// Generates partial message bytes from the given metadata. /// @@ -71,16 +71,9 @@ pub trait Metadata: Debug + Send + Sync { } /// Indicates the action to take for the given metadata. -pub enum PublishAction { - /// The provided input metadata is the same as the output, - /// this means we have the same data as the peer. - SameMetadata, - /// We have nothing to send to the peer, but we need parts from the peer. 
-    NothingToSend,
-    /// We have something of interest to this peer, but can not send everything it needs. Send a
-    /// message and associate some new metadata to the peer, representing the remaining need.
-    Send {
-        message: Vec<u8>,
-        metadata: Box<dyn Metadata>,
-    },
+pub struct PublishAction {
+    /// Indicates whether we want remote data from the peer.
+    pub need: bool,
+    /// Indicates whether we have data to send to that peer.
+    pub send: Option<(Vec<u8>, Box<dyn Metadata>)>,
 }

From 0a58619c727c03cb097fdae38e8e5c17db360768 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?=
Date: Wed, 12 Nov 2025 14:10:00 +0000
Subject: [PATCH 57/68] Revert "add send and sync"

This reverts commit aa943174438429695c2766dddeadac1af0115b4a.
---
 protocols/gossipsub/src/partial.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/protocols/gossipsub/src/partial.rs b/protocols/gossipsub/src/partial.rs
index 94faf1f425b..322938591b4 100644
--- a/protocols/gossipsub/src/partial.rs
+++ b/protocols/gossipsub/src/partial.rs
@@ -36,7 +36,7 @@ use crate::error::PartialMessageError;
 /// 4. When requests are received, `partial_message_bytes_from_metadata()` generates the response
 /// 5. Received partial data is integrated using `extend_from_encoded_partial_message()`
 /// 6. The `group_id()` ties all parts of the same logical message together
-pub trait Partial: Send + Sync {
+pub trait Partial {
     type Metadata: Metadata;
 
     /// Returns the unique identifier for this message group.
@@ -69,7 +69,7 @@ pub trait Partial: Send + Sync {
         -> Result<Option<(Vec<u8>, Self::Metadata)>, PartialMessageError>;
 }
 
-pub(crate) trait DynamicPartial: Send + Sync {
+pub(crate) trait DynamicPartial {
     fn decode_metadata(&self, metadata: &[u8]) -> Result<Box<dyn DynamicMetadata>, PartialMessageError>;
 
     fn metadata(&self) -> &dyn DynamicMetadata;

From 1ea4ebd60ea7a743b08f64b05d759522a49aba5c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?=
Date: Wed, 12 Nov 2025 14:10:21 +0000
Subject: [PATCH 58/68] Revert "experimental change"

This reverts commit a3ba2278f4ca617e820a79aba0159f982553a1fc.
---
 protocols/gossipsub/src/behaviour.rs | 164 +++++++++------------------
 protocols/gossipsub/src/error.rs     |   6 -
 protocols/gossipsub/src/partial.rs   | 102 ++++------------
 protocols/gossipsub/src/types.rs     |  12 +-
 4 files changed, 81 insertions(+), 203 deletions(-)

diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs
index 587cdbfcde3..41305a48d25 100644
--- a/protocols/gossipsub/src/behaviour.rs
+++ b/protocols/gossipsub/src/behaviour.rs
@@ -18,8 +18,18 @@
 // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 // DEALINGS IN THE SOFTWARE.
-use crate::partial::DynamicPartial; -use crate::partial::Metadata; +use std::{ + cmp::{ + max, + Ordering::{self, Equal}, + }, + collections::{BTreeSet, HashMap, HashSet, VecDeque}, + fmt::{self, Debug}, + net::IpAddr, + task::{Context, Poll}, + time::Duration, +}; + use futures::FutureExt; use futures_timer::Delay; use hashlink::LinkedHashMap; @@ -39,24 +49,10 @@ use libp2p_swarm::{ use prometheus_client::registry::Registry; use quick_protobuf::{MessageWrite, Writer}; use rand::{seq::SliceRandom, thread_rng}; -use std::any::Any; -use std::{ - cmp::{ - max, - Ordering::{self, Equal}, - }, - collections::{BTreeSet, HashMap, HashSet, VecDeque}, - fmt::{self, Debug}, - mem, - net::IpAddr, - task::{Context, Poll}, - time::Duration, -}; use web_time::{Instant, SystemTime}; #[cfg(feature = "metrics")] use crate::metrics::{Churn, Config as MetricsConfig, Inclusion, Metrics, Penalty}; -use crate::types::PeerMetadata; use crate::{ backoff::BackoffStorage, config::{Config, ValidationMode}, @@ -76,12 +72,11 @@ use crate::{ MessageId, PeerDetails, PeerInfo, PeerKind, Prune, RawMessage, RpcOut, Subscription, SubscriptionAction, }, - FailedMessages, PartialMessageError, PublishError, SubscriptionError, TopicScoreParams, - ValidationError, + FailedMessages, PublishError, SubscriptionError, TopicScoreParams, ValidationError, }; #[cfg(feature = "partial_messages")] use crate::{ - partial::Partial, + partial::{Partial, PublishAction}, types::{PartialMessage, PartialSubOpts}, }; @@ -317,7 +312,7 @@ pub struct Behaviour { /// Cached partial messages. #[cfg(feature = "partial_messages")] - cached_partials: HashMap, Box>>, + cached_partials: HashMap, Box>>, /// Map of topics to list of peers that we publish to, but don't subscribe to. fanout: HashMap>, @@ -858,9 +853,7 @@ where .map(|opts| opts.supports_partial) .unwrap_or_default() }); - let metadata = partial_message.metadata(); - let metadata_bytes = metadata.encode(); - + let metadata = partial_message.parts_metadata(); for peer_id in recipient_peers.iter() { // TODO: this can be optimized, we are going to get the peer again on `send_message` let Some(peer) = &mut self.connected_peers.get_mut(peer_id) else { @@ -883,7 +876,7 @@ where *peer_id, RpcOut::PartialMessage { message: None, - metadata: metadata_bytes.clone(), + metadata: metadata.clone(), group_id: group_id.clone(), topic_id: topic_hash.clone(), }, @@ -891,73 +884,34 @@ where continue; } - let peer_metadata = match mem::take(&mut peer_partial.metadata) { - None => None, - Some(PeerMetadata::Local(metadata)) => { - let metadata: Box = metadata; - Some( - metadata - .downcast::() - .map_err(|_| PartialMessageError::InvalidFormat)?, - ) + let Ok(action) = partial_message.partial_message_bytes_from_metadata( + peer_partial.metadata.as_ref().map(|p| p.as_ref()), + ) else { + tracing::error!(peer = %peer_id, group_id = ?group_id, + "Could not reconstruct message bytes for peer metadata"); + peer_partials.remove(&group_id); + if let PeerScoreState::Active(peer_score) = &mut self.peer_score { + peer_score.reject_invalid_partial(peer_id, &topic_hash); } - Some(PeerMetadata::Remote(metadata)) => match P::Metadata::decode(&metadata) { - Ok(metadata) => Some(Box::new(metadata)), - Err(err) => { - tracing::error!(peer = %peer_id, group_id = ?group_id, ?err, - "Could not reconstruct message bytes for peer metadata"); - peer_partials.remove(&group_id); - if let PeerScoreState::Active(peer_score) = &mut self.peer_score { - peer_score.reject_invalid_partial(peer_id, &topic_hash); - } - continue; - } - }, + 
continue; }; - - let message = if let Some(mut peer_metadata) = peer_metadata { - let order = peer_metadata.compare(&metadata); - - match order { - Some(Ordering::Less) => { - // We have strictly less than the peer. Send them our metadata to request data. - peer_partial.metadata = Some(PeerMetadata::Local(peer_metadata)); - None - } - Some(Ordering::Equal) => { - // We know the exact same thing as the peer. Send nothing. - peer_partial.metadata = Some(PeerMetadata::Local(peer_metadata)); - continue; - } - Some(Ordering::Greater) | None => { - // We know something that the peer does not. Send some data. - let Some(data) = - partial_message.partial_message_bytes_from_metadata(&peer_metadata)? else { - // todo if we are here somethings wrong - continue; - }; - if !peer_metadata.update(&metadata)? { - // todo if we are here somethings wrong - } - peer_partial.metadata = Some(PeerMetadata::Local(peer_metadata)); - Some(data) - } + let message = match action { + PublishAction::SameMetadata => { + // No new data to send peer. + continue; } - } else { - // Eager push - if let Some((message, metadata)) = partial_message.data_for_eager_push()? { - peer_partial.metadata = Some(PeerMetadata::Local(Box::new(metadata))); + PublishAction::Send { metadata, message } => { + peer_partial.metadata = Some(crate::types::PeerMetadata::Local(metadata)); Some(message) - } else { - None } + PublishAction::NothingToSend => None, }; self.send_message( *peer_id, RpcOut::PartialMessage { message, - metadata: metadata_bytes.clone(), + metadata: metadata.clone(), group_id: group_id.clone(), topic_id: topic_hash.clone(), }, @@ -1805,43 +1759,27 @@ where (Some(_), None) | (None, None) => false, }; - if let Some(local_partial) = self - .cached_partials - .get(&partial_message.topic_id) - .and_then(|t| t.get(&partial_message.group_id)) - { - if let Some(peer_metadata) = &mut peer_partial.metadata { - if let PeerMetadata::Remote(data) = &peer_metadata { - let Ok(metadata) = local_partial.decode_metadata(data) else { - // todo punish - return; - }; - *peer_metadata = PeerMetadata::Local(metadata); - } - - if let PeerMetadata::Local(peer_metadata) = peer_metadata { - if let Ok(message) = local_partial.partial_message_bytes_from_metadata(&**peer_metadata) { - let local_metadata = local_partial.metadata(); - - if message.is_some() { - let _ = peer_metadata.update_dynamic(local_metadata); - } - - self.send_message(*peer_id, RpcOut::PartialMessage { - group_id: partial_message.group_id.clone(), - topic_id: partial_message.topic_id.clone(), - message, - metadata: local_metadata.encode(), - }); - } - } - } - } - if !metadata_updated { return; } + let Some(local_partial) = self + .cached_partials + .get_mut(&partial_message.topic_id) + .and_then(|t| t.get(&partial_message.group_id)) + else { + // Partial should exist in our cache as it exists in the peer details. 
+ tracing::debug!( + peer=%peer_id, + topic=%partial_message.topic_id, + group_id=?partial_message.group_id, + "partial doesn't exist in the cache" + ); + return; + }; + + // let local_updated = match local_partial + self.events .push_back(ToSwarm::GenerateEvent(Event::Partial { topic_id: partial_message.topic_id, diff --git a/protocols/gossipsub/src/error.rs b/protocols/gossipsub/src/error.rs index 18e988d5559..fac93210e7d 100644 --- a/protocols/gossipsub/src/error.rs +++ b/protocols/gossipsub/src/error.rs @@ -91,12 +91,6 @@ impl From for PublishError { } } -impl From for PublishError { - fn from(error: PartialMessageError) -> Self { - PublishError::Partial(error) - } -} - #[derive(Debug, Clone, Copy, PartialEq)] pub enum ValidationError { /// The message has an invalid signature, diff --git a/protocols/gossipsub/src/partial.rs b/protocols/gossipsub/src/partial.rs index 322938591b4..7c14fecc3cc 100644 --- a/protocols/gossipsub/src/partial.rs +++ b/protocols/gossipsub/src/partial.rs @@ -18,8 +18,6 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. -use std::any::Any; -use std::cmp::Ordering; use std::fmt::Debug; use crate::error::PartialMessageError; @@ -37,8 +35,6 @@ use crate::error::PartialMessageError; /// 5. Received partial data is integrated using `extend_from_encoded_partial_message()` /// 6. The `group_id()` ties all parts of the same logical message together pub trait Partial { - type Metadata: Metadata; - /// Returns the unique identifier for this message group. /// /// All partial messages belonging to the same logical message should return @@ -51,28 +47,7 @@ pub trait Partial { /// /// The returned bytes will be sent in partsMetadata field to advertise /// available and wanted parts to peers. - fn metadata(&self) -> &Self::Metadata; - - /// Generates partial message bytes from the given metadata. - /// - /// When a peer requests specific parts (via PartialIWANT), this method - /// generates the actual message data to send back. The `metadata` parameter - /// describes what parts are being requested. - /// - /// Returns a [`PublishAction`] for the given metadata, or an error. - fn partial_message_bytes_from_metadata( - &self, - metadata: &Self::Metadata, - ) -> Result>, PartialMessageError>; - - fn data_for_eager_push(&self) - -> Result, Self::Metadata)>, PartialMessageError>; -} - -pub(crate) trait DynamicPartial { - fn decode_metadata(&self, metadata: &[u8]) -> Result, PartialMessageError>; - - fn metadata(&self) -> &dyn DynamicMetadata; + fn parts_metadata(&self) -> Vec; /// Generates partial message bytes from the given metadata. /// @@ -83,68 +58,29 @@ pub(crate) trait DynamicPartial { /// Returns a [`PublishAction`] for the given metadata, or an error. 
fn partial_message_bytes_from_metadata( &self, - metadata: &dyn DynamicMetadata, - ) -> Result>, PartialMessageError>; + metadata: Option<&[u8]>, + ) -> Result; } -impl DynamicPartial for P { - fn decode_metadata(&self, metadata: &[u8]) -> Result, PartialMessageError> { - Ok(Box::new(P::Metadata::decode(metadata)?)) - } - - fn metadata(&self) -> &dyn DynamicMetadata { - self.metadata() - } - - fn partial_message_bytes_from_metadata( - &self, - metadata: &dyn DynamicMetadata, - ) -> Result>, PartialMessageError> { - let metadata: &dyn Any = metadata; - let Some(metadata) = metadata.downcast_ref::() else { - return Err(PartialMessageError::InvalidFormat) - }; - self.partial_message_bytes_from_metadata(metadata) - } -} - -pub trait Metadata: Debug + Send + Sync + Any { - fn decode(bytes: &[u8]) -> Result - where - Self: Sized; - - fn compare(&self, other: &Self) -> Option; - // Return the `Metadata` as a byte slice. - fn encode(&self) -> Vec; - /// try to Update the `Metadata` with the remote data, - /// return true if it was updated. - fn update(&mut self, data: &Self) -> Result; -} - -pub(crate) trait DynamicMetadata: Debug + Send + Sync + Any { +pub trait Metadata: Debug + Send + Sync { + /// Return the `Metadata` as a byte slice. + fn as_slice(&self) -> &[u8]; /// try to Update the `Metadata` with the remote data, /// return true if it was updated. fn update(&mut self, data: &[u8]) -> Result; - /// try to Update the `Metadata` with the remote data, - /// return true if it was updated. - fn update_dynamic(&mut self, data: &dyn DynamicMetadata) -> Result; - fn encode(&self) -> Vec; } -impl DynamicMetadata for M { - fn update(&mut self, metadata: &[u8]) -> Result { - self.update(&M::decode(metadata)?) - } - - fn update_dynamic(&mut self, metadata: &dyn DynamicMetadata) -> Result { - let metadata: &dyn Any = metadata; - let Some(metadata) = metadata.downcast_ref::() else { - return Err(PartialMessageError::InvalidFormat) - }; - self.update(metadata) - } - - fn encode(&self) -> Vec { - self.encode() - } +/// Indicates the action to take for the given metadata. +pub enum PublishAction { + /// The provided input metadata is the same as the output, + /// this means we have the same data as the peer. + SameMetadata, + /// We have nothing to send to the peer, but we need parts from the peer. + NothingToSend, + /// We have something of interest to this peer, but can not send everything it needs. Send a + /// message and associate some new metadata to the peer, representing the remaining need. + Send { + message: Vec, + metadata: Box, + }, } diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs index 261f12b879d..430f0fdface 100644 --- a/protocols/gossipsub/src/types.rs +++ b/protocols/gossipsub/src/types.rs @@ -132,7 +132,17 @@ pub(crate) enum PeerMetadata { /// The metadata was updated with data from a remote peer. Remote(Vec), /// The metadata was updated by us when publishing a partial message. - Local(Box), + Local(Box), +} + +#[cfg(feature = "partial_messages")] +impl AsRef<[u8]> for PeerMetadata { + fn as_ref(&self) -> &[u8] { + match self { + PeerMetadata::Remote(metadata) => metadata, + PeerMetadata::Local(metadata) => metadata.as_slice(), + } + } } /// The partial message data the peer has. 
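With the experimental typed API reverted, per-peer metadata is again stored type-erased: the restored `AsRef<[u8]>` impl lets the behaviour treat locally produced (`Box<dyn Metadata>`) and remotely received (`Vec<u8>`) metadata uniformly. A minimal sketch of that pattern in isolation (names mirror the diff; the reduced trait is a stand-in, not the crate's full definition):

```rust
use std::fmt::Debug;

// Stand-in for the crate's trait, reduced to what this sketch needs.
pub trait Metadata: Debug + Send + Sync {
    fn as_slice(&self) -> &[u8];
}

#[derive(Debug)]
enum PeerMetadata {
    /// Raw bytes as received from a remote peer.
    Remote(Vec<u8>),
    /// A live trait object produced by our own publish path.
    Local(Box<dyn Metadata>),
}

impl AsRef<[u8]> for PeerMetadata {
    fn as_ref(&self) -> &[u8] {
        match self {
            PeerMetadata::Remote(bytes) => bytes,
            PeerMetadata::Local(metadata) => metadata.as_slice(),
        }
    }
}

// Callers such as `publish_partial` can then hand either variant along as
// plain bytes: `peer_partial.metadata.as_ref().map(|m| m.as_ref())`.
```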
From ac1404e3a35bea1c6eae951a35dc3c45ea256323 Mon Sep 17 00:00:00 2001
From: Yiannis Marangos
Date: Wed, 12 Nov 2025 17:47:54 +0200
Subject: [PATCH 59/68] fix(webtransport-websys): Allow `poll_flush` after
 `poll_close`

This removes a `debug_assert` from `poll_flush` because it can be valid
for the user to recheck the flush state. In that case the operation
should be a no-op.

Real-life code that triggers this assertion is
[this](https://github.com/libp2p/rust-asynchronous-codec/blob/c818a83906891caf8aadcae8f899727c2c8393a8/src/framed_write.rs#L263-L266).

Fixes https://github.com/libp2p/rust-libp2p/issues/5618

Pull-Request: #6193.
---
 Cargo.lock                                   | 2 +-
 Cargo.toml                                   | 2 +-
 transports/webtransport-websys/CHANGELOG.md  | 5 +++++
 transports/webtransport-websys/Cargo.toml    | 2 +-
 transports/webtransport-websys/src/stream.rs | 4 ----
 5 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index cf9c09a3138..fdeada37500 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3293,7 +3293,7 @@ dependencies = [
 
 [[package]]
 name = "libp2p-webtransport-websys"
-version = "0.5.1"
+version = "0.5.2"
 dependencies = [
  "futures",
  "js-sys",
diff --git a/Cargo.toml b/Cargo.toml
index 6557a33b025..9e65d1ab62f 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -114,7 +114,7 @@ libp2p-webrtc-utils = { version = "0.4.0", path = "misc/webrtc-utils" }
 libp2p-webrtc-websys = { version = "0.4.0", path = "transports/webrtc-websys" }
 libp2p-websocket = { version = "0.45.2", path = "transports/websocket" }
 libp2p-websocket-websys = { version = "0.5.0", path = "transports/websocket-websys" }
-libp2p-webtransport-websys = { version = "0.5.1", path = "transports/webtransport-websys" }
+libp2p-webtransport-websys = { version = "0.5.2", path = "transports/webtransport-websys" }
 libp2p-yamux = { version = "0.47.0", path = "muxers/yamux" }
 
 # External dependencies
diff --git a/transports/webtransport-websys/CHANGELOG.md b/transports/webtransport-websys/CHANGELOG.md
index e455e5ec6c3..e94220bc23e 100644
--- a/transports/webtransport-websys/CHANGELOG.md
+++ b/transports/webtransport-websys/CHANGELOG.md
@@ -1,3 +1,8 @@
+## 0.5.2
+
+- Remove `poll_flush called after poll_close` assertion.
+  See [PR 6193](https://github.com/libp2p/rust-libp2p/pull/6193).
+
 ## 0.5.1
 
 - Remove `once_cell` dependency.
diff --git a/transports/webtransport-websys/Cargo.toml b/transports/webtransport-websys/Cargo.toml
index e577ea74896..bcdf317a3ae 100644
--- a/transports/webtransport-websys/Cargo.toml
+++ b/transports/webtransport-websys/Cargo.toml
@@ -3,7 +3,7 @@ name = "libp2p-webtransport-websys"
 edition.workspace = true
 rust-version = { workspace = true }
 description = "WebTransport for libp2p under WASM environment"
-version = "0.5.1"
+version = "0.5.2"
 authors = [
     "Yiannis Marangos",
     "oblique",
diff --git a/transports/webtransport-websys/src/stream.rs b/transports/webtransport-websys/src/stream.rs
index b9d1669b6dc..18de06e3d8f 100644
--- a/transports/webtransport-websys/src/stream.rs
+++ b/transports/webtransport-websys/src/stream.rs
@@ -153,10 +153,6 @@ impl StreamInner {
             // messages were flushed.
             self.poll_writer_ready(cx)
         } else {
-            debug_assert!(
-                false,
-                "libp2p_webtransport_websys::Stream: poll_flush called after poll_close"
-            );
             Poll::Ready(Ok(()))
         }
     }

From 0125fa96b8de44ce3c3836a26339bfd9ba6db6fe Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Onur=20=C3=96zkan?=
Date: Tue, 18 Nov 2025 20:44:11 +0000
Subject: [PATCH 60/68] refactor(gossipsub): do early return for an empty
 input

Adds a fast path in `remove_data_messages` to avoid unnecessary work
when the caller provides an empty `message_ids` slice.

Pull-Request: #6208.

---
 protocols/gossipsub/CHANGELOG.md | 5 ++++-
 protocols/gossipsub/src/queue.rs | 4 ++++
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/protocols/gossipsub/CHANGELOG.md b/protocols/gossipsub/CHANGELOG.md
index b143b72c15c..dba3d12e879 100644
--- a/protocols/gossipsub/CHANGELOG.md
+++ b/protocols/gossipsub/CHANGELOG.md
@@ -19,13 +19,16 @@
 - Fix incorrect default values in ConfigBuilder
   See [PR 6113](https://github.com/libp2p/rust-libp2p/pull/6113)
- 
+
 - Remove duplicated config `set_topic_max_transmit_size` method, prefer `max_transmit_size_for_topic`.
   See [PR 6173](https://github.com/libp2p/rust-libp2p/pull/6173).
 
 - Switch the internal `async-channel` used to dispatch messages from `NetworkBehaviour` to the `ConnectionHandler` with an internal priority queue.
   See [PR 6175](https://github.com/libp2p/rust-libp2p/pull/6175)
 
+- gossipsub: do early return for an empty input
+  See [PR 6208](https://github.com/libp2p/rust-libp2p/pull/6208).
+
 ## 0.49.2
 
 - Relax `Behaviour::with_metrics` requirements, do not require DataTransform and TopicSubscriptionFilter to also impl Default
diff --git a/protocols/gossipsub/src/queue.rs b/protocols/gossipsub/src/queue.rs
index ff04392e618..971ae801f83 100644
--- a/protocols/gossipsub/src/queue.rs
+++ b/protocols/gossipsub/src/queue.rs
@@ -81,6 +81,10 @@ impl Queue {
     /// Remove pending low priority Publish and Forward messages.
     /// Returns the number of messages removed.
     pub(crate) fn remove_data_messages(&mut self, message_ids: &[MessageId]) -> usize {
+        if message_ids.is_empty() {
+            return 0;
+        }
+
         let mut count = 0;
         self.non_priority.retain(|message| match message {
             RpcOut::Publish { message_id, .. } | RpcOut::Forward { message_id, .. } => {

From a0bf99378081759e38ae8d5f8ed6fca437c80874 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 27 Nov 2025 08:05:56 +0000
Subject: [PATCH 61/68] deps: bump Swatinem/rust-cache from 2.8.1 to 2.8.2

Pull-Request: #6219.
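For context on the `poll_flush` change in PATCH 59 above: the `futures::io::AsyncWrite` contract does not forbid flushing a stream that has already been closed, and wrappers such as `FramedWrite` legitimately re-check the flush state during shutdown. A minimal sketch of the now-expected behaviour, with an illustrative `ClosedWriter` type that is not taken from the crate:

use std::{
    io,
    pin::Pin,
    task::{Context, Poll},
};

use futures::io::AsyncWrite;

/// Illustrative writer: once closed, flushing is a harmless no-op.
struct ClosedWriter {
    closed: bool,
}

impl AsyncWrite for ClosedWriter {
    fn poll_write(
        self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        if self.closed {
            // Writing after close is still an error.
            return Poll::Ready(Err(io::ErrorKind::BrokenPipe.into()));
        }
        Poll::Ready(Ok(buf.len()))
    }

    fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        // Nothing can remain buffered after close, so report success instead
        // of asserting; this mirrors the behaviour the patch restores.
        Poll::Ready(Ok(()))
    }

    fn poll_close(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        self.closed = true;
        Poll::Ready(Ok(()))
    }
}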
---
 .github/workflows/cache-factory.yml |  2 +-
 .github/workflows/ci.yml            | 20 ++++++++++----------
 2 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/.github/workflows/cache-factory.yml b/.github/workflows/cache-factory.yml
index 36b294e40e1..4533df0e80d 100644
--- a/.github/workflows/cache-factory.yml
+++ b/.github/workflows/cache-factory.yml
@@ -22,7 +22,7 @@ jobs:
 
       - uses: dtolnay/rust-toolchain@stable
 
-      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
+      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
         with:
           shared-key: stable-cache
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 3c607a04fb4..e217e8b59a9 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -36,7 +36,7 @@ jobs:
 
       - uses: dtolnay/rust-toolchain@stable
 
-      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
+      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
         with:
           shared-key: stable-cache
           save-if: false
@@ -145,7 +145,7 @@
 
       - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0
 
-      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
+      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
         with:
           key: ${{ matrix.target }}
           save-if: ${{ github.ref == 'refs/heads/master' }}
@@ -170,7 +170,7 @@
 
      - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0
 
-      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
+      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
         with:
           save-if: ${{ github.ref == 'refs/heads/master' }}
@@ -190,7 +190,7 @@
 
       - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0
 
-      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
+      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
         with:
           key: ${{ matrix.features }}
           save-if: ${{ github.ref == 'refs/heads/master' }}
@@ -207,7 +207,7 @@
 
       - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0
 
-      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
+      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
         with:
           save-if: ${{ github.ref == 'refs/heads/master' }}
@@ -233,7 +233,7 @@
 
       - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0
 
-      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
+      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
         with:
           save-if: ${{ github.ref == 'refs/heads/master' }}
@@ -249,7 +249,7 @@
 
       - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0
 
-      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
+      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
         with:
           save-if: ${{ github.ref == 'refs/heads/master' }}
@@ -268,7 +268,7 @@
 
       - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0
 
-      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
+      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
         with:
           shared-key: stable-cache
           save-if: false
@@ -359,7 +359,7 @@
     steps:
       - uses: actions/checkout@v5
 
-      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
+      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
 
       - run: cargo install --version 0.10.0 pb-rs --locked
@@ -385,7 +385,7 @@
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v5
-      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
+      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
       - run: cargo metadata --locked --format-version=1 > /dev/null
 
   cargo-deny:

From b0edc685374936633f08106f0f4516ca5a01d638 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Onur=20=C3=96zkan?=
Date: Sun, 7 Dec 2025 14:17:02 +0300
Subject: [PATCH 62/68] refactor(gossipsub): in-place negative-score peer
 removal

This change improves peer removal during `heartbeat` by switching from
two-pass removal logic to an in-place `retain`, keeping only a small
intermediate counter, `removed_peers_count`.

Pull-Request: #6209.

---
 protocols/gossipsub/CHANGELOG.md     |  3 +++
 protocols/gossipsub/src/behaviour.rs | 30 +++++++++++++++++-----------
 2 files changed, 21 insertions(+), 12 deletions(-)

diff --git a/protocols/gossipsub/CHANGELOG.md b/protocols/gossipsub/CHANGELOG.md
index dba3d12e879..116e85c7849 100644
--- a/protocols/gossipsub/CHANGELOG.md
+++ b/protocols/gossipsub/CHANGELOG.md
@@ -29,6 +29,9 @@
 - gossipsub: do early return for an empty input
   See [PR 6208](https://github.com/libp2p/rust-libp2p/pull/6208).
 
+- Refactor gossipsub with in-place negative-score peer removal.
+  See [PR 6209](https://github.com/libp2p/rust-libp2p/pull/6209).
+
 ## 0.49.2
 
 - Relax `Behaviour::with_metrics` requirements, do not require DataTransform and TopicSubscriptionFilter to also impl Default
diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs
index ee451904d01..521b603efad 100644
--- a/protocols/gossipsub/src/behaviour.rs
+++ b/protocols/gossipsub/src/behaviour.rs
@@ -2135,11 +2135,14 @@
             let mesh_n_high = self.config.mesh_n_high_for_topic(topic_hash);
             let mesh_outbound_min = self.config.mesh_outbound_min_for_topic(topic_hash);
 
-            // drop all peers with negative score, without PX
-            // if there is at some point a stable retain method for BTreeSet the following can be
-            // written more efficiently with retain.
-            let mut to_remove_peers = Vec::new();
-            for peer_id in peers.iter() {
+            #[cfg(feature = "metrics")]
+            let mut removed_peers_count = 0;
+
+            // Drop all peers with negative score, without PX
+            //
+            // TODO: Use `extract_if` once MSRV is raised to a version that includes its
+            // stabilization.
+            peers.retain(|peer_id| {
                 let peer_score = scores.get(peer_id).map(|r| r.score).unwrap_or_default();
 
                 // Record the score per mesh
@@ -2159,17 +2162,20 @@
                     let current_topic = to_prune.entry(*peer_id).or_insert_with(Vec::new);
                     current_topic.push(topic_hash.clone());
                     no_px.insert(*peer_id);
-                    to_remove_peers.push(*peer_id);
+
+                    #[cfg(feature = "metrics")]
+                    {
+                        removed_peers_count += 1;
+                    }
+
+                    return false;
                 }
-            }
+                true
+            });
 
             #[cfg(feature = "metrics")]
             if let Some(m) = self.metrics.as_mut() {
-                m.peers_removed(topic_hash, Churn::BadScore, to_remove_peers.len())
-            }
-
-            for peer_id in to_remove_peers {
-                peers.remove(&peer_id);
+                m.peers_removed(topic_hash, Churn::BadScore, removed_peers_count)
             }
 
             // too little peers - add some

From 2a7e1fee31ca378d6cf48a10a8ddc4bbbbd1c1f6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?=
Date: Mon, 8 Dec 2025 21:06:23 +0000
Subject: [PATCH 63/68] fix(identity): make quick-protobuf dep optional

It's not required for the `peerid` feature.

Pull-Request: #6226.
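The `retain`-based removal in PATCH 62 above is a general pattern: filter a set in place while counting removals for metrics, avoiding both an intermediate `Vec` of ids and a second pass over the set. A standalone sketch with simplified types, where `u64` peer ids and a plain score map stand in for the real ones:

use std::collections::{BTreeSet, HashMap};

/// Drop peers with a negative score in one pass, returning how many were removed.
fn drop_negative_peers(peers: &mut BTreeSet<u64>, scores: &HashMap<u64, f64>) -> usize {
    let mut removed = 0;
    peers.retain(|peer| {
        let score = scores.get(peer).copied().unwrap_or_default();
        if score < 0.0 {
            removed += 1;
            return false;
        }
        true
    });
    removed
}

`BTreeSet::retain` has been stable since Rust 1.53, which is why the old "if there is at some point a stable retain method for BTreeSet" comment could finally be acted on.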
---
 Cargo.lock            |  4 ++--
 Cargo.toml            |  2 +-
 identity/CHANGELOG.md |  5 +++++
 identity/Cargo.toml   | 12 ++++++------
 4 files changed, 14 insertions(+), 9 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index fdeada37500..a752ff90feb 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2665,7 +2665,7 @@ dependencies = [
 
 [[package]]
 name = "libp2p-identity"
-version = "0.2.12"
+version = "0.2.13"
 dependencies = [
  "asn1_der",
  "bs58",
@@ -6612,7 +6612,7 @@
 version = "0.1.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
 dependencies = [
- "windows-sys 0.59.0",
+ "windows-sys 0.48.0",
 ]
diff --git a/Cargo.toml b/Cargo.toml
index 9e65d1ab62f..c873981917d 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -84,7 +84,7 @@ libp2p-dns = { version = "0.44.0", path = "transports/dns" }
 libp2p-floodsub = { version = "0.47.0", path = "protocols/floodsub" }
 libp2p-gossipsub = { version = "0.50.0", path = "protocols/gossipsub" }
 libp2p-identify = { version = "0.47.0", path = "protocols/identify" }
-libp2p-identity = { version = "0.2.12" }
+libp2p-identity = { version = "0.2.13" }
 libp2p-kad = { version = "0.49.0", path = "protocols/kad" }
 libp2p-mdns = { version = "0.48.0", path = "protocols/mdns" }
 libp2p-memory-connection-limits = { version = "0.5.0", path = "misc/memory-connection-limits" }
diff --git a/identity/CHANGELOG.md b/identity/CHANGELOG.md
index 6c58a32af3f..430d8263e7a 100644
--- a/identity/CHANGELOG.md
+++ b/identity/CHANGELOG.md
@@ -1,3 +1,8 @@
+## 0.2.13
+
+- Make the `quick-protobuf` dependency optional; only the features that require it now enable it.
+  See [PR 6226](https://github.com/libp2p/rust-libp2p/pull/6226)
+
 ## 0.2.12
 
 - Avoid depending on the `rand_core` feature in `ed25519-dalek` crate.
diff --git a/identity/Cargo.toml b/identity/Cargo.toml
index b5120efe078..774e77649cd 100644
--- a/identity/Cargo.toml
+++ b/identity/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "libp2p-identity"
-version = "0.2.12"
+version = "0.2.13"
 edition = "2021" # MUST NOT inherit from workspace because we don't want to publish breaking changes to `libp2p-identity`.
 description = "Data structures and algorithms for identifying peers in libp2p."
 rust-version = "1.73.0" # MUST NOT inherit from workspace because we don't want to publish breaking changes to `libp2p-identity`.
@@ -20,7 +20,7 @@ k256 = { version = "0.13.4", optional = true, features = ["ecdsa", "arithmetic"] }
 tracing = { workspace = true }
 multihash = { version = "0.19.1", optional = true }
 p256 = { version = "0.13", default-features = false, features = ["ecdsa", "std", "pem"], optional = true }
-quick-protobuf = "0.8.1"
+quick-protobuf = { version = "0.8.1", optional = true }
 rand = { version = "0.8", optional = true }
 sec1 = { version = "0.7", default-features = false, optional = true }
 serde = { version = "1", optional = true, features = ["derive"] }
@@ -32,10 +32,10 @@ zeroize = { version = "1.8", optional = true }
 ring = { workspace = true, features = ["alloc", "std"], optional = true }
 
 [features]
-secp256k1 = ["dep:k256", "dep:asn1_der", "dep:sha2", "dep:hkdf", "dep:zeroize"]
-ecdsa = ["dep:p256", "dep:zeroize", "dep:sec1", "dep:sha2", "dep:hkdf"]
-rsa = ["dep:ring", "dep:asn1_der", "dep:rand", "dep:zeroize"]
-ed25519 = ["dep:ed25519-dalek", "dep:zeroize", "dep:sha2", "dep:hkdf"]
+secp256k1 = ["dep:k256", "dep:asn1_der", "dep:sha2", "dep:hkdf", "dep:zeroize", "dep:quick-protobuf"]
+ecdsa = ["dep:p256", "dep:zeroize", "dep:sec1", "dep:sha2", "dep:hkdf", "dep:quick-protobuf"]
+rsa = ["dep:ring", "dep:asn1_der", "dep:rand", "dep:zeroize", "dep:quick-protobuf"]
+ed25519 = ["dep:ed25519-dalek", "dep:zeroize", "dep:sha2", "dep:hkdf", "dep:quick-protobuf"]
 peerid = ["dep:multihash", "dep:bs58", "dep:thiserror", "dep:sha2", "dep:hkdf"]
 rand = ["dep:rand"]

From cdad5cb405bf8d1c0e2f96725e3747a25e094fb2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?=
Date: Tue, 9 Dec 2025 18:43:48 +0000
Subject: [PATCH 64/68] add transient peers to the publish peers

---
 protocols/gossipsub/src/behaviour.rs | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs
index 5431c2d0663..8b74f090708 100644
--- a/protocols/gossipsub/src/behaviour.rs
+++ b/protocols/gossipsub/src/behaviour.rs
@@ -847,12 +847,23 @@
 
         let group_id = partial_message.group_id();
 
-        let recipient_peers = self.get_publish_peers(&topic_hash, |_, peer| {
+        let mut recipient_peers = self.get_publish_peers(&topic_hash, |_, peer| {
             peer.partial_opts
                 .get(&topic_hash)
                 .map(|opts| opts.supports_partial)
                 .unwrap_or_default()
         });
+
+        // We add the peers which also share the same group_id to the publish peers;
+        // this allows us to reply to peers who may not be in our mesh but still want the partial update.
+        let transient_peers = self.connected_peers.iter().filter_map(|(peer_id, peer)| {
+            let topic_partials = peer.partial_messages.get(&topic_hash)?;
+            topic_partials.get(&group_id)?;
+            Some(peer_id)
+        });
+
+        recipient_peers.extend(transient_peers);
+
         let publish_metadata = partial_message.metadata();
         for peer_id in recipient_peers.iter() {
             // TODO: this can be optimized, we are going to get the peer again on `send_message`

From 3de7fc0d2253bb0c824616606345e7d39896d02c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?=
Date: Thu, 11 Dec 2025 13:24:03 +0000
Subject: [PATCH 65/68] fix fanout: fanout must reach mesh_n peers, not merely
 be non-empty

---
 protocols/gossipsub/src/behaviour.rs | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs
index 8b74f090708..b5e103baf0e 100644
--- a/protocols/gossipsub/src/behaviour.rs
+++ b/protocols/gossipsub/src/behaviour.rs
@@ -814,20 +814,23 @@
                         .copied()
                         .collect::<Vec<_>>();
 
+                    let needed_extra_peers = mesh_n.saturating_sub(fanout_peers.len());
+                    recipient_peers.extend(fanout_peers);
+
                     // If we have fanout peers add them to the map.
-                    if !fanout_peers.is_empty() {
-                        recipient_peers.extend(fanout_peers);
-                    } else {
+                    if needed_extra_peers > 0 {
                         // We have no fanout peers, select mesh_n of them and add them to the fanout
                         let new_peers =
-                            get_random_peers(peers_on_topic, topic_hash, mesh_n, |_, _| true);
+                            get_random_peers(peers_on_topic, topic_hash, needed_extra_peers, |_, _| {
+                                true
+                            });
                         // Add the new peers to the fanout and recipient peers
+                        tracing::debug!(?new_peers, "Peers added to fanout");
                         self.fanout.insert(topic_hash.clone(), new_peers.clone());
-                        for peer in new_peers {
-                            tracing::debug!(%peer, "Peer added to fanout");
-                            recipient_peers.insert(peer);
-                        }
+                        recipient_peers.extend(new_peers);
                     }
+
+                    // recipient_peers.extend(self.fanout);
                     // We are publishing to fanout peers - update the time we published
                     self.fanout_last_pub
                         .insert(topic_hash.clone(), Instant::now());

From 6e0c2a55d8cdd9a740aadd6df05030eecc2758d9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?=
Date: Thu, 11 Dec 2025 18:18:08 +0000
Subject: [PATCH 66/68] fix wording on comments when gathering fanout peers

---
 protocols/gossipsub/src/behaviour.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs
index b5e103baf0e..eac17e449ba 100644
--- a/protocols/gossipsub/src/behaviour.rs
+++ b/protocols/gossipsub/src/behaviour.rs
@@ -815,11 +815,11 @@
                         .collect::<Vec<_>>();
 
                     let needed_extra_peers = mesh_n.saturating_sub(fanout_peers.len());
+                    // If we have fanout peers add them to the map.
                     recipient_peers.extend(fanout_peers);
 
-                    // If we have fanout peers add them to the map.
if needed_extra_peers > 0 { - // We have no fanout peers, select mesh_n of them and add them to the fanout + // We have insufficient fanout peers, select mesh_n of them and add them to the fanout let new_peers = get_random_peers(peers_on_topic, topic_hash, needed_extra_peers, |_, _| { true From 62a9fc41146801ac230f93d3c9e69b36300567cb Mon Sep 17 00:00:00 2001 From: Daniel Knopik Date: Fri, 12 Dec 2025 14:11:52 +0100 Subject: [PATCH 67/68] fix never sending any partial message unless another has been received before --- protocols/gossipsub/src/behaviour.rs | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index eac17e449ba..f098a12efc1 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -911,17 +911,23 @@ where }; // Check if we have new data for the peer. - let Some((message, peer_updated_metadata)) = action.send else { + let message = if let Some((message, peer_updated_metadata)) = action.send { + // We have something to send, update the peer's metadata. + group_partials.metadata = + Some(crate::types::PeerMetadata::Local(peer_updated_metadata)); + Some(message) + } else if group_partials.metadata.is_none() || action.need { + // We have no data to eagerly send, but we want to transmit our metadata anyway, to + // let the peer know of our metadata so that it sends us its data. + None + } else { continue; }; - group_partials.metadata = - Some(crate::types::PeerMetadata::Local(peer_updated_metadata)); - self.send_message( *peer_id, RpcOut::PartialMessage { - message: Some(message), + message, metadata: publish_metadata.clone(), group_id: group_id.clone(), topic_id: topic_hash.clone(), From 7751293725f666e7efb75be865bb07ea8c43991e Mon Sep 17 00:00:00 2001 From: Daniel Knopik Date: Fri, 12 Dec 2025 18:31:10 +0100 Subject: [PATCH 68/68] do not return early in handle_partial_messsage if it pushes data --- protocols/gossipsub/src/behaviour.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index f098a12efc1..fb831cad611 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -1777,7 +1777,7 @@ where (Some(_), None) | (None, None) => false, }; - if !metadata_updated { + if !metadata_updated && partial_message.message.is_none() { return; }