diff --git a/.github/workflows/go-check.yml b/.github/workflows/go-check.yml new file mode 100644 index 00000000..724ef8e3 --- /dev/null +++ b/.github/workflows/go-check.yml @@ -0,0 +1,19 @@ +name: Go Checks + +on: + pull_request: + push: + branches: ["master"] + workflow_dispatch: + merge_group: + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} + cancel-in-progress: true + +jobs: + go-check: + uses: ipdxco/unified-github-workflows/.github/workflows/go-check.yml@v1.0 diff --git a/.github/workflows/go-test-config.json b/.github/workflows/go-test-config.json new file mode 100644 index 00000000..b0642fbe --- /dev/null +++ b/.github/workflows/go-test-config.json @@ -0,0 +1,4 @@ +{ + "skipOSes": ["windows", "macos"], + "skipRace": true +} diff --git a/.github/workflows/go-test.yml b/.github/workflows/go-test.yml new file mode 100644 index 00000000..505ece58 --- /dev/null +++ b/.github/workflows/go-test.yml @@ -0,0 +1,21 @@ +name: Go Test + +on: + pull_request: + push: + branches: ["master"] + workflow_dispatch: + merge_group: + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} + cancel-in-progress: true + +jobs: + go-test: + uses: ipdxco/unified-github-workflows/.github/workflows/go-test.yml@v1.0 + secrets: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/release-check.yml b/.github/workflows/release-check.yml new file mode 100644 index 00000000..681b5ef1 --- /dev/null +++ b/.github/workflows/release-check.yml @@ -0,0 +1,21 @@ +name: Release Checker + +on: + pull_request_target: + paths: ["version.json"] + types: [ opened, synchronize, reopened, labeled, unlabeled ] + workflow_dispatch: + +permissions: + contents: write + pull-requests: write + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + release-check: + uses: ipdxco/unified-github-workflows/.github/workflows/release-check.yml@v1.0 + with: + sources: '["version.json"]' diff --git a/.github/workflows/releaser.yml b/.github/workflows/releaser.yml new file mode 100644 index 00000000..a2b2a044 --- /dev/null +++ b/.github/workflows/releaser.yml @@ -0,0 +1,21 @@ +name: Releaser + +on: + push: + paths: ["version.json"] + workflow_dispatch: + +permissions: + contents: write + +concurrency: + group: ${{ github.workflow }}-${{ github.sha }} + cancel-in-progress: true + +jobs: + releaser: + uses: ipdxco/unified-github-workflows/.github/workflows/releaser.yml@v1.0 + with: + sources: '["version.json"]' + secrets: + UCI_GITHUB_TOKEN: ${{ secrets.UCI_GITHUB_TOKEN }} diff --git a/.github/workflows/tagpush.yml b/.github/workflows/tagpush.yml new file mode 100644 index 00000000..5ef3fb9e --- /dev/null +++ b/.github/workflows/tagpush.yml @@ -0,0 +1,18 @@ +name: Tag Push Checker + +on: + push: + tags: + - v* + +permissions: + contents: read + issues: write + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + releaser: + uses: ipdxco/unified-github-workflows/.github/workflows/tagpush.yml@v1.0 diff --git a/backoff.go b/backoff.go index 4909e153..99ca7fd0 100644 --- a/backoff.go +++ b/backoff.go @@ -43,7 +43,6 @@ func newBackoff(ctx context.Context, sizeThreshold int, cleanupInterval time.Dur info: make(map[peer.ID]*backoffHistory), } - rand.Seed(time.Now().UnixNano()) // used for jitter go 
b.cleanupLoop(ctx) return b diff --git a/blacklist_test.go b/blacklist_test.go index 045a9c85..a19c46e4 100644 --- a/blacklist_test.go +++ b/blacklist_test.go @@ -38,7 +38,7 @@ func TestBlacklist(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) psubs := getPubsubs(ctx, hosts) connect(t, hosts[0], hosts[1]) @@ -66,7 +66,7 @@ func TestBlacklist2(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) psubs := getPubsubs(ctx, hosts) connect(t, hosts[0], hosts[1]) @@ -99,7 +99,7 @@ func TestBlacklist3(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) psubs := getPubsubs(ctx, hosts) psubs[1].BlacklistPeer(hosts[0].ID()) diff --git a/comm.go b/comm.go index 2dee9b2e..d38cce08 100644 --- a/comm.go +++ b/comm.go @@ -114,7 +114,7 @@ func (p *PubSub) notifyPeerDead(pid peer.ID) { } } -func (p *PubSub) handleNewPeer(ctx context.Context, pid peer.ID, outgoing <-chan *RPC) { +func (p *PubSub) handleNewPeer(ctx context.Context, pid peer.ID, outgoing *rpcQueue) { s, err := p.host.NewStream(p.ctx, pid, p.rt.Protocols()...) if err != nil { log.Debug("opening new stream to peer: ", err, pid) @@ -135,7 +135,7 @@ func (p *PubSub) handleNewPeer(ctx context.Context, pid peer.ID, outgoing <-chan } } -func (p *PubSub) handleNewPeerWithBackoff(ctx context.Context, pid peer.ID, backoff time.Duration, outgoing <-chan *RPC) { +func (p *PubSub) handleNewPeerWithBackoff(ctx context.Context, pid peer.ID, backoff time.Duration, outgoing *rpcQueue) { select { case <-time.After(backoff): p.handleNewPeer(ctx, pid, outgoing) @@ -156,7 +156,7 @@ func (p *PubSub) handlePeerDead(s network.Stream) { p.notifyPeerDead(pid) } -func (p *PubSub) handleSendingMessages(ctx context.Context, s network.Stream, outgoing <-chan *RPC) { +func (p *PubSub) handleSendingMessages(ctx context.Context, s network.Stream, outgoing *rpcQueue) { writeRpc := func(rpc *RPC) error { size := uint64(rpc.Size()) @@ -174,20 +174,17 @@ func (p *PubSub) handleSendingMessages(ctx context.Context, s network.Stream, ou } defer s.Close() - for { - select { - case rpc, ok := <-outgoing: - if !ok { - return - } + for ctx.Err() == nil { + rpc, err := outgoing.Pop(ctx) + if err != nil { + log.Debugf("popping message from the queue to send to %s: %s", s.Conn().RemotePeer(), err) + return + } - err := writeRpc(rpc) - if err != nil { - s.Reset() - log.Debugf("writing message to %s: %s", s.Conn().RemotePeer(), err) - return - } - case <-ctx.Done(): + err = writeRpc(rpc) + if err != nil { + s.Reset() + log.Debugf("writing message to %s: %s", s.Conn().RemotePeer(), err) return } } @@ -209,15 +206,17 @@ func rpcWithControl(msgs []*pb.Message, ihave []*pb.ControlIHave, iwant []*pb.ControlIWant, graft []*pb.ControlGraft, - prune []*pb.ControlPrune) *RPC { + prune []*pb.ControlPrune, + idontwant []*pb.ControlIDontWant) *RPC { return &RPC{ RPC: pb.RPC{ Publish: msgs, Control: &pb.ControlMessage{ - Ihave: ihave, - Iwant: iwant, - Graft: graft, - Prune: prune, + Ihave: ihave, + Iwant: iwant, + Graft: graft, + Prune: prune, + Idontwant: idontwant, }, }, } diff --git a/compat/compat.pb.go b/compat/compat.pb.go index 607b78ac..57a00dd8 100644 --- a/compat/compat.pb.go +++ b/compat/compat.pb.go @@ -5,10 +5,11 @@ package compat_pb import ( fmt "fmt" - proto 
"github.com/gogo/protobuf/proto" io "io" math "math" math_bits "math/bits" + + proto "github.com/gogo/protobuf/proto" ) // Reference imports to suppress errors if they are not otherwise used. diff --git a/discovery_test.go b/discovery_test.go index 66c9c80e..f539e69d 100644 --- a/discovery_test.go +++ b/discovery_test.go @@ -134,7 +134,7 @@ func TestSimpleDiscovery(t *testing.T) { server := newDiscoveryServer() discOpts := []discovery.Option{discovery.Limit(numHosts), discovery.TTL(1 * time.Minute)} - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) psubs := make([]*PubSub, numHosts) topicHandlers := make([]*Topic, numHosts) @@ -234,7 +234,7 @@ func TestGossipSubDiscoveryAfterBootstrap(t *testing.T) { discOpts := []discovery.Option{discovery.Limit(numHosts), discovery.TTL(ttl)} // Put the pubsub clients into two partitions - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) psubs := make([]*PubSub, numHosts) topicHandlers := make([]*Topic, numHosts) diff --git a/floodsub.go b/floodsub.go index 20f592e2..45b3fdee 100644 --- a/floodsub.go +++ b/floodsub.go @@ -71,6 +71,8 @@ func (fs *FloodSubRouter) AcceptFrom(peer.ID) AcceptStatus { return AcceptAll } +func (fs *FloodSubRouter) PreValidation([]*Message) {} + func (fs *FloodSubRouter) HandleRPC(rpc *RPC) {} func (fs *FloodSubRouter) Publish(msg *Message) { @@ -83,19 +85,19 @@ func (fs *FloodSubRouter) Publish(msg *Message) { continue } - mch, ok := fs.p.peers[pid] + q, ok := fs.p.peers[pid] if !ok { continue } - select { - case mch <- out: - fs.tracer.SendRPC(out, pid) - default: + err := q.Push(out, false) + if err != nil { log.Infof("dropping message to peer %s: queue full", pid) fs.tracer.DropRPC(out, pid) // Drop it. The peer is too slow. 
+ continue } + fs.tracer.SendRPC(out, pid) } } diff --git a/floodsub_test.go b/floodsub_test.go index 35dd0d53..8efedaad 100644 --- a/floodsub_test.go +++ b/floodsub_test.go @@ -3,11 +3,12 @@ package pubsub import ( "bytes" "context" + crand "crypto/rand" "crypto/sha256" "encoding/base64" "fmt" "io" - "math/rand" + mrand "math/rand" "sort" "sync" "testing" @@ -20,15 +21,13 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" - bhost "github.com/libp2p/go-libp2p/p2p/host/blank" - swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing" - + //lint:ignore SA1019 "github.com/libp2p/go-msgio/protoio" is deprecated "github.com/libp2p/go-msgio/protoio" ) func checkMessageRouting(t *testing.T, topic string, pubs []*PubSub, subs []*Subscription) { data := make([]byte, 16) - rand.Read(data) + crand.Read(data) for _, p := range pubs { err := p.Publish(topic, data) @@ -42,22 +41,8 @@ func checkMessageRouting(t *testing.T, topic string, pubs []*PubSub, subs []*Sub } } -func getNetHosts(t *testing.T, ctx context.Context, n int) []host.Host { - var out []host.Host - - for i := 0; i < n; i++ { - netw := swarmt.GenSwarm(t) - h := bhost.NewBlankHost(netw) - t.Cleanup(func() { h.Close() }) - out = append(out, h) - } - - return out -} - func connect(t *testing.T, a, b host.Host) { - pinfo := a.Peerstore().PeerInfo(a.ID()) - err := b.Connect(context.Background(), pinfo) + err := b.Connect(context.Background(), peer.AddrInfo{ID: a.ID(), Addrs: a.Addrs()}) if err != nil { t.Fatal(err) } @@ -74,7 +59,7 @@ func denseConnect(t *testing.T, hosts []host.Host) { func connectSome(t *testing.T, hosts []host.Host, d int) { for i, a := range hosts { for j := 0; j < d; j++ { - n := rand.Intn(len(hosts)) + n := mrand.Intn(len(hosts)) if n == i { j-- continue @@ -151,7 +136,7 @@ func assertNeverReceives(t *testing.T, ch *Subscription, timeout time.Duration) func TestBasicFloodsub(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getPubsubs(ctx, hosts) @@ -173,7 +158,7 @@ func TestBasicFloodsub(t *testing.T) { for i := 0; i < 100; i++ { msg := []byte(fmt.Sprintf("%d the flooooooood %d", i, i)) - owner := rand.Intn(len(psubs)) + owner := mrand.Intn(len(psubs)) psubs[owner].Publish("foobar", msg) @@ -193,7 +178,7 @@ func TestMultihops(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 6) + hosts := getDefaultHosts(t, 6) psubs := getPubsubs(ctx, hosts) @@ -235,7 +220,7 @@ func TestReconnects(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 3) + hosts := getDefaultHosts(t, 3) psubs := getPubsubs(ctx, hosts) @@ -309,7 +294,7 @@ func TestNoConnection(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 10) + hosts := getDefaultHosts(t, 10) psubs := getPubsubs(ctx, hosts) @@ -334,7 +319,7 @@ func TestSelfReceive(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - host := getNetHosts(t, ctx, 1)[0] + host := getDefaultHosts(t, 1)[0] psub, err := NewFloodSub(ctx, host) if err != nil { @@ -368,7 +353,7 @@ func TestOneToOne(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) psubs := getPubsubs(ctx, hosts) connect(t, hosts[0], hosts[1]) @@ -401,7 
+386,7 @@ func TestTreeTopology(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 10) + hosts := getDefaultHosts(t, 10) psubs := getPubsubs(ctx, hosts) connect(t, hosts[0], hosts[1]) @@ -464,7 +449,7 @@ func TestFloodSubPluggableProtocol(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 3) + hosts := getDefaultHosts(t, 3) psubA := mustCreatePubSub(ctx, t, hosts[0], "/esh/floodsub", "/lsr/floodsub") psubB := mustCreatePubSub(ctx, t, hosts[1], "/esh/floodsub") @@ -496,7 +481,7 @@ func TestFloodSubPluggableProtocol(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) psubA := mustCreatePubSub(ctx, t, hosts[0], "/esh/floodsub") psubB := mustCreatePubSub(ctx, t, hosts[1], "/lsr/floodsub") @@ -551,7 +536,7 @@ func TestSubReporting(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - host := getNetHosts(t, ctx, 1)[0] + host := getDefaultHosts(t, 1)[0] psub, err := NewFloodSub(ctx, host) if err != nil { t.Fatal(err) @@ -593,7 +578,7 @@ func TestPeerTopicReporting(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 4) + hosts := getDefaultHosts(t, 4) psubs := getPubsubs(ctx, hosts) connect(t, hosts[0], hosts[1]) @@ -650,7 +635,7 @@ func TestSubscribeMultipleTimes(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) psubs := getPubsubs(ctx, hosts) connect(t, hosts[0], hosts[1]) @@ -695,7 +680,7 @@ func TestPeerDisconnect(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) psubs := getPubsubs(ctx, hosts) connect(t, hosts[0], hosts[1]) @@ -743,7 +728,7 @@ func TestWithNoSigning(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) psubs := getPubsubs(ctx, hosts, WithNoAuthor(), WithMessageIdFn(func(pmsg *pb.Message) string { // silly content-based test message-ID: just use the data as whole return base64.URLEncoding.EncodeToString(pmsg.Data) @@ -788,7 +773,7 @@ func TestWithSigning(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) psubs := getPubsubs(ctx, hosts, WithStrictSignatureVerification(true)) connect(t, hosts[0], hosts[1]) @@ -830,7 +815,7 @@ func TestImproperlySignedMessageRejected(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) adversary := hosts[0] honestPeer := hosts[1] @@ -948,7 +933,7 @@ func TestMessageSender(t *testing.T) { const topic = "foobar" - hosts := getNetHosts(t, ctx, 3) + hosts := getDefaultHosts(t, 3) psubs := getPubsubs(ctx, hosts) var msgs []*Subscription @@ -1002,7 +987,7 @@ func TestConfigurableMaxMessageSize(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 10) + hosts := getDefaultHosts(t, 10) // use a 4mb limit; default is 1mb; we'll test with a 2mb payload. 
psubs := getPubsubs(ctx, hosts, WithMaxMessageSize(1<<22)) @@ -1022,7 +1007,7 @@ func TestConfigurableMaxMessageSize(t *testing.T) { // 2mb payload. msg := make([]byte, 1<<21) - rand.Read(msg) + crand.Read(msg) err := psubs[0].Publish(topic, msg) if err != nil { t.Fatal(err) @@ -1045,7 +1030,7 @@ func TestAnnounceRetry(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) ps := getPubsub(ctx, hosts[0]) watcher := &announceWatcher{} hosts[1].SetStreamHandler(FloodSubID, watcher.handleStream) @@ -1117,7 +1102,7 @@ func TestPubsubWithAssortedOptions(t *testing.T) { return string(hash[:]) } - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) psubs := getPubsubs(ctx, hosts, WithMessageIdFn(hashMsgID), WithPeerOutboundQueueSize(10), @@ -1152,8 +1137,7 @@ func TestWithInvalidMessageAuthor(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - h := bhost.NewBlankHost(swarmt.GenSwarm(t)) - defer h.Close() + h := getDefaultHosts(t, 1)[0] _, err := NewFloodSub(ctx, h, WithMessageAuthor("bogotr0n")) if err == nil { t.Fatal("expected error") @@ -1168,10 +1152,9 @@ func TestPreconnectedNodes(t *testing.T) { defer cancel() // Create hosts - h1 := bhost.NewBlankHost(swarmt.GenSwarm(t)) - h2 := bhost.NewBlankHost(swarmt.GenSwarm(t)) - defer h1.Close() - defer h2.Close() + hosts := getDefaultHosts(t, 2) + h1 := hosts[0] + h2 := hosts[1] opts := []Option{WithDiscovery(&dummyDiscovery{})} // Setup first PubSub @@ -1229,10 +1212,9 @@ func TestDedupInboundStreams(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - h1 := bhost.NewBlankHost(swarmt.GenSwarm(t)) - h2 := bhost.NewBlankHost(swarmt.GenSwarm(t)) - defer h1.Close() - defer h2.Close() + hosts := getDefaultHosts(t, 2) + h1 := hosts[0] + h2 := hosts[1] _, err := NewFloodSub(ctx, h1) if err != nil { @@ -1247,18 +1229,30 @@ func TestDedupInboundStreams(t *testing.T) { if err != nil { t.Fatal(err) } + _, err = s1.Read(nil) // force protocol negotiation to complete + if err != nil { + t.Fatal(err) + } time.Sleep(100 * time.Millisecond) s2, err := h2.NewStream(ctx, h1.ID(), FloodSubID) if err != nil { t.Fatal(err) } + _, err = s2.Read(nil) // force protocol negotiation to complete + if err != nil { + t.Fatal(err) + } time.Sleep(100 * time.Millisecond) s3, err := h2.NewStream(ctx, h1.ID(), FloodSubID) if err != nil { t.Fatal(err) } + _, err = s3.Read(nil) // force protocol negotiation to complete + if err != nil { + t.Fatal(err) + } time.Sleep(100 * time.Millisecond) // check that s1 and s2 have been reset diff --git a/go.mod b/go.mod index bd2a94f0..f1358e71 100644 --- a/go.mod +++ b/go.mod @@ -1,44 +1,45 @@ module github.com/libp2p/go-libp2p-pubsub -go 1.21 +go 1.22 require ( github.com/benbjohnson/clock v1.3.5 github.com/gogo/protobuf v1.3.2 github.com/ipfs/go-log/v2 v2.5.1 github.com/libp2p/go-buffer-pool v0.1.0 - github.com/libp2p/go-libp2p v0.34.0 + github.com/libp2p/go-libp2p v0.36.3 github.com/libp2p/go-libp2p-testing v0.12.0 github.com/libp2p/go-msgio v0.3.0 - github.com/multiformats/go-multiaddr v0.12.4 + github.com/multiformats/go-multiaddr v0.13.0 github.com/multiformats/go-varint v0.0.7 + go.uber.org/zap v1.27.0 ) require ( github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect 
github.com/davecgh/go-spew v1.1.1 // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/elastic/gosigar v0.14.2 // indirect + github.com/elastic/gosigar v0.14.3 // indirect github.com/flynn/noise v1.1.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect - github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/google/gopacket v1.1.19 // indirect - github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect - github.com/google/uuid v1.4.0 // indirect - github.com/gorilla/websocket v1.5.1 // indirect + github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/websocket v1.5.3 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/huin/goupnp v1.3.0 // indirect github.com/ipfs/go-cid v0.4.1 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect - github.com/klauspost/compress v1.17.8 // indirect - github.com/klauspost/cpuid/v2 v2.2.7 // indirect + github.com/klauspost/compress v1.17.9 // indirect + github.com/klauspost/cpuid/v2 v2.2.8 // indirect github.com/koron/go-ssdp v0.0.4 // indirect github.com/libp2p/go-flow-metrics v0.1.0 // indirect github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect @@ -48,7 +49,7 @@ require ( github.com/libp2p/go-yamux/v4 v4.0.1 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/miekg/dns v1.1.58 // indirect + github.com/miekg/dns v1.1.62 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect github.com/minio/sha256-simd v1.0.1 // indirect @@ -61,51 +62,52 @@ require ( github.com/multiformats/go-multicodec v0.9.0 // indirect github.com/multiformats/go-multihash v0.2.3 // indirect github.com/multiformats/go-multistream v0.5.0 // indirect - github.com/onsi/ginkgo/v2 v2.15.0 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/onsi/ginkgo/v2 v2.20.0 // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect - github.com/pion/datachannel v1.5.6 // indirect - github.com/pion/dtls/v2 v2.2.11 // indirect - github.com/pion/ice/v2 v2.3.24 // indirect - github.com/pion/interceptor v0.1.29 // indirect + github.com/pion/datachannel v1.5.8 // indirect + github.com/pion/dtls/v2 v2.2.12 // indirect + github.com/pion/ice/v2 v2.3.34 // indirect + github.com/pion/interceptor v0.1.30 // indirect github.com/pion/logging v0.2.2 // indirect github.com/pion/mdns v0.0.12 // indirect github.com/pion/randutil v0.1.0 // indirect github.com/pion/rtcp v1.2.14 // indirect - github.com/pion/rtp v1.8.6 // indirect - github.com/pion/sctp v1.8.16 // indirect + github.com/pion/rtp v1.8.9 // indirect + github.com/pion/sctp v1.8.33 // indirect github.com/pion/sdp/v3 v3.0.9 // indirect - github.com/pion/srtp/v2 v2.0.18 // indirect + github.com/pion/srtp/v2 v2.0.20 // indirect github.com/pion/stun v0.6.1 // indirect - github.com/pion/transport/v2 v2.2.5 // indirect + github.com/pion/transport/v2 v2.2.10 // 
indirect github.com/pion/turn/v2 v2.1.6 // indirect - github.com/pion/webrtc/v3 v3.2.40 // indirect + github.com/pion/webrtc/v3 v3.3.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_golang v1.20.0 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.48.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect + github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/quic-go/qpack v0.4.0 // indirect - github.com/quic-go/quic-go v0.44.0 // indirect + github.com/quic-go/quic-go v0.46.0 // indirect github.com/quic-go/webtransport-go v0.8.0 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/stretchr/testify v1.9.0 // indirect - go.uber.org/dig v1.17.1 // indirect - go.uber.org/fx v1.21.1 // indirect + github.com/wlynxg/anet v0.0.4 // indirect + go.uber.org/dig v1.18.0 // indirect + go.uber.org/fx v1.22.2 // indirect go.uber.org/mock v0.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.23.0 // indirect - golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect - golang.org/x/mod v0.17.0 // indirect - golang.org/x/net v0.25.0 // indirect - golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.20.0 // indirect - golang.org/x/text v0.15.0 // indirect - golang.org/x/tools v0.21.0 // indirect - google.golang.org/protobuf v1.34.1 // indirect + golang.org/x/crypto v0.26.0 // indirect + golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect + golang.org/x/mod v0.20.0 // indirect + golang.org/x/net v0.28.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.24.0 // indirect + golang.org/x/text v0.17.0 // indirect + golang.org/x/tools v0.24.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - lukechampine.com/blake3 v1.2.1 // indirect + lukechampine.com/blake3 v1.3.0 // indirect ) diff --git a/go.sum b/go.sum index 5b9ae3b1..71a6bd2f 100644 --- a/go.sum +++ b/go.sum @@ -18,8 +18,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= @@ -45,8 +45,8 @@ github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize 
v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= -github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= -github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo= +github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= @@ -56,10 +56,10 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= @@ -74,8 +74,6 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -87,16 +85,16 @@ github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 h1:E/LAvt58di64hlYjx7AsNS6C/ysHWYo+2qPCZKTQhRo= -github.com/google/pprof 
v0.0.0-20240207164012-fb44976bdcd5/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= +github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= -github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= @@ -117,10 +115,10 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= -github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= -github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= -github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= +github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -136,8 +134,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.34.0 h1:J+SL3DMz+zPz06OHSRt42GKA5n5hmwgY1l7ckLUz3+c= -github.com/libp2p/go-libp2p 
v0.34.0/go.mod h1:snyJQix4ET6Tj+LeI0VPjjxTtdWpeOhYt5lEY0KirkQ= +github.com/libp2p/go-libp2p v0.36.3 h1:NHz30+G7D8Y8YmznrVZZla0ofVANrvBl2c+oARfMeDQ= +github.com/libp2p/go-libp2p v0.36.3/go.mod h1:4Y5vFyCUiJuluEPmpnKYf6WFx5ViKPUYs/ixe9ANFZ8= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= @@ -162,8 +160,8 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= -github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= +github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= +github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= @@ -185,8 +183,8 @@ github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9 github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= -github.com/multiformats/go-multiaddr v0.12.4 h1:rrKqpY9h+n80EwhhC/kkcunCZZ7URIF8yN1WEUt2Hvc= -github.com/multiformats/go-multiaddr v0.12.4/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII= +github.com/multiformats/go-multiaddr v0.13.0 h1:BCBzs61E3AGHcYYTv8dqRH43ZfyrqM8RXVPT8t13tLQ= +github.com/multiformats/go-multiaddr v0.13.0/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII= github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= @@ -203,27 +201,29 @@ github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dy github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= -github.com/onsi/ginkgo/v2 v2.15.0 
h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= -github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= -github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= -github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/ginkgo/v2 v2.20.0 h1:PE84V2mHqoT1sglvHc8ZdQtPcwmvvt29WLEEO3xmdZw= +github.com/onsi/ginkgo/v2 v2.20.0/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI= +github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= +github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= -github.com/pion/datachannel v1.5.6 h1:1IxKJntfSlYkpUj8LlYRSWpYiTTC02nUrOE8T3DqGeg= -github.com/pion/datachannel v1.5.6/go.mod h1:1eKT6Q85pRnr2mHiWHxJwO50SfZRtWHTsNIVb/NfGW4= +github.com/pion/datachannel v1.5.8 h1:ph1P1NsGkazkjrvyMfhRBUAWMxugJjq2HfQifaOoSNo= +github.com/pion/datachannel v1.5.8/go.mod h1:PgmdpoaNBLX9HNzNClmdki4DYW5JtI7Yibu8QzbL3tI= github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= -github.com/pion/dtls/v2 v2.2.11 h1:9U/dpCYl1ySttROPWJgqWKEylUdT0fXp/xst6JwY5Ks= -github.com/pion/dtls/v2 v2.2.11/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= -github.com/pion/ice/v2 v2.3.24 h1:RYgzhH/u5lH0XO+ABatVKCtRd+4U1GEaCXSMjNr13tI= -github.com/pion/ice/v2 v2.3.24/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw= -github.com/pion/interceptor v0.1.29 h1:39fsnlP1U8gw2JzOFWdfCU82vHvhW9o0rZnZF56wF+M= -github.com/pion/interceptor v0.1.29/go.mod h1:ri+LGNjRUc5xUNtDEPzfdkmSqISixVTBF/z/Zms/6T4= +github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk= +github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= +github.com/pion/ice/v2 v2.3.34 h1:Ic1ppYCj4tUOcPAp76U6F3fVrlSw8A9JtRXLqw6BbUM= +github.com/pion/ice/v2 v2.3.34/go.mod h1:mBF7lnigdqgtB+YHkaY/Y6s6tsyRyo4u4rPGRuOjUBQ= +github.com/pion/interceptor v0.1.30 h1:au5rlVHsgmxNi+v/mjOPazbW1SHzfx7/hYOEYQnUcxA= +github.com/pion/interceptor v0.1.30/go.mod h1:RQuKT5HTdkP2Fi0cuOS5G5WNymTjzXaGF75J4k7z2nc= github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= github.com/pion/mdns v0.0.12 h1:CiMYlY+O0azojWDmxdNr7ADGrnZ+V6Ilfner+6mSVK8= @@ -234,52 +234,50 @@ github.com/pion/rtcp v1.2.12/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9 github.com/pion/rtcp v1.2.14 h1:KCkGV3vJ+4DAJmvP0vaQShsb0xkRfWkO540Gy102KyE= github.com/pion/rtcp v1.2.14/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4= github.com/pion/rtp v1.8.3/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= -github.com/pion/rtp v1.8.6 h1:MTmn/b0aWWsAzux2AmP8WGllusBVw4NPYPVFFd7jUPw= -github.com/pion/rtp v1.8.6/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= -github.com/pion/sctp v1.8.13/go.mod h1:YKSgO/bO/6aOMP9LCie1DuD7m+GamiK2yIiPM6vH+GA= 
-github.com/pion/sctp v1.8.16 h1:PKrMs+o9EMLRvFfXq59WFsC+V8mN1wnKzqrv+3D/gYY= -github.com/pion/sctp v1.8.16/go.mod h1:P6PbDVA++OJMrVNg2AL3XtYHV4uD6dvfyOovCgMs0PE= +github.com/pion/rtp v1.8.9 h1:E2HX740TZKaqdcPmf4pw6ZZuG8u5RlMMt+l3dxeu6Wk= +github.com/pion/rtp v1.8.9/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= +github.com/pion/sctp v1.8.33 h1:dSE4wX6uTJBcNm8+YlMg7lw1wqyKHggsP5uKbdj+NZw= +github.com/pion/sctp v1.8.33/go.mod h1:beTnqSzewI53KWoG3nqB282oDMGrhNxBdb+JZnkCwRM= github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY= github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M= -github.com/pion/srtp/v2 v2.0.18 h1:vKpAXfawO9RtTRKZJbG4y0v1b11NZxQnxRl85kGuUlo= -github.com/pion/srtp/v2 v2.0.18/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA= +github.com/pion/srtp/v2 v2.0.20 h1:HNNny4s+OUmG280ETrCdgFndp4ufx3/uy85EawYEhTk= +github.com/pion/srtp/v2 v2.0.20/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA= github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4= github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8= github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g= -github.com/pion/transport/v2 v2.2.2/go.mod h1:OJg3ojoBJopjEeECq2yJdXH9YVrUJ1uQ++NjXLOUorc= github.com/pion/transport/v2 v2.2.3/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= -github.com/pion/transport/v2 v2.2.5 h1:iyi25i/21gQck4hfRhomF6SktmUQjRsRW4WJdhfc3Kc= -github.com/pion/transport/v2 v2.2.5/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= +github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q= +github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0= -github.com/pion/transport/v3 v3.0.2 h1:r+40RJR25S9w3jbA6/5uEPTzcdn7ncyU44RWCbHkLg4= -github.com/pion/transport/v3 v3.0.2/go.mod h1:nIToODoOlb5If2jF9y2Igfx3PFYWfuXi37m0IlWa/D0= +github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0= +github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo= github.com/pion/turn/v2 v2.1.3/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc= github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= -github.com/pion/webrtc/v3 v3.2.40 h1:Wtfi6AZMQg+624cvCXUuSmrKWepSB7zfgYDOYqsSOVU= -github.com/pion/webrtc/v3 v3.2.40/go.mod h1:M1RAe3TNTD1tzyvqHrbVODfwdPGSXOUo/OgpoGGJqFY= +github.com/pion/webrtc/v3 v3.3.0 h1:Rf4u6n6U5t5sUxhYPQk/samzU/oDv7jk6BA5hyO2F9I= +github.com/pion/webrtc/v3 v3.3.0/go.mod h1:hVmrDJvwhEertRWObeb1xzulzHGeVUoPlWvxdGzcfU0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= 
-github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.0 h1:jBzTZ7B099Rg24tny+qngoynol8LtVYlA2bqx3vEloI= +github.com/prometheus/client_golang v1.20.0/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= -github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= -github.com/quic-go/quic-go v0.44.0 h1:So5wOr7jyO4vzL2sd8/pD9Kesciv91zSk8BoFngItQ0= -github.com/quic-go/quic-go v0.44.0/go.mod h1:z4cx/9Ny9UtGITIPzmPTXh1ULfOyWh4qGQlpnPcWmek= +github.com/quic-go/quic-go v0.46.0 h1:uuwLClEEyk1DNvchH8uCByQVjo3yKL9opKulExNDs7Y= +github.com/quic-go/quic-go v0.46.0/go.mod h1:1dLehS7TIR64+vxGR70GDcatWTOtMX2PUtnKsjbTurI= github.com/quic-go/webtransport-go v0.8.0 h1:HxSrwun11U+LlmwpgM1kEqIqH90IT4N8auv/cD7QFJg= github.com/quic-go/webtransport-go v0.8.0/go.mod h1:N99tjprW432Ut5ONql/aUhSLT0YVSlwHohQsuac9WaM= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= @@ -324,7 +322,6 @@ github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= @@ -336,16 +333,19 @@ github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cb github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= +github.com/wlynxg/anet 
v0.0.4 h1:0de1OFQxnNqAu+x2FAKKCVIrnfGKQbs7FQz++tB0+Uw= +github.com/wlynxg/anet v0.0.4/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= -go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= -go.uber.org/fx v1.21.1 h1:RqBh3cYdzZS0uqwVeEjOX2p73dddLpym315myy/Bpb0= -go.uber.org/fx v1.21.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48= +go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= +go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.22.2 h1:iPW+OPxv0G8w75OemJ1RAnTUrF55zOJlXlo1TbJ0Buw= +go.uber.org/fx v1.22.2/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= @@ -369,16 +369,13 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= -golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= +golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= +golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -390,8 +387,8 @@ 
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= +golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -412,13 +409,10 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -434,8 +428,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -460,34 +454,27 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= @@ -506,8 +493,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.21.0 
h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw= -golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -528,8 +515,8 @@ google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmE google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= -google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -547,7 +534,7 @@ grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJd honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= -lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= +lukechampine.com/blake3 v1.3.0 h1:sJ3XhFINmHSrYCgl958hscfIa3bw8x4DqMP3u1YvoYE= +lukechampine.com/blake3 v1.3.0/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/gossipsub.go b/gossipsub.go index 36b1a8ca..0a7fc194 100644 --- a/gossipsub.go +++ b/gossipsub.go @@ -2,7 +2,9 @@ package pubsub import ( "context" + "crypto/sha256" "fmt" + "io" "math/rand" "sort" "time" @@ -17,17 +19,25 @@ import ( "github.com/libp2p/go-libp2p/core/protocol" "github.com/libp2p/go-libp2p/core/record" "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem" + + "go.uber.org/zap/zapcore" ) const ( // GossipSubID_v10 is the protocol ID for version 1.0.0 of the GossipSub protocol. - // It is advertised along with GossipSubID_v11 for backwards compatibility. + // It is advertised along with GossipSubID_v11 and GossipSubID_v12 for backwards compatibility. GossipSubID_v10 = protocol.ID("/meshsub/1.0.0") // GossipSubID_v11 is the protocol ID for version 1.1.0 of the GossipSub protocol. + // It is advertised along with GossipSubID_v12 for backwards compatibility. 
// See the spec for details about how v1.1.0 compares to v1.0.0: // https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md GossipSubID_v11 = protocol.ID("/meshsub/1.1.0") + + // GossipSubID_v12 is the protocol ID for version 1.2.0 of the GossipSub protocol. + // See the spec for details about how v1.2.0 compares to v1.1.0: + // https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.2.md + GossipSubID_v12 = protocol.ID("/meshsub/1.2.0") ) // Defines the default gossipsub parameters. @@ -58,9 +68,17 @@ var ( GossipSubGraftFloodThreshold = 10 * time.Second GossipSubMaxIHaveLength = 5000 GossipSubMaxIHaveMessages = 10 + GossipSubMaxIDontWantMessages = 1000 GossipSubIWantFollowupTime = 3 * time.Second + GossipSubIDontWantMessageThreshold = 1024 // 1KB + GossipSubIDontWantMessageTTL = 3 // 3 heartbeats ) +type checksum struct { + payload [32]byte + length uint8 +} + // GossipSubParams defines all the gossipsub specific parameters. type GossipSubParams struct { // overlay parameters. @@ -200,10 +218,21 @@ type GossipSubParams struct { // MaxIHaveMessages is the maximum number of IHAVE messages to accept from a peer within a heartbeat. MaxIHaveMessages int + // MaxIDontWantMessages is the maximum number of IDONTWANT messages to accept from a peer within a heartbeat. + MaxIDontWantMessages int + // Time to wait for a message requested through IWANT following an IHAVE advertisement. // If the message is not received within this window, a broken promise is declared and // the router may apply bahavioural penalties. IWantFollowupTime time.Duration + + // IDONTWANT is only sent for messages larger than the threshold. This should be greater than + // D_high * the size of the message id. Otherwise, an attacker could mount an amplification attack by sending + // small messages while the receiver replies with larger IDONTWANT messages. + IDontWantMessageThreshold int + + // IDONTWANT is cleared when it's older than the TTL. + IDontWantMessageTTL int } // NewGossipSub returns a new PubSub object using the default GossipSubRouter as the router.
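// Illustrative sketch, not part of the patch: the sizing rule behind the
// IDontWantMessageThreshold comment above. Receiving one message above the
// threshold triggers an IDONTWANT carrying its message ID to up to D_high mesh
// peers, so the threshold should exceed roughly D_high * len(message id) to
// rule out amplification. The helper name is hypothetical; D_high = 12 and a
// 32-byte message ID are assumed defaults.
func minIDontWantThreshold(dHigh, msgIDLen int) int {
	// worst-case bytes of IDONTWANT message IDs generated per message received
	return dHigh * msgIDLen
}

// Under those assumptions minIDontWantThreshold(12, 32) == 384, which the
// 1024-byte default for GossipSubIDontWantMessageThreshold comfortably clears.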
@@ -222,23 +251,25 @@ func NewGossipSubWithRouter(ctx context.Context, h host.Host, rt PubSubRouter, o func DefaultGossipSubRouter(h host.Host) *GossipSubRouter { params := DefaultGossipSubParams() return &GossipSubRouter{ - peers: make(map[peer.ID]protocol.ID), - mesh: make(map[string]map[peer.ID]struct{}), - fanout: make(map[string]map[peer.ID]struct{}), - lastpub: make(map[string]int64), - gossip: make(map[peer.ID][]*pb.ControlIHave), - control: make(map[peer.ID]*pb.ControlMessage), - backoff: make(map[string]map[peer.ID]time.Time), - peerhave: make(map[peer.ID]int), - iasked: make(map[peer.ID]int), - outbound: make(map[peer.ID]bool), - connect: make(chan connectInfo, params.MaxPendingConnections), - cab: pstoremem.NewAddrBook(), - mcache: NewMessageCache(params.HistoryGossip, params.HistoryLength), - protos: GossipSubDefaultProtocols, - feature: GossipSubDefaultFeatures, - tagTracer: newTagTracer(h.ConnManager()), - params: params, + peers: make(map[peer.ID]protocol.ID), + mesh: make(map[string]map[peer.ID]struct{}), + fanout: make(map[string]map[peer.ID]struct{}), + lastpub: make(map[string]int64), + gossip: make(map[peer.ID][]*pb.ControlIHave), + control: make(map[peer.ID]*pb.ControlMessage), + backoff: make(map[string]map[peer.ID]time.Time), + peerhave: make(map[peer.ID]int), + peerdontwant: make(map[peer.ID]int), + unwanted: make(map[peer.ID]map[checksum]int), + iasked: make(map[peer.ID]int), + outbound: make(map[peer.ID]bool), + connect: make(chan connectInfo, params.MaxPendingConnections), + cab: pstoremem.NewAddrBook(), + mcache: NewMessageCache(params.HistoryGossip, params.HistoryLength), + protos: GossipSubDefaultProtocols, + feature: GossipSubDefaultFeatures, + tagTracer: newTagTracer(h.ConnManager()), + params: params, } } @@ -272,7 +303,10 @@ func DefaultGossipSubParams() GossipSubParams { GraftFloodThreshold: GossipSubGraftFloodThreshold, MaxIHaveLength: GossipSubMaxIHaveLength, MaxIHaveMessages: GossipSubMaxIHaveMessages, + MaxIDontWantMessages: GossipSubMaxIDontWantMessages, IWantFollowupTime: GossipSubIWantFollowupTime, + IDontWantMessageThreshold: GossipSubIDontWantMessageThreshold, + IDontWantMessageTTL: GossipSubIDontWantMessageTTL, SlowHeartbeatWarning: 0.1, } } @@ -421,20 +455,22 @@ func WithGossipSubParams(cfg GossipSubParams) Option { // is the fanout map. Fanout peer lists are expired if we don't publish any // messages to their topic for GossipSubFanoutTTL. 
type GossipSubRouter struct { - p *PubSub - peers map[peer.ID]protocol.ID // peer protocols - direct map[peer.ID]struct{} // direct peers - mesh map[string]map[peer.ID]struct{} // topic meshes - fanout map[string]map[peer.ID]struct{} // topic fanout - lastpub map[string]int64 // last publish time for fanout topics - gossip map[peer.ID][]*pb.ControlIHave // pending gossip - control map[peer.ID]*pb.ControlMessage // pending control messages - peerhave map[peer.ID]int // number of IHAVEs received from peer in the last heartbeat - iasked map[peer.ID]int // number of messages we have asked from peer in the last heartbeat - outbound map[peer.ID]bool // connection direction cache, marks peers with outbound connections - backoff map[string]map[peer.ID]time.Time // prune backoff - connect chan connectInfo // px connection requests - cab peerstore.AddrBook + p *PubSub + peers map[peer.ID]protocol.ID // peer protocols + direct map[peer.ID]struct{} // direct peers + mesh map[string]map[peer.ID]struct{} // topic meshes + fanout map[string]map[peer.ID]struct{} // topic fanout + lastpub map[string]int64 // last publish time for fanout topics + gossip map[peer.ID][]*pb.ControlIHave // pending gossip + control map[peer.ID]*pb.ControlMessage // pending control messages + peerhave map[peer.ID]int // number of IHAVEs received from peer in the last heartbeat + peerdontwant map[peer.ID]int // number of IDONTWANTs received from peer in the last heartbeat + unwanted map[peer.ID]map[checksum]int // TTL of the message ids peers don't want + iasked map[peer.ID]int // number of messages we have asked from peer in the last heartbeat + outbound map[peer.ID]bool // connection direction cache, marks peers with outbound connections + backoff map[string]map[peer.ID]time.Time // prune backoff + connect chan connectInfo // px connection requests + cab peerstore.AddrBook protos []protocol.ID feature GossipSubFeatureTest @@ -543,6 +579,13 @@ func (gs *GossipSubRouter) manageAddrBook() { for { select { case <-gs.p.ctx.Done(): + cabCloser, ok := gs.cab.(io.Closer) + if ok { + errClose := cabCloser.Close() + if errClose != nil { + log.Warnf("failed to close addr book: %v", errClose) + } + } return case ev := <-sub.Out(): switch ev := ev.(type) { @@ -655,6 +698,36 @@ func (gs *GossipSubRouter) AcceptFrom(p peer.ID) AcceptStatus { return gs.gate.AcceptFrom(p) } +// PreValidation sends the IDONTWANT control messages to all the mesh +// peers. They need to be sent right before the validation because they +// should be seen by the peers as soon as possible. 
+func (gs *GossipSubRouter) PreValidation(msgs []*Message) { + tmids := make(map[string][]string) + for _, msg := range msgs { + if len(msg.GetData()) < gs.params.IDontWantMessageThreshold { + continue + } + topic := msg.GetTopic() + tmids[topic] = append(tmids[topic], gs.p.idGen.ID(msg)) + } + for topic, mids := range tmids { + if len(mids) == 0 { + continue + } + // shuffle the message IDs received in the RPC envelope + shuffleStrings(mids) + // send IDONTWANT to all the mesh peers + for p := range gs.mesh[topic] { + // only send to peers that support IDONTWANT + if gs.feature(GossipSubFeatureIdontwant, gs.peers[p]) { + idontwant := []*pb.ControlIDontWant{{MessageIDs: mids}} + out := rpcWithControl(nil, nil, nil, nil, nil, idontwant) + gs.sendRPC(p, out, true) + } + } + } +} + func (gs *GossipSubRouter) HandleRPC(rpc *RPC) { ctl := rpc.GetControl() if ctl == nil { @@ -665,13 +738,14 @@ func (gs *GossipSubRouter) HandleRPC(rpc *RPC) { ihave := gs.handleIWant(rpc.from, ctl) prune := gs.handleGraft(rpc.from, ctl) gs.handlePrune(rpc.from, ctl) + gs.handleIDontWant(rpc.from, ctl) if len(iwant) == 0 && len(ihave) == 0 && len(prune) == 0 { return } - out := rpcWithControl(ihave, nil, iwant, nil, prune) - gs.sendRPC(rpc.from, out) + out := rpcWithControl(ihave, nil, iwant, nil, prune, nil) + gs.sendRPC(rpc.from, out, false) } func (gs *GossipSubRouter) handleIHave(p peer.ID, ctl *pb.ControlMessage) []*pb.ControlIWant { @@ -923,6 +997,26 @@ func (gs *GossipSubRouter) handlePrune(p peer.ID, ctl *pb.ControlMessage) { } } +func (gs *GossipSubRouter) handleIDontWant(p peer.ID, ctl *pb.ControlMessage) { + if gs.unwanted[p] == nil { + gs.unwanted[p] = make(map[checksum]int) + } + + // IDONTWANT flood protection + if gs.peerdontwant[p] >= gs.params.MaxIDontWantMessages { + log.Debugf("IDONTWANT: peer %s has advertised too many times (%d) within this heartbeat interval; ignoring", p, gs.peerdontwant[p]) + return + } + gs.peerdontwant[p]++ + + // Remember all the unwanted message ids + for _, idontwant := range ctl.GetIdontwant() { + for _, mid := range idontwant.GetMessageIDs() { + gs.unwanted[p][computeChecksum(mid)] = gs.params.IDontWantMessageTTL + } + } +} + func (gs *GossipSubRouter) addBackoff(p peer.ID, topic string, isUnsubscribe bool) { backoff := gs.params.PruneBackoff if isUnsubscribe { @@ -1083,6 +1177,12 @@ func (gs *GossipSubRouter) Publish(msg *Message) { } for p := range gmap { + mid := gs.p.idGen.ID(msg) + // Check if this peer has already sent us an IDONTWANT for the message.
+ // If so, don't send it to the peer + if _, ok := gs.unwanted[p][computeChecksum(mid)]; ok { + continue + } tosend[p] = struct{}{} } } @@ -1093,7 +1193,7 @@ func (gs *GossipSubRouter) Publish(msg *Message) { continue } - gs.sendRPC(pid, out) + gs.sendRPC(pid, out, false) } } @@ -1178,17 +1278,17 @@ func (gs *GossipSubRouter) Leave(topic string) { func (gs *GossipSubRouter) sendGraft(p peer.ID, topic string) { graft := []*pb.ControlGraft{{TopicID: &topic}} - out := rpcWithControl(nil, nil, nil, graft, nil) - gs.sendRPC(p, out) + out := rpcWithControl(nil, nil, nil, graft, nil, nil) + gs.sendRPC(p, out, false) } func (gs *GossipSubRouter) sendPrune(p peer.ID, topic string, isUnsubscribe bool) { prune := []*pb.ControlPrune{gs.makePrune(p, topic, gs.doPX, isUnsubscribe)} - out := rpcWithControl(nil, nil, nil, nil, prune) - gs.sendRPC(p, out) + out := rpcWithControl(nil, nil, nil, nil, prune, nil) + gs.sendRPC(p, out, false) } -func (gs *GossipSubRouter) sendRPC(p peer.ID, out *RPC) { +func (gs *GossipSubRouter) sendRPC(p peer.ID, out *RPC, urgent bool) { // do we own the RPC? own := false @@ -1212,14 +1312,14 @@ func (gs *GossipSubRouter) sendRPC(p peer.ID, out *RPC) { delete(gs.gossip, p) } - mch, ok := gs.p.peers[p] + q, ok := gs.p.peers[p] if !ok { return } // If we're below the max message size, go ahead and send if out.Size() < gs.p.maxMessageSize { - gs.doSendRPC(out, p, mch) + gs.doSendRPC(out, p, q, urgent) return } @@ -1231,12 +1331,14 @@ func (gs *GossipSubRouter) sendRPC(p peer.ID, out *RPC) { gs.doDropRPC(out, p, fmt.Sprintf("Dropping oversized RPC. Size: %d, limit: %d. (Over by %d bytes)", rpc.Size(), gs.p.maxMessageSize, rpc.Size()-gs.p.maxMessageSize)) continue } - gs.doSendRPC(rpc, p, mch) + gs.doSendRPC(rpc, p, q, urgent) } } func (gs *GossipSubRouter) doDropRPC(rpc *RPC, p peer.ID, reason string) { - log.Debugf("dropping message to peer %s: %s", p, reason) + if log.Level() <= zapcore.DebugLevel { + log.Debugf("dropping message to peer %s: %s", p, reason) + } gs.tracer.DropRPC(rpc, p) // push control messages that need to be retried ctl := rpc.GetControl() @@ -1245,13 +1347,18 @@ func (gs *GossipSubRouter) doDropRPC(rpc *RPC, p peer.ID, reason string) { } } -func (gs *GossipSubRouter) doSendRPC(rpc *RPC, p peer.ID, mch chan *RPC) { - select { - case mch <- rpc: - gs.tracer.SendRPC(rpc, p) - default: +func (gs *GossipSubRouter) doSendRPC(rpc *RPC, p peer.ID, q *rpcQueue, urgent bool) { + var err error + if urgent { + err = q.UrgentPush(rpc, false) + } else { + err = q.Push(rpc, false) + } + if err != nil { gs.doDropRPC(rpc, p, "queue full") + return } + gs.tracer.SendRPC(rpc, p) } // appendOrMergeRPC appends the given RPCs to the slice, merging them if possible. 
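// Illustrative sketch, not part of the patch: a plausible reason the unwanted
// map is keyed by the fixed-size checksum type instead of raw message ID
// strings. IDONTWANT message IDs come from remote peers and can be arbitrarily
// long; computeChecksum (added near the bottom of this file) keeps IDs of up to
// 32 bytes inline and falls back to a SHA-256 digest for longer ones, so every
// map key costs a constant 33 bytes. The helper below is hypothetical and only
// shows the intended lookup pattern.
func hasIDontWant(unwanted map[checksum]int, mid string) bool {
	_, ok := unwanted[computeChecksum(mid)]
	return ok
}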
@@ -1433,6 +1540,9 @@ func (gs *GossipSubRouter) heartbeat() { // clean up iasked counters gs.clearIHaveCounters() + // clean up IDONTWANT counters + gs.clearIDontWantCounters() + // apply IWANT request penalties gs.applyIwantPenalties() @@ -1685,6 +1795,23 @@ func (gs *GossipSubRouter) clearIHaveCounters() { } } +func (gs *GossipSubRouter) clearIDontWantCounters() { + if len(gs.peerdontwant) > 0 { + // throw away the old map and make a new one + gs.peerdontwant = make(map[peer.ID]int) + } + + // decrement TTLs of all the IDONTWANTs and delete it from the cache when it reaches zero + for _, mids := range gs.unwanted { + for mid := range mids { + mids[mid]-- + if mids[mid] == 0 { + delete(mids, mid) + } + } + } +} + func (gs *GossipSubRouter) applyIwantPenalties() { for p, count := range gs.gossipTracer.GetBrokenPromises() { log.Infof("peer %s didn't follow up in %d IWANT requests; adding penalty", p, count) @@ -1759,8 +1886,8 @@ func (gs *GossipSubRouter) sendGraftPrune(tograft, toprune map[peer.ID][]string, } } - out := rpcWithControl(nil, nil, nil, graft, prune) - gs.sendRPC(p, out) + out := rpcWithControl(nil, nil, nil, graft, prune, nil) + gs.sendRPC(p, out, false) } for p, topics := range toprune { @@ -1769,8 +1896,8 @@ func (gs *GossipSubRouter) sendGraftPrune(tograft, toprune map[peer.ID][]string, prune = append(prune, gs.makePrune(p, topic, gs.doPX && !noPX[p], false)) } - out := rpcWithControl(nil, nil, nil, nil, prune) - gs.sendRPC(p, out) + out := rpcWithControl(nil, nil, nil, nil, prune, nil) + gs.sendRPC(p, out, false) } } @@ -1836,15 +1963,15 @@ func (gs *GossipSubRouter) flush() { // send gossip first, which will also piggyback pending control for p, ihave := range gs.gossip { delete(gs.gossip, p) - out := rpcWithControl(nil, ihave, nil, nil, nil) - gs.sendRPC(p, out) + out := rpcWithControl(nil, ihave, nil, nil, nil, nil) + gs.sendRPC(p, out, false) } // send the remaining control messages that wasn't merged with gossip for p, ctl := range gs.control { delete(gs.control, p) - out := rpcWithControl(nil, nil, nil, ctl.Graft, ctl.Prune) - gs.sendRPC(p, out) + out := rpcWithControl(nil, nil, nil, ctl.Graft, ctl.Prune, nil) + gs.sendRPC(p, out, false) } } @@ -1865,9 +1992,10 @@ func (gs *GossipSubRouter) piggybackGossip(p peer.ID, out *RPC, ihave []*pb.Cont } func (gs *GossipSubRouter) pushControl(p peer.ID, ctl *pb.ControlMessage) { - // remove IHAVE/IWANT from control message, gossip is not retried + // remove IHAVE/IWANT/IDONTWANT from control message, gossip is not retried ctl.Ihave = nil ctl.Iwant = nil + ctl.Idontwant = nil if ctl.Graft != nil || ctl.Prune != nil { gs.control[p] = ctl } @@ -2046,3 +2174,13 @@ func shuffleStrings(lst []string) { lst[i], lst[j] = lst[j], lst[i] } } + +func computeChecksum(mid string) checksum { + var cs checksum + if len(mid) > 32 || len(mid) == 0 { + cs.payload = sha256.Sum256([]byte(mid)) + } else { + cs.length = uint8(copy(cs.payload[:], mid)) + } + return cs +} diff --git a/gossipsub_connmgr_test.go b/gossipsub_connmgr_test.go index 0a97312c..10650a0e 100644 --- a/gossipsub_connmgr_test.go +++ b/gossipsub_connmgr_test.go @@ -7,15 +7,15 @@ import ( "github.com/benbjohnson/clock" "github.com/libp2p/go-libp2p/core/host" - swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing" + "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" - bhost "github.com/libp2p/go-libp2p/p2p/host/blank" "github.com/libp2p/go-libp2p/p2p/net/connmgr" ) func TestGossipsubConnTagMessageDeliveries(t 
*testing.T) { - t.Skip("Test disabled with go-libp2p v0.22.0") // TODO: reenable test when updating to v0.23.0 + t.Skip("flaky test disabled") ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -70,9 +70,14 @@ func TestGossipsubConnTagMessageDeliveries(t *testing.T) { t.Fatal(err) } - netw := swarmt.GenSwarm(t) - defer netw.Close() - h := bhost.NewBlankHost(netw, bhost.WithConnectionManager(connmgrs[i])) + h, err := libp2p.New( + libp2p.ResourceManager(&network.NullResourceManager{}), + libp2p.ConnectionManager(connmgrs[i]), + ) + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { h.Close() }) honestHosts[i] = h honestPeers[h.ID()] = struct{}{} } @@ -83,9 +88,9 @@ func TestGossipsubConnTagMessageDeliveries(t *testing.T) { WithFloodPublish(true)) // sybil squatters to be connected later - sybilHosts := getNetHosts(t, ctx, nSquatter) + sybilHosts := getDefaultHosts(t, nSquatter) for _, h := range sybilHosts { - squatter := &sybilSquatter{h: h} + squatter := &sybilSquatter{h: h, ignoreErrors: true} h.SetStreamHandler(GossipSubID_v10, squatter.handleStream) } @@ -93,7 +98,7 @@ func TestGossipsubConnTagMessageDeliveries(t *testing.T) { connectAll(t, honestHosts) for _, h := range honestHosts { - if len(h.Network().Conns()) != nHonest-1 { + if len(h.Network().Peers()) != nHonest-1 { t.Errorf("expected to have conns to all honest peers, have %d", len(h.Network().Conns())) } } @@ -139,24 +144,12 @@ func TestGossipsubConnTagMessageDeliveries(t *testing.T) { allHosts := append(honestHosts, sybilHosts...) connectAll(t, allHosts) - // verify that we have a bunch of connections - for _, h := range honestHosts { - if len(h.Network().Conns()) != nHonest+nSquatter-1 { - t.Errorf("expected to have conns to all peers, have %d", len(h.Network().Conns())) - } - } - - // force the connection managers to trim, so we don't need to muck about with timing as much - for _, cm := range connmgrs { - cm.TrimOpenConns(ctx) - } - // we should still have conns to all the honest peers, but not the sybils for _, h := range honestHosts { nHonestConns := 0 nDishonestConns := 0 - for _, conn := range h.Network().Conns() { - if _, ok := honestPeers[conn.RemotePeer()]; !ok { + for _, p := range h.Network().Peers() { + if _, ok := honestPeers[p]; !ok { nDishonestConns++ } else { nHonestConns++ diff --git a/gossipsub_feat.go b/gossipsub_feat.go index d5750af3..49c7423c 100644 --- a/gossipsub_feat.go +++ b/gossipsub_feat.go @@ -18,18 +18,22 @@ const ( GossipSubFeatureMesh = iota // Protocol supports Peer eXchange on prune -- gossipsub-v1.1 compatible GossipSubFeaturePX + // Protocol supports IDONTWANT -- gossipsub-v1.2 compatible + GossipSubFeatureIdontwant ) // GossipSubDefaultProtocols is the default gossipsub router protocol list -var GossipSubDefaultProtocols = []protocol.ID{GossipSubID_v11, GossipSubID_v10, FloodSubID} +var GossipSubDefaultProtocols = []protocol.ID{GossipSubID_v12, GossipSubID_v11, GossipSubID_v10, FloodSubID} // GossipSubDefaultFeatures is the feature test function for the default gossipsub protocols func GossipSubDefaultFeatures(feat GossipSubFeature, proto protocol.ID) bool { switch feat { case GossipSubFeatureMesh: - return proto == GossipSubID_v11 || proto == GossipSubID_v10 + return proto == GossipSubID_v12 || proto == GossipSubID_v11 || proto == GossipSubID_v10 case GossipSubFeaturePX: - return proto == GossipSubID_v11 + return proto == GossipSubID_v12 || proto == GossipSubID_v11 + case GossipSubFeatureIdontwant: + return proto == GossipSubID_v12 default: return false } diff 
--git a/gossipsub_feat_test.go b/gossipsub_feat_test.go index 712f16df..ff3709a3 100644 --- a/gossipsub_feat_test.go +++ b/gossipsub_feat_test.go @@ -21,6 +21,9 @@ func TestDefaultGossipSubFeatures(t *testing.T) { if !GossipSubDefaultFeatures(GossipSubFeatureMesh, GossipSubID_v11) { t.Fatal("gossipsub-v1.1 should support Mesh") } + if !GossipSubDefaultFeatures(GossipSubFeatureMesh, GossipSubID_v12) { + t.Fatal("gossipsub-v1.2 should support Mesh") + } if GossipSubDefaultFeatures(GossipSubFeaturePX, FloodSubID) { t.Fatal("floodsub should not support PX") @@ -28,9 +31,25 @@ func TestDefaultGossipSubFeatures(t *testing.T) { if GossipSubDefaultFeatures(GossipSubFeaturePX, GossipSubID_v10) { t.Fatal("gossipsub-v1.0 should not support PX") } - if !GossipSubDefaultFeatures(GossipSubFeatureMesh, GossipSubID_v11) { + if !GossipSubDefaultFeatures(GossipSubFeaturePX, GossipSubID_v11) { t.Fatal("gossipsub-v1.1 should support PX") } + if !GossipSubDefaultFeatures(GossipSubFeaturePX, GossipSubID_v12) { + t.Fatal("gossipsub-v1.2 should support PX") + } + + if GossipSubDefaultFeatures(GossipSubFeatureIdontwant, FloodSubID) { + t.Fatal("floodsub should not support IDONTWANT") + } + if GossipSubDefaultFeatures(GossipSubFeatureIdontwant, GossipSubID_v10) { + t.Fatal("gossipsub-v1.0 should not support IDONTWANT") + } + if GossipSubDefaultFeatures(GossipSubFeatureIdontwant, GossipSubID_v11) { + t.Fatal("gossipsub-v1.1 should not support IDONTWANT") + } + if !GossipSubDefaultFeatures(GossipSubFeatureIdontwant, GossipSubID_v12) { + t.Fatal("gossipsub-v1.2 should support IDONTWANT") + } } func TestGossipSubCustomProtocols(t *testing.T) { @@ -42,7 +61,7 @@ func TestGossipSubCustomProtocols(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 3) + hosts := getDefaultHosts(t, 3) gsubs := getGossipsubs(ctx, hosts[:2], WithGossipSubProtocols(protos, features)) fsub := getPubsub(ctx, hosts[2]) diff --git a/gossipsub_matchfn_test.go b/gossipsub_matchfn_test.go index 279f0d34..4d688d25 100644 --- a/gossipsub_matchfn_test.go +++ b/gossipsub_matchfn_test.go @@ -17,7 +17,7 @@ func TestGossipSubMatchingFn(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - h := getNetHosts(t, ctx, 4) + h := getDefaultHosts(t, 4) psubs := []*PubSub{ getGossipsub(ctx, h[0], WithProtocolMatchFn(protocolNameMatch), WithGossipSubProtocols([]protocol.ID{customsubA100, GossipSubID_v11}, GossipSubDefaultFeatures)), getGossipsub(ctx, h[1], WithProtocolMatchFn(protocolNameMatch), WithGossipSubProtocols([]protocol.ID{customsubA101Beta}, GossipSubDefaultFeatures)), diff --git a/gossipsub_spam_test.go b/gossipsub_spam_test.go index f31daaab..df2fffff 100644 --- a/gossipsub_spam_test.go +++ b/gossipsub_spam_test.go @@ -2,7 +2,8 @@ package pubsub import ( "context" - "math/rand" + "crypto/rand" + "encoding/base64" "strconv" "sync" "testing" @@ -15,6 +16,7 @@ import ( pb "github.com/libp2p/go-libp2p-pubsub/pb" + //lint:ignore SA1019 "github.com/libp2p/go-msgio/protoio" is deprecated "github.com/libp2p/go-msgio/protoio" ) @@ -25,7 +27,7 @@ func TestGossipsubAttackSpamIWANT(t *testing.T) { defer cancel() // Create legitimate and attacker hosts - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) legit := hosts[0] attacker := hosts[1] @@ -120,7 +122,7 @@ func TestGossipsubAttackSpamIWANT(t *testing.T) { // being spammy) iwantlst := []string{DefaultMsgIdFn(msg)} iwant := []*pb.ControlIWant{{MessageIDs: iwantlst}} - orpc := rpcWithControl(nil, nil, 
iwant, nil, nil) + orpc := rpcWithControl(nil, nil, iwant, nil, nil, nil) writeMsg(&orpc.RPC) } }) @@ -142,7 +144,7 @@ func TestGossipsubAttackSpamIHAVE(t *testing.T) { defer cancel() // Create legitimate and attacker hosts - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) legit := hosts[0] attacker := hosts[1] @@ -195,6 +197,7 @@ func TestGossipsubAttackSpamIHAVE(t *testing.T) { Control: &pb.ControlMessage{Graft: []*pb.ControlGraft{{TopicID: sub.Topicid}}}, }) + sub := sub go func() { defer cancel() @@ -206,7 +209,7 @@ func TestGossipsubAttackSpamIHAVE(t *testing.T) { for i := 0; i < 3*GossipSubMaxIHaveLength; i++ { ihavelst := []string{"someid" + strconv.Itoa(i)} ihave := []*pb.ControlIHave{{TopicID: sub.Topicid, MessageIDs: ihavelst}} - orpc := rpcWithControl(nil, ihave, nil, nil, nil) + orpc := rpcWithControl(nil, ihave, nil, nil, nil, nil) writeMsg(&orpc.RPC) } @@ -236,7 +239,7 @@ func TestGossipsubAttackSpamIHAVE(t *testing.T) { for i := 0; i < 3*GossipSubMaxIHaveLength; i++ { ihavelst := []string{"someid" + strconv.Itoa(i+100)} ihave := []*pb.ControlIHave{{TopicID: sub.Topicid, MessageIDs: ihavelst}} - orpc := rpcWithControl(nil, ihave, nil, nil, nil) + orpc := rpcWithControl(nil, ihave, nil, nil, nil, nil) writeMsg(&orpc.RPC) } @@ -292,7 +295,7 @@ func TestGossipsubAttackGRAFTNonExistentTopic(t *testing.T) { defer cancel() // Create legitimate and attacker hosts - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) legit := hosts[0] attacker := hosts[1] @@ -376,7 +379,7 @@ func TestGossipsubAttackGRAFTDuringBackoff(t *testing.T) { defer cancel() // Create legitimate and attacker hosts - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) legit := hosts[0] attacker := hosts[1] @@ -430,6 +433,7 @@ func TestGossipsubAttackGRAFTDuringBackoff(t *testing.T) { Control: &pb.ControlMessage{Graft: graft}, }) + sub := sub go func() { defer cancel() @@ -617,7 +621,7 @@ func TestGossipsubAttackInvalidMessageSpam(t *testing.T) { defer cancel() // Create legitimate and attacker hosts - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) legit := hosts[0] attacker := hosts[1] @@ -762,11 +766,139 @@ func TestGossipsubAttackInvalidMessageSpam(t *testing.T) { <-ctx.Done() } +// Test that when Gossipsub receives too many IDONTWANT messages from a peer +func TestGossipsubAttackSpamIDONTWANT(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + hosts := getDefaultHosts(t, 3) + + msgID := func(pmsg *pb.Message) string { + // silly content-based test message-ID: just use the data as whole + return base64.URLEncoding.EncodeToString(pmsg.Data) + } + + psubs := make([]*PubSub, 2) + psubs[0] = getGossipsub(ctx, hosts[0], WithMessageIdFn(msgID)) + psubs[1] = getGossipsub(ctx, hosts[1], WithMessageIdFn(msgID)) + + topic := "foobar" + for _, ps := range psubs { + _, err := ps.Subscribe(topic) + if err != nil { + t.Fatal(err) + } + } + + // Wait a bit after the last message before checking the result + msgWaitMax := time.Second + GossipSubHeartbeatInterval + msgTimer := time.NewTimer(msgWaitMax) + + // Checks we received some messages + var expMid string + var actMids []string + checkMsgs := func() { + if len(actMids) == 0 { + t.Fatalf("Expected some messages when the maximum number of IDONTWANTs is reached") + } + if actMids[0] != expMid { + t.Fatalf("The expected message is incorrect") + } + if len(actMids) > 1 { + t.Fatalf("The spam prevention should be reset after the heartbeat") + } + } + + // Wait for the timer to 
expire + go func() { + select { + case <-msgTimer.C: + checkMsgs() + cancel() + return + case <-ctx.Done(): + checkMsgs() + } + }() + + newMockGS(ctx, t, hosts[2], func(writeMsg func(*pb.RPC), irpc *pb.RPC) { + // Each time the host receives a message + for _, msg := range irpc.GetPublish() { + actMids = append(actMids, msgID(msg)) + } + // When the middle peer connects it will send us its subscriptions + for _, sub := range irpc.GetSubscriptions() { + if sub.GetSubscribe() { + // Reply by subcribing to the topic and grafting to the middle peer + writeMsg(&pb.RPC{ + Subscriptions: []*pb.RPC_SubOpts{{Subscribe: sub.Subscribe, Topicid: sub.Topicid}}, + Control: &pb.ControlMessage{Graft: []*pb.ControlGraft{{TopicID: sub.Topicid}}}, + }) + + go func() { + // Wait for a short interval to make sure the middle peer + // received and processed the subscribe + graft + time.Sleep(100 * time.Millisecond) + + // Generate a message and send IDONTWANT to the middle peer + data := make([]byte, 16) + var mid string + for i := 0; i < 1+GossipSubMaxIDontWantMessages; i++ { + rand.Read(data) + mid = msgID(&pb.Message{Data: data}) + writeMsg(&pb.RPC{ + Control: &pb.ControlMessage{Idontwant: []*pb.ControlIDontWant{{MessageIDs: []string{mid}}}}, + }) + } + // The host should receives this message id because the maximum was reached + expMid = mid + + // Wait for a short interval to make sure the middle peer + // received and processed the IDONTWANTs + time.Sleep(100 * time.Millisecond) + + // Publish the message from the first peer + if err := psubs[0].Publish(topic, data); err != nil { + t.Error(err) + return // cannot call t.Fatal in a non-test goroutine + } + + // Wait for the next heartbeat so that the prevention will be reset + select { + case <-ctx.Done(): + return + case <-time.After(GossipSubHeartbeatInterval): + } + + // Test IDONTWANT again to see that it now works again + rand.Read(data) + mid = msgID(&pb.Message{Data: data}) + writeMsg(&pb.RPC{ + Control: &pb.ControlMessage{Idontwant: []*pb.ControlIDontWant{{MessageIDs: []string{mid}}}}, + }) + time.Sleep(100 * time.Millisecond) + if err := psubs[0].Publish(topic, data); err != nil { + t.Error(err) + return // cannot call t.Fatal in a non-test goroutine + } + }() + } + } + }) + + connect(t, hosts[0], hosts[1]) + connect(t, hosts[1], hosts[2]) + + <-ctx.Done() +} + type mockGSOnRead func(writeMsg func(*pb.RPC), irpc *pb.RPC) func newMockGS(ctx context.Context, t *testing.T, attacker host.Host, onReadMsg mockGSOnRead) { + newMockGSWithVersion(ctx, t, attacker, protocol.ID("/meshsub/1.2.0"), onReadMsg) +} + +func newMockGSWithVersion(ctx context.Context, t *testing.T, attacker host.Host, gossipSubID protocol.ID, onReadMsg mockGSOnRead) { // Listen on the gossipsub protocol - const gossipSubID = protocol.ID("/meshsub/1.0.0") const maxMessageSize = 1024 * 1024 attacker.SetStreamHandler(gossipSubID, func(stream network.Stream) { // When an incoming stream is opened, set up an outgoing stream diff --git a/gossipsub_test.go b/gossipsub_test.go index 5933f4b5..d515654f 100644 --- a/gossipsub_test.go +++ b/gossipsub_test.go @@ -3,9 +3,12 @@ package pubsub import ( "bytes" "context" + crand "crypto/rand" + "encoding/base64" "fmt" "io" - "math/rand" + mrand "math/rand" + "sort" "sync" "sync/atomic" "testing" @@ -13,16 +16,14 @@ import ( pb "github.com/libp2p/go-libp2p-pubsub/pb" - "github.com/libp2p/go-libp2p" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" 
"github.com/libp2p/go-libp2p/core/peerstore" + "github.com/libp2p/go-libp2p/core/protocol" "github.com/libp2p/go-libp2p/core/record" - bhost "github.com/libp2p/go-libp2p/p2p/host/blank" - swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing" - + //lint:ignore SA1019 "github.com/libp2p/go-msgio/protoio" is deprecated "github.com/libp2p/go-msgio/protoio" ) @@ -45,7 +46,7 @@ func getGossipsubs(ctx context.Context, hs []host.Host, opts ...Option) []*PubSu func TestSparseGossipsub(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getGossipsubs(ctx, hosts) @@ -67,7 +68,7 @@ func TestSparseGossipsub(t *testing.T) { for i := 0; i < 100; i++ { msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) - owner := rand.Intn(len(psubs)) + owner := mrand.Intn(len(psubs)) psubs[owner].Publish("foobar", msg) @@ -86,7 +87,7 @@ func TestSparseGossipsub(t *testing.T) { func TestDenseGossipsub(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getGossipsubs(ctx, hosts) @@ -108,7 +109,7 @@ func TestDenseGossipsub(t *testing.T) { for i := 0; i < 100; i++ { msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) - owner := rand.Intn(len(psubs)) + owner := mrand.Intn(len(psubs)) psubs[owner].Publish("foobar", msg) @@ -127,7 +128,7 @@ func TestDenseGossipsub(t *testing.T) { func TestGossipsubFanout(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getGossipsubs(ctx, hosts) @@ -196,7 +197,7 @@ func TestGossipsubFanout(t *testing.T) { func TestGossipsubFanoutMaintenance(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getGossipsubs(ctx, hosts) @@ -281,7 +282,7 @@ func TestGossipsubFanoutExpiry(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 10) + hosts := getDefaultHosts(t, 10) psubs := getGossipsubs(ctx, hosts) @@ -340,7 +341,7 @@ func TestGossipsubFanoutExpiry(t *testing.T) { func TestGossipsubGossip(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getGossipsubs(ctx, hosts) @@ -362,7 +363,7 @@ func TestGossipsubGossip(t *testing.T) { for i := 0; i < 100; i++ { msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) - owner := rand.Intn(len(psubs)) + owner := mrand.Intn(len(psubs)) psubs[owner].Publish("foobar", msg) @@ -388,7 +389,7 @@ func TestGossipsubGossipPiggyback(t *testing.T) { t.Skip("test no longer relevant; gossip propagation has become eager") ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getGossipsubs(ctx, hosts) @@ -420,7 +421,7 @@ func TestGossipsubGossipPiggyback(t *testing.T) { for i := 0; i < 100; i++ { msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) - owner := rand.Intn(len(psubs)) + owner := mrand.Intn(len(psubs)) psubs[owner].Publish("foobar", msg) psubs[owner].Publish("bazcrux", msg) @@ -457,7 +458,7 @@ func TestGossipsubGossipPropagation(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer 
cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getGossipsubs(ctx, hosts) hosts1 := hosts[:GossipSubD+1] @@ -537,7 +538,7 @@ func TestGossipsubGossipPropagation(t *testing.T) { func TestGossipsubPrune(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getGossipsubs(ctx, hosts) @@ -567,7 +568,7 @@ func TestGossipsubPrune(t *testing.T) { for i := 0; i < 10; i++ { msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) - owner := rand.Intn(len(psubs)) + owner := mrand.Intn(len(psubs)) psubs[owner].Publish("foobar", msg) @@ -586,7 +587,7 @@ func TestGossipsubPrune(t *testing.T) { func TestGossipsubPruneBackoffTime(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 10) + hosts := getDefaultHosts(t, 10) // App specific score that we'll change later. currentScoreForHost0 := int32(0) @@ -665,7 +666,7 @@ func TestGossipsubPruneBackoffTime(t *testing.T) { msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) // Don't publish from host 0, since everyone should have pruned it. - owner := rand.Intn(len(psubs)-1) + 1 + owner := mrand.Intn(len(psubs)-1) + 1 psubs[owner].Publish("foobar", msg) @@ -684,7 +685,7 @@ func TestGossipsubPruneBackoffTime(t *testing.T) { func TestGossipsubGraft(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getGossipsubs(ctx, hosts) @@ -710,7 +711,7 @@ func TestGossipsubGraft(t *testing.T) { for i := 0; i < 100; i++ { msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) - owner := rand.Intn(len(psubs)) + owner := mrand.Intn(len(psubs)) psubs[owner].Publish("foobar", msg) @@ -729,7 +730,7 @@ func TestGossipsubGraft(t *testing.T) { func TestGossipsubRemovePeer(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getGossipsubs(ctx, hosts) @@ -759,7 +760,7 @@ func TestGossipsubRemovePeer(t *testing.T) { for i := 0; i < 10; i++ { msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) - owner := 5 + rand.Intn(len(psubs)-5) + owner := 5 + mrand.Intn(len(psubs)-5) psubs[owner].Publish("foobar", msg) @@ -779,7 +780,7 @@ func TestGossipsubGraftPruneRetry(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 10) + hosts := getDefaultHosts(t, 10) psubs := getGossipsubs(ctx, hosts) denseConnect(t, hosts) @@ -807,7 +808,7 @@ func TestGossipsubGraftPruneRetry(t *testing.T) { for i, topic := range topics { msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) - owner := rand.Intn(len(psubs)) + owner := mrand.Intn(len(psubs)) psubs[owner].Publish(topic, msg) @@ -829,7 +830,7 @@ func TestGossipsubControlPiggyback(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 10) + hosts := getDefaultHosts(t, 10) psubs := getGossipsubs(ctx, hosts) denseConnect(t, hosts) @@ -853,7 +854,7 @@ func TestGossipsubControlPiggyback(t *testing.T) { // create a background flood of messages that overloads the queues done := make(chan struct{}) go func() { - owner := rand.Intn(len(psubs)) + owner := mrand.Intn(len(psubs)) for i := 0; i < 10000; i++ { msg := []byte("background flooooood") 
psubs[owner].Publish("flood", msg) @@ -891,7 +892,7 @@ func TestGossipsubControlPiggyback(t *testing.T) { for i, topic := range topics { msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) - owner := rand.Intn(len(psubs)) + owner := mrand.Intn(len(psubs)) psubs[owner].Publish(topic, msg) @@ -910,7 +911,7 @@ func TestGossipsubControlPiggyback(t *testing.T) { func TestMixedGossipsub(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 30) + hosts := getDefaultHosts(t, 30) gsubs := getGossipsubs(ctx, hosts[:20]) fsubs := getPubsubs(ctx, hosts[20:]) @@ -934,7 +935,7 @@ func TestMixedGossipsub(t *testing.T) { for i := 0; i < 100; i++ { msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) - owner := rand.Intn(len(psubs)) + owner := mrand.Intn(len(psubs)) psubs[owner].Publish("foobar", msg) @@ -954,7 +955,7 @@ func TestGossipsubMultihops(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 6) + hosts := getDefaultHosts(t, 6) psubs := getGossipsubs(ctx, hosts) @@ -997,7 +998,7 @@ func TestGossipsubTreeTopology(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 10) + hosts := getDefaultHosts(t, 10) psubs := getGossipsubs(ctx, hosts) connect(t, hosts[0], hosts[1]) @@ -1061,7 +1062,7 @@ func TestGossipsubStarTopology(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getGossipsubs(ctx, hosts, WithPeerExchange(true), WithFloodPublish(true)) // configure the center of the star with a very low D @@ -1223,7 +1224,7 @@ func TestGossipsubDirectPeers(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - h := getNetHosts(t, ctx, 3) + h := getDefaultHosts(t, 3) psubs := []*PubSub{ getGossipsub(ctx, h[0], WithDirectConnectTicks(2)), getGossipsub(ctx, h[1], WithDirectPeers([]peer.AddrInfo{{ID: h[2].ID(), Addrs: h[2].Addrs()}}), WithDirectConnectTicks(2)), @@ -1287,7 +1288,7 @@ func TestGossipSubPeerFilter(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - h := getNetHosts(t, ctx, 3) + h := getDefaultHosts(t, 3) psubs := []*PubSub{ getGossipsub(ctx, h[0], WithPeerFilter(func(pid peer.ID, topic string) bool { return pid == h[1].ID() @@ -1329,7 +1330,7 @@ func TestGossipsubDirectPeersFanout(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - h := getNetHosts(t, ctx, 3) + h := getDefaultHosts(t, 3) psubs := []*PubSub{ getGossipsub(ctx, h[0]), getGossipsub(ctx, h[1], WithDirectPeers([]peer.AddrInfo{{ID: h[2].ID(), Addrs: h[2].Addrs()}})), @@ -1416,7 +1417,7 @@ func TestGossipsubFloodPublish(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getGossipsubs(ctx, hosts, WithFloodPublish(true)) // build the star @@ -1451,7 +1452,7 @@ func TestGossipsubEnoughPeers(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getGossipsubs(ctx, hosts) for _, ps := range psubs { @@ -1500,7 +1501,7 @@ func TestGossipsubCustomParams(t *testing.T) { wantedMaxPendingConns := 23 params.MaxPendingConnections = wantedMaxPendingConns - hosts := getNetHosts(t, ctx, 1) + hosts := 
getDefaultHosts(t, 1) psubs := getGossipsubs(ctx, hosts, WithGossipSubParams(params)) @@ -1529,7 +1530,7 @@ func TestGossipsubNegativeScore(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getGossipsubs(ctx, hosts, WithPeerScore( &PeerScoreParams{ @@ -1613,7 +1614,7 @@ func TestGossipsubScoreValidatorEx(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 3) + hosts := getDefaultHosts(t, 3) psubs := getGossipsubs(ctx, hosts, WithPeerScore( &PeerScoreParams{ @@ -1701,8 +1702,7 @@ func TestGossipsubPiggybackControl(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - h := bhost.NewBlankHost(swarmt.GenSwarm(t)) - defer h.Close() + h := getDefaultHosts(t, 1)[0] ps := getGossipsub(ctx, h) blah := peer.ID("bogotr0n") @@ -1750,7 +1750,7 @@ func TestGossipsubMultipleGraftTopics(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) psubs := getGossipsubs(ctx, hosts) sparseConnect(t, hosts) @@ -1818,7 +1818,7 @@ func TestGossipsubOpportunisticGrafting(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 50) + hosts := getDefaultHosts(t, 50) // pubsubs for the first 10 hosts psubs := getGossipsubs(ctx, hosts[:10], WithFloodPublish(true), @@ -1919,7 +1919,7 @@ func TestGossipSubLeaveTopic(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - h := getNetHosts(t, ctx, 2) + h := getDefaultHosts(t, 2) psubs := []*PubSub{ getGossipsub(ctx, h[0]), getGossipsub(ctx, h[1]), @@ -1928,13 +1928,11 @@ func TestGossipSubLeaveTopic(t *testing.T) { connect(t, h[0], h[1]) // Join all peers - var subs []*Subscription for _, ps := range psubs { - sub, err := ps.Subscribe("test") + _, err := ps.Subscribe("test") if err != nil { t.Fatal(err) } - subs = append(subs, sub) } time.Sleep(time.Second) @@ -1990,7 +1988,7 @@ func TestGossipSubJoinTopic(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - h := getNetHosts(t, ctx, 3) + h := getDefaultHosts(t, 3) psubs := []*PubSub{ getGossipsub(ctx, h[0]), getGossipsub(ctx, h[1]), @@ -2009,13 +2007,11 @@ func TestGossipSubJoinTopic(t *testing.T) { router0.backoff["test"] = peerMap // Join all peers - var subs []*Subscription for _, ps := range psubs { - sub, err := ps.Subscribe("test") + _, err := ps.Subscribe("test") if err != nil { t.Fatal(err) } - subs = append(subs, sub) } time.Sleep(time.Second) @@ -2032,7 +2028,8 @@ func TestGossipSubJoinTopic(t *testing.T) { } type sybilSquatter struct { - h host.Host + h host.Host + ignoreErrors bool // set to false to ignore connection/stream errors. 
} func (sq *sybilSquatter) handleStream(s network.Stream) { @@ -2040,7 +2037,10 @@ func (sq *sybilSquatter) handleStream(s network.Stream) { os, err := sq.h.NewStream(context.Background(), s.Conn().RemotePeer(), GossipSubID_v10) if err != nil { - panic(err) + if !sq.ignoreErrors { + panic(err) + } + return } // send a subscription for test in the output stream to become candidate for GRAFT @@ -2051,7 +2051,10 @@ func (sq *sybilSquatter) handleStream(s network.Stream) { topic := "test" err = w.WriteMsg(&pb.RPC{Subscriptions: []*pb.RPC_SubOpts{{Subscribe: &truth, Topicid: &topic}}}) if err != nil { - panic(err) + if !sq.ignoreErrors { + panic(err) + } + return } var rpc pb.RPC @@ -2072,7 +2075,7 @@ func TestGossipsubPeerScoreInspect(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) inspector := &mockPeerScoreInspector{} psub1 := getGossipsub(ctx, hosts[0], @@ -2132,7 +2135,7 @@ func TestGossipsubPeerScoreResetTopicParams(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 1) + hosts := getDefaultHosts(t, 1) ps := getGossipsub(ctx, hosts[0], WithPeerScore( @@ -2199,7 +2202,7 @@ func TestGossipsubRPCFragmentation(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) ps := getGossipsub(ctx, hosts[0]) // make a fake peer that requests everything through IWANT gossip @@ -2222,7 +2225,7 @@ func TestGossipsubRPCFragmentation(t *testing.T) { msgSize := 20000 for i := 0; i < nMessages; i++ { msg := make([]byte, msgSize) - rand.Read(msg) + crand.Read(msg) ps.Publish("test", msg) time.Sleep(20 * time.Millisecond) } @@ -2326,7 +2329,7 @@ func (iwe *iwantEverything) handleStream(s network.Stream) { } } - out := rpcWithControl(nil, nil, iwants, nil, prunes) + out := rpcWithControl(nil, nil, iwants, nil, prunes, nil) err = w.WriteMsg(out) if err != nil { panic(err) @@ -2362,7 +2365,7 @@ func TestFragmentRPCFunction(t *testing.T) { mkMsg := func(size int) *pb.Message { msg := &pb.Message{} msg.Data = make([]byte, size-4) // subtract the protobuf overhead, so msg.Size() returns requested size - rand.Read(msg.Data) + crand.Read(msg.Data) return msg } @@ -2476,7 +2479,7 @@ func TestFragmentRPCFunction(t *testing.T) { messageIds := make([]string, msgsPerTopic) for m := 0; m < msgsPerTopic; m++ { mid := make([]byte, messageIdSize) - rand.Read(mid) + crand.Read(mid) messageIds[m] = string(mid) } rpc.Control.Ihave[i] = &pb.ControlIHave{MessageIDs: messageIds} @@ -2497,7 +2500,7 @@ func TestFragmentRPCFunction(t *testing.T) { // It should not be present in the fragmented messages, but smaller IDs should be rpc.Reset() giantIdBytes := make([]byte, limit*2) - rand.Read(giantIdBytes) + crand.Read(giantIdBytes) rpc.Control = &pb.ControlMessage{ Iwant: []*pb.ControlIWant{ {MessageIDs: []string{"hello", string(giantIdBytes)}}, @@ -2553,21 +2556,6 @@ func FuzzAppendOrMergeRPC(f *testing.F) { }) } -func getDefaultHosts(t *testing.T, n int) []host.Host { - var out []host.Host - - for i := 0; i < n; i++ { - h, err := libp2p.New(libp2p.ResourceManager(&network.NullResourceManager{})) - if err != nil { - t.Fatal(err) - } - t.Cleanup(func() { h.Close() }) - out = append(out, h) - } - - return out -} - func TestGossipsubManagesAnAddressBook(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -2605,3 +2593,593 @@ func 
TestGossipsubManagesAnAddressBook(t *testing.T) { t.Fatalf("expected no addrs, got %d addrs", len(addrs)) } } + +func TestGossipsubIdontwantSend(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + hosts := getDefaultHosts(t, 3) + + msgID := func(pmsg *pb.Message) string { + // silly content-based test message-ID: just use the data as whole + return base64.URLEncoding.EncodeToString(pmsg.Data) + } + + validated := false + validate := func(context.Context, peer.ID, *Message) bool { + time.Sleep(100 * time.Millisecond) + validated = true + return true + } + + params := DefaultGossipSubParams() + params.IDontWantMessageThreshold = 16 + + psubs := make([]*PubSub, 2) + psubs[0] = getGossipsub(ctx, hosts[0], + WithGossipSubParams(params), + WithMessageIdFn(msgID)) + psubs[1] = getGossipsub(ctx, hosts[1], + WithGossipSubParams(params), + WithMessageIdFn(msgID), + WithDefaultValidator(validate)) + + topic := "foobar" + for _, ps := range psubs { + _, err := ps.Subscribe(topic) + if err != nil { + t.Fatal(err) + } + } + + var expMids []string + var actMids []string + + // Used to publish a message with random data + publishMsg := func() { + data := make([]byte, 16) + crand.Read(data) + m := &pb.Message{Data: data} + expMids = append(expMids, msgID(m)) + + if err := psubs[0].Publish(topic, data); err != nil { + t.Fatal(err) + } + } + + // Wait a bit after the last message before checking we got the right messages + msgWaitMax := time.Second + msgTimer := time.NewTimer(msgWaitMax) + + // Checks we received the right IDONTWANT messages + checkMsgs := func() { + sort.Strings(actMids) + sort.Strings(expMids) + + if len(actMids) != len(expMids) { + t.Fatalf("Expected %d IDONTWANT messages, got %d", len(expMids), len(actMids)) + } + for i, expMid := range expMids { + actMid := actMids[i] + if actMid != expMid { + t.Fatalf("Expected the id of %s in the %d'th IDONTWANT messages, got %s", expMid, i+1, actMid) + } + } + } + + // Wait for the timer to expire + go func() { + select { + case <-msgTimer.C: + checkMsgs() + cancel() + return + case <-ctx.Done(): + checkMsgs() + } + }() + + newMockGS(ctx, t, hosts[2], func(writeMsg func(*pb.RPC), irpc *pb.RPC) { + // When the middle peer connects it will send us its subscriptions + for _, sub := range irpc.GetSubscriptions() { + if sub.GetSubscribe() { + // Reply by subcribing to the topic and grafting to the middle peer + writeMsg(&pb.RPC{ + Subscriptions: []*pb.RPC_SubOpts{{Subscribe: sub.Subscribe, Topicid: sub.Topicid}}, + Control: &pb.ControlMessage{Graft: []*pb.ControlGraft{{TopicID: sub.Topicid}}}, + }) + + go func() { + // Wait for a short interval to make sure the middle peer + // received and processed the subscribe + graft + time.Sleep(100 * time.Millisecond) + + // Publish messages from the first peer + for i := 0; i < 10; i++ { + publishMsg() + } + }() + } + } + + // Each time the middle peer sends an IDONTWANT message + for _, idonthave := range irpc.GetControl().GetIdontwant() { + // If true, it means that, when we get IDONTWANT, the middle peer has done validation + // already, which should not be the case + if validated { + t.Fatalf("IDONTWANT should be sent before doing validation") + } + for _, mid := range idonthave.GetMessageIDs() { + // Add the message to the list and reset the timer + actMids = append(actMids, mid) + msgTimer.Reset(msgWaitMax) + } + } + }) + + connect(t, hosts[0], hosts[1]) + connect(t, hosts[1], hosts[2]) + + <-ctx.Done() +} + +func TestGossipsubIdontwantReceive(t *testing.T) { + ctx, 
cancel := context.WithCancel(context.Background())
+	defer cancel()
+	hosts := getDefaultHosts(t, 3)
+
+	msgID := func(pmsg *pb.Message) string {
+		// silly content-based test message-ID: just use the data as a whole
+		return base64.URLEncoding.EncodeToString(pmsg.Data)
+	}
+
+	psubs := make([]*PubSub, 2)
+	psubs[0] = getGossipsub(ctx, hosts[0], WithMessageIdFn(msgID))
+	psubs[1] = getGossipsub(ctx, hosts[1], WithMessageIdFn(msgID))
+
+	topic := "foobar"
+	for _, ps := range psubs {
+		_, err := ps.Subscribe(topic)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	// Wait a bit after the last message before checking the result
+	msgWaitMax := time.Second
+	msgTimer := time.NewTimer(msgWaitMax)
+
+	// Checks that we received no messages
+	received := false
+	checkMsgs := func() {
+		if received {
+			t.Fatalf("Expected no messages received after IDONTWANT")
+		}
+	}
+
+	// Wait for the timer to expire
+	go func() {
+		select {
+		case <-msgTimer.C:
+			checkMsgs()
+			cancel()
+			return
+		case <-ctx.Done():
+			checkMsgs()
+		}
+	}()
+
+	newMockGS(ctx, t, hosts[2], func(writeMsg func(*pb.RPC), irpc *pb.RPC) {
+		// Check if it receives any message
+		if len(irpc.GetPublish()) > 0 {
+			received = true
+		}
+		// When the middle peer connects it will send us its subscriptions
+		for _, sub := range irpc.GetSubscriptions() {
+			if sub.GetSubscribe() {
+				// Reply by subscribing to the topic and grafting to the middle peer
+				writeMsg(&pb.RPC{
+					Subscriptions: []*pb.RPC_SubOpts{{Subscribe: sub.Subscribe, Topicid: sub.Topicid}},
+					Control: &pb.ControlMessage{Graft: []*pb.ControlGraft{{TopicID: sub.Topicid}}},
+				})
+
+				go func() {
+					// Wait for a short interval to make sure the middle peer
+					// received and processed the subscribe + graft
+					time.Sleep(100 * time.Millisecond)
+
+					// Generate a message and send IDONTWANT to the middle peer
+					data := make([]byte, 16)
+					crand.Read(data)
+					mid := msgID(&pb.Message{Data: data})
+					writeMsg(&pb.RPC{
+						Control: &pb.ControlMessage{Idontwant: []*pb.ControlIDontWant{{MessageIDs: []string{mid}}}},
+					})
+
+					// Wait for a short interval to make sure the middle peer
+					// received and processed the IDONTWANTs
+					time.Sleep(100 * time.Millisecond)
+
+					// Publish the message from the first peer
+					if err := psubs[0].Publish(topic, data); err != nil {
+						t.Error(err)
+						return // cannot call t.Fatal in a non-test goroutine
+					}
+				}()
+			}
+		}
+	})
+
+	connect(t, hosts[0], hosts[1])
+	connect(t, hosts[1], hosts[2])
+
+	<-ctx.Done()
+}
+
+// Test that non-mesh peers will not get IDONTWANT
+func TestGossipsubIdontwantNonMesh(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	hosts := getDefaultHosts(t, 3)
+
+	params := DefaultGossipSubParams()
+	params.IDontWantMessageThreshold = 16
+	psubs := getGossipsubs(ctx, hosts[:2], WithGossipSubParams(params))
+
+	topic := "foobar"
+	for _, ps := range psubs {
+		_, err := ps.Subscribe(topic)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	// Used to publish a message with random data
+	publishMsg := func() {
+		data := make([]byte, 16)
+		crand.Read(data)
+
+		if err := psubs[0].Publish(topic, data); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	// Wait a bit after the last message before checking we got the right messages
+	msgWaitMax := time.Second
+	msgTimer := time.NewTimer(msgWaitMax)
+	received := false
+
+	// Checks that we received no IDONTWANT
+	checkMsgs := func() {
+		if received {
+			t.Fatalf("No IDONTWANT is expected")
+		}
+	}
+
+	// Wait for the timer to expire
+	go func() {
+		select {
+		case <-msgTimer.C:
+			checkMsgs()
+			cancel()
+			return
+		case <-ctx.Done():
+			checkMsgs()
+		}
+	}()
+
+	newMockGS(ctx, t, hosts[2], func(writeMsg func(*pb.RPC), irpc *pb.RPC) {
+		// When the middle peer connects it will send us its subscriptions
+		for _, sub := range irpc.GetSubscriptions() {
+			if sub.GetSubscribe() {
+				// Reply by subscribing to the topic and pruning to the middle peer to make sure
+				// that it's not in the mesh
+				writeMsg(&pb.RPC{
+					Subscriptions: []*pb.RPC_SubOpts{{Subscribe: sub.Subscribe, Topicid: sub.Topicid}},
+					Control: &pb.ControlMessage{Prune: []*pb.ControlPrune{{TopicID: sub.Topicid}}},
+				})
+
+				go func() {
+					// Wait for a short interval to make sure the middle peer
+					// received and processed the subscribe
+					time.Sleep(100 * time.Millisecond)
+
+					// Publish messages from the first peer
+					for i := 0; i < 10; i++ {
+						publishMsg()
+					}
+				}()
+			}
+		}
+
+		// Register any IDONTWANT messages from the middle peer
+		for range irpc.GetControl().GetIdontwant() {
+			received = true
+		}
+	})
+
+	connect(t, hosts[0], hosts[1])
+	connect(t, hosts[1], hosts[2])
+
+	<-ctx.Done()
+}
+
+// Test that peers with incompatible versions will not get IDONTWANT
+func TestGossipsubIdontwantIncompat(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	hosts := getDefaultHosts(t, 3)
+
+	params := DefaultGossipSubParams()
+	params.IDontWantMessageThreshold = 16
+	psubs := getGossipsubs(ctx, hosts[:2], WithGossipSubParams(params))
+
+	topic := "foobar"
+	for _, ps := range psubs {
+		_, err := ps.Subscribe(topic)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	// Used to publish a message with random data
+	publishMsg := func() {
+		data := make([]byte, 16)
+		crand.Read(data)
+
+		if err := psubs[0].Publish(topic, data); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	// Wait a bit after the last message before checking we got the right messages
+	msgWaitMax := time.Second
+	msgTimer := time.NewTimer(msgWaitMax)
+	received := false
+
+	// Checks that we received no IDONTWANT
+	checkMsgs := func() {
+		if received {
+			t.Fatalf("No IDONTWANT is expected")
+		}
+	}
+
+	// Wait for the timer to expire
+	go func() {
+		select {
+		case <-msgTimer.C:
+			checkMsgs()
+			cancel()
+			return
+		case <-ctx.Done():
+			checkMsgs()
+		}
+	}()
+
+	// Use the old GossipSub version
+	newMockGSWithVersion(ctx, t, hosts[2], protocol.ID("/meshsub/1.1.0"), func(writeMsg func(*pb.RPC), irpc *pb.RPC) {
+		// When the middle peer connects it will send us its subscriptions
+		for _, sub := range irpc.GetSubscriptions() {
+			if sub.GetSubscribe() {
+				// Reply by subscribing to the topic and grafting to the middle peer
+				writeMsg(&pb.RPC{
+					Subscriptions: []*pb.RPC_SubOpts{{Subscribe: sub.Subscribe, Topicid: sub.Topicid}},
+					Control: &pb.ControlMessage{Graft: []*pb.ControlGraft{{TopicID: sub.Topicid}}},
+				})
+
+				go func() {
+					// Wait for a short interval to make sure the middle peer
+					// received and processed the subscribe + graft
+					time.Sleep(100 * time.Millisecond)
+
+					// Publish messages from the first peer
+					for i := 0; i < 10; i++ {
+						publishMsg()
+					}
+				}()
+			}
+		}
+
+		// Register any IDONTWANT messages from the middle peer
+		for range irpc.GetControl().GetIdontwant() {
+			received = true
+		}
+	})
+
+	connect(t, hosts[0], hosts[1])
+	connect(t, hosts[1], hosts[2])
+
+	<-ctx.Done()
+}
+
+// Test that IDONTWANT will not be sent for small messages
+func TestGossipsubIdontwantSmallMessage(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	hosts := getDefaultHosts(t, 3)
+
+	params := DefaultGossipSubParams()
+	params.IDontWantMessageThreshold = 16
+	psubs := getGossipsubs(ctx, hosts[:2], WithGossipSubParams(params))
+
+	topic := "foobar"
+	for _, ps := range psubs {
+		_, err := ps.Subscribe(topic)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	// Used to publish a message with random data
+	publishMsg := func() {
+		data := make([]byte, 8)
+		crand.Read(data)
+
+		if err := psubs[0].Publish(topic, data); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	// Wait a bit after the last message before checking we got the right messages
+	msgWaitMax := time.Second
+	msgTimer := time.NewTimer(msgWaitMax)
+	received := false
+
+	// Checks that we received no IDONTWANT
+	checkMsgs := func() {
+		if received {
+			t.Fatalf("No IDONTWANT is expected")
+		}
+	}
+
+	// Wait for the timer to expire
+	go func() {
+		select {
+		case <-msgTimer.C:
+			checkMsgs()
+			cancel()
+			return
+		case <-ctx.Done():
+			checkMsgs()
+		}
+	}()
+
+	newMockGS(ctx, t, hosts[2], func(writeMsg func(*pb.RPC), irpc *pb.RPC) {
+		// When the middle peer connects it will send us its subscriptions
+		for _, sub := range irpc.GetSubscriptions() {
+			if sub.GetSubscribe() {
+				// Reply by subscribing to the topic and grafting to the middle peer
+				writeMsg(&pb.RPC{
+					Subscriptions: []*pb.RPC_SubOpts{{Subscribe: sub.Subscribe, Topicid: sub.Topicid}},
+					Control: &pb.ControlMessage{Graft: []*pb.ControlGraft{{TopicID: sub.Topicid}}},
+				})
+
+				go func() {
+					// Wait for a short interval to make sure the middle peer
+					// received and processed the subscribe
+					time.Sleep(100 * time.Millisecond)
+
+					// Publish messages from the first peer
+					for i := 0; i < 10; i++ {
+						publishMsg()
+					}
+				}()
+			}
+		}
+
+		// Register any IDONTWANT messages from the middle peer
+		for range irpc.GetControl().GetIdontwant() {
+			received = true
+		}
+	})
+
+	connect(t, hosts[0], hosts[1])
+	connect(t, hosts[1], hosts[2])
+
+	<-ctx.Done()
+}
+
+// Test that IDONTWANT will be cleared when it's old enough
+func TestGossipsubIdontwantClear(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	hosts := getDefaultHosts(t, 3)
+
+	msgID := func(pmsg *pb.Message) string {
+		// silly content-based test message-ID: just use the data as a whole
+		return base64.URLEncoding.EncodeToString(pmsg.Data)
+	}
+
+	psubs := make([]*PubSub, 2)
+	psubs[0] = getGossipsub(ctx, hosts[0], WithMessageIdFn(msgID))
+	psubs[1] = getGossipsub(ctx, hosts[1], WithMessageIdFn(msgID))
+
+	topic := "foobar"
+	for _, ps := range psubs {
+		_, err := ps.Subscribe(topic)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	// Wait a bit after the last message before checking the result
+	msgWaitMax := 5 * time.Second
+	msgTimer := time.NewTimer(msgWaitMax)
+
+	// Checks that we received some message after the IDONTWANT is cleared
+	received := false
+	checkMsgs := func() {
+		if !received {
+			t.Fatalf("Expected some message after the IDONTWANT is cleared")
+		}
+	}
+
+	// Wait for the timer to expire
+	go func() {
+		select {
+		case <-msgTimer.C:
+			checkMsgs()
+			cancel()
+			return
+		case <-ctx.Done():
+			checkMsgs()
+		}
+	}()
+
+	newMockGS(ctx, t, hosts[2], func(writeMsg func(*pb.RPC), irpc *pb.RPC) {
+		// Check if it receives any message
+		if len(irpc.GetPublish()) > 0 {
+			received = true
+		}
+		// When the middle peer connects it will send us its subscriptions
+		for _, sub := range irpc.GetSubscriptions() {
+			if sub.GetSubscribe() {
+				// Reply by subscribing to the topic and grafting to the middle peer
+				writeMsg(&pb.RPC{
+					Subscriptions: []*pb.RPC_SubOpts{{Subscribe: sub.Subscribe,
Topicid: sub.Topicid}}, + Control: &pb.ControlMessage{Graft: []*pb.ControlGraft{{TopicID: sub.Topicid}}}, + }) + + go func() { + // Wait for a short interval to make sure the middle peer + // received and processed the subscribe + graft + time.Sleep(100 * time.Millisecond) + + // Generate a message and send IDONTWANT to the middle peer + data := make([]byte, 16) + crand.Read(data) + mid := msgID(&pb.Message{Data: data}) + writeMsg(&pb.RPC{ + Control: &pb.ControlMessage{Idontwant: []*pb.ControlIDontWant{{MessageIDs: []string{mid}}}}, + }) + + // Wait for a short interval to make sure the middle peer + // received and processed the IDONTWANTs + time.Sleep(100 * time.Millisecond) + + // Wait for 4 heartbeats to make sure the IDONTWANT is cleared + time.Sleep(4 * time.Second) + + // Publish the message from the first peer + if err := psubs[0].Publish(topic, data); err != nil { + t.Error(err) + return // cannot call t.Fatal in a non-test goroutine + } + }() + } + } + }) + + connect(t, hosts[0], hosts[1]) + connect(t, hosts[1], hosts[2]) + + <-ctx.Done() +} + +func BenchmarkAllocDoDropRPC(b *testing.B) { + gs := GossipSubRouter{tracer: &pubsubTracer{}} + + for i := 0; i < b.N; i++ { + gs.doDropRPC(&RPC{}, "peerID", "reason") + } +} diff --git a/notify.go b/notify.go deleted file mode 100644 index f560d398..00000000 --- a/notify.go +++ /dev/null @@ -1,75 +0,0 @@ -package pubsub - -import ( - "github.com/libp2p/go-libp2p/core/network" - "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" -) - -var _ network.Notifiee = (*PubSubNotif)(nil) - -type PubSubNotif PubSub - -func (p *PubSubNotif) OpenedStream(n network.Network, s network.Stream) { -} - -func (p *PubSubNotif) ClosedStream(n network.Network, s network.Stream) { -} - -func (p *PubSubNotif) Connected(n network.Network, c network.Conn) { - // ignore transient connections - if c.Stat().Limited { - return - } - - go func() { - p.newPeersPrioLk.RLock() - p.newPeersMx.Lock() - p.newPeersPend[c.RemotePeer()] = struct{}{} - p.newPeersMx.Unlock() - p.newPeersPrioLk.RUnlock() - - select { - case p.newPeers <- struct{}{}: - default: - } - }() -} - -func (p *PubSubNotif) Disconnected(n network.Network, c network.Conn) { -} - -func (p *PubSubNotif) Listen(n network.Network, _ ma.Multiaddr) { -} - -func (p *PubSubNotif) ListenClose(n network.Network, _ ma.Multiaddr) { -} - -func (p *PubSubNotif) Initialize() { - isTransient := func(pid peer.ID) bool { - for _, c := range p.host.Network().ConnsToPeer(pid) { - if !c.Stat().Limited { - return false - } - } - - return true - } - - p.newPeersPrioLk.RLock() - p.newPeersMx.Lock() - for _, pid := range p.host.Network().Peers() { - if isTransient(pid) { - continue - } - - p.newPeersPend[pid] = struct{}{} - } - p.newPeersMx.Unlock() - p.newPeersPrioLk.RUnlock() - - select { - case p.newPeers <- struct{}{}: - default: - } -} diff --git a/notify_test.go b/notify_test.go new file mode 100644 index 00000000..fa5b755a --- /dev/null +++ b/notify_test.go @@ -0,0 +1,76 @@ +package pubsub + +import ( + "context" + "testing" + "time" + + "github.com/libp2p/go-libp2p/p2p/protocol/identify" +) + +func TestNotifyPeerProtocolsUpdated(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getDefaultHosts(t, 2) + + // Initialize id services. 
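+	// The identify services are needed here because, with the network.Notifiee
+	// in notify.go removed, pubsub now learns about peers from the
+	// EvtPeerIdentificationCompleted / EvtPeerProtocolsUpdated events that
+	// identify emits on the event bus (see peer_notify.go below).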
+ { + ids1, err := identify.NewIDService(hosts[0]) + if err != nil { + t.Fatal(err) + } + ids1.Start() + defer ids1.Close() + + ids2, err := identify.NewIDService(hosts[1]) + if err != nil { + t.Fatal(err) + } + ids2.Start() + defer ids2.Close() + } + + psubs0 := getPubsub(ctx, hosts[0]) + connect(t, hosts[0], hosts[1]) + // Delay to make sure that peers are connected. + <-time.After(time.Millisecond * 100) + psubs1 := getPubsub(ctx, hosts[1]) + + // Pubsub 0 joins topic "test". + topic0, err := psubs0.Join("test") + if err != nil { + t.Fatal(err) + } + defer topic0.Close() + + sub0, err := topic0.Subscribe() + if err != nil { + t.Fatal(err) + } + defer sub0.Cancel() + + // Pubsub 1 joins topic "test". + topic1, err := psubs1.Join("test") + if err != nil { + t.Fatal(err) + } + defer topic1.Close() + + sub1, err := topic1.Subscribe() + if err != nil { + t.Fatal(err) + } + defer sub1.Cancel() + + // Delay before checking results (similar to most tests). + <-time.After(time.Millisecond * 100) + + if len(topic0.ListPeers()) == 0 { + t.Fatalf("topic0 should at least have 1 peer") + } + + if len(topic1.ListPeers()) == 0 { + t.Fatalf("topic1 should at least have 1 peer") + } +} diff --git a/pb/rpc.pb.go b/pb/rpc.pb.go index c6a2475f..213cdcc3 100644 --- a/pb/rpc.pb.go +++ b/pb/rpc.pb.go @@ -5,10 +5,11 @@ package pubsub_pb import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" io "io" math "math" math_bits "math/bits" + + proto "github.com/gogo/protobuf/proto" ) // Reference imports to suppress errors if they are not otherwise used. @@ -228,13 +229,14 @@ func (m *Message) GetKey() []byte { } type ControlMessage struct { - Ihave []*ControlIHave `protobuf:"bytes,1,rep,name=ihave" json:"ihave,omitempty"` - Iwant []*ControlIWant `protobuf:"bytes,2,rep,name=iwant" json:"iwant,omitempty"` - Graft []*ControlGraft `protobuf:"bytes,3,rep,name=graft" json:"graft,omitempty"` - Prune []*ControlPrune `protobuf:"bytes,4,rep,name=prune" json:"prune,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Ihave []*ControlIHave `protobuf:"bytes,1,rep,name=ihave" json:"ihave,omitempty"` + Iwant []*ControlIWant `protobuf:"bytes,2,rep,name=iwant" json:"iwant,omitempty"` + Graft []*ControlGraft `protobuf:"bytes,3,rep,name=graft" json:"graft,omitempty"` + Prune []*ControlPrune `protobuf:"bytes,4,rep,name=prune" json:"prune,omitempty"` + Idontwant []*ControlIDontWant `protobuf:"bytes,5,rep,name=idontwant" json:"idontwant,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ControlMessage) Reset() { *m = ControlMessage{} } @@ -298,6 +300,13 @@ func (m *ControlMessage) GetPrune() []*ControlPrune { return nil } +func (m *ControlMessage) GetIdontwant() []*ControlIDontWant { + if m != nil { + return m.Idontwant + } + return nil +} + type ControlIHave struct { TopicID *string `protobuf:"bytes,1,opt,name=topicID" json:"topicID,omitempty"` // implementors from other languages should use bytes here - go protobuf emits invalid utf8 strings @@ -512,6 +521,54 @@ func (m *ControlPrune) GetBackoff() uint64 { return 0 } +type ControlIDontWant struct { + // implementors from other languages should use bytes here - go protobuf emits invalid utf8 strings + MessageIDs []string `protobuf:"bytes,1,rep,name=messageIDs" json:"messageIDs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m 
*ControlIDontWant) Reset() { *m = ControlIDontWant{} } +func (m *ControlIDontWant) String() string { return proto.CompactTextString(m) } +func (*ControlIDontWant) ProtoMessage() {} +func (*ControlIDontWant) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{7} +} +func (m *ControlIDontWant) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControlIDontWant) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ControlIDontWant.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ControlIDontWant) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControlIDontWant.Merge(m, src) +} +func (m *ControlIDontWant) XXX_Size() int { + return m.Size() +} +func (m *ControlIDontWant) XXX_DiscardUnknown() { + xxx_messageInfo_ControlIDontWant.DiscardUnknown(m) +} + +var xxx_messageInfo_ControlIDontWant proto.InternalMessageInfo + +func (m *ControlIDontWant) GetMessageIDs() []string { + if m != nil { + return m.MessageIDs + } + return nil +} + type PeerInfo struct { PeerID []byte `protobuf:"bytes,1,opt,name=peerID" json:"peerID,omitempty"` SignedPeerRecord []byte `protobuf:"bytes,2,opt,name=signedPeerRecord" json:"signedPeerRecord,omitempty"` @@ -524,7 +581,7 @@ func (m *PeerInfo) Reset() { *m = PeerInfo{} } func (m *PeerInfo) String() string { return proto.CompactTextString(m) } func (*PeerInfo) ProtoMessage() {} func (*PeerInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{7} + return fileDescriptor_77a6da22d6a3feb1, []int{8} } func (m *PeerInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -576,43 +633,46 @@ func init() { proto.RegisterType((*ControlIWant)(nil), "pubsub.pb.ControlIWant") proto.RegisterType((*ControlGraft)(nil), "pubsub.pb.ControlGraft") proto.RegisterType((*ControlPrune)(nil), "pubsub.pb.ControlPrune") + proto.RegisterType((*ControlIDontWant)(nil), "pubsub.pb.ControlIDontWant") proto.RegisterType((*PeerInfo)(nil), "pubsub.pb.PeerInfo") } func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) } var fileDescriptor_77a6da22d6a3feb1 = []byte{ - // 480 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xc1, 0x8e, 0xd3, 0x3c, - 0x10, 0xc7, 0xe5, 0x6d, 0xbb, 0xd9, 0xcc, 0xe6, 0xfb, 0xb4, 0x32, 0x68, 0x31, 0x08, 0x55, 0x55, - 0x4e, 0x01, 0x41, 0x0e, 0xcb, 0x95, 0x0b, 0xb4, 0x12, 0x9b, 0x03, 0x50, 0x99, 0x03, 0x67, 0x27, - 0x75, 0xba, 0xd1, 0x6e, 0x63, 0x63, 0x3b, 0x8b, 0x78, 0x08, 0xde, 0x8b, 0x03, 0x07, 0x1e, 0x01, - 0xf5, 0xc6, 0x5b, 0x20, 0x3b, 0x4e, 0x9a, 0xa5, 0x94, 0x9b, 0xe7, 0xef, 0xdf, 0xcc, 0xfc, 0x3d, - 0x1e, 0x08, 0x95, 0x2c, 0x52, 0xa9, 0x84, 0x11, 0x38, 0x94, 0x4d, 0xae, 0x9b, 0x3c, 0x95, 0x79, - 0xfc, 0x0b, 0xc1, 0x88, 0x2e, 0xe7, 0xf8, 0x25, 0xfc, 0xa7, 0x9b, 0x5c, 0x17, 0xaa, 0x92, 0xa6, - 0x12, 0xb5, 0x26, 0x68, 0x36, 0x4a, 0x4e, 0x2f, 0xce, 0xd3, 0x1e, 0x4d, 0xe9, 0x72, 0x9e, 0x7e, - 0x68, 0xf2, 0xf7, 0xd2, 0x68, 0x7a, 0x17, 0xc6, 0xcf, 0x20, 0x90, 0x4d, 0x7e, 0x53, 0xe9, 0x2b, - 0x72, 0xe4, 0xf2, 0xf0, 0x20, 0xef, 0x2d, 0xd7, 0x9a, 0xad, 0x39, 0xed, 0x10, 0xfc, 0x02, 0x82, - 0x42, 0xd4, 0x46, 0x89, 0x1b, 0x32, 0x9a, 0xa1, 0xe4, 0xf4, 0xe2, 0xe1, 0x80, 0x9e, 0xb7, 0x37, - 0x7d, 0x92, 0x27, 0x1f, 0xbd, 0x82, 0xc0, 0x37, 0xc7, 0x8f, 0x21, 0xf4, 0xed, 0x73, 0x4e, 0xd0, - 0x0c, 0x25, 0x27, 0x74, 0x27, 0x60, 0x02, 0x81, 0x11, 0xb2, 
0x2a, 0xaa, 0x15, 0x39, 0x9a, 0xa1, - 0x24, 0xa4, 0x5d, 0x18, 0x7f, 0x45, 0x10, 0xf8, 0xba, 0x18, 0xc3, 0xb8, 0x54, 0x62, 0xe3, 0xd2, - 0x23, 0xea, 0xce, 0x56, 0x5b, 0x31, 0xc3, 0x5c, 0x5a, 0x44, 0xdd, 0x19, 0xdf, 0x87, 0x89, 0xe6, - 0x9f, 0x6a, 0xe1, 0x9c, 0x46, 0xb4, 0x0d, 0xac, 0xea, 0x8a, 0x92, 0xb1, 0xeb, 0xd0, 0x06, 0xce, - 0x57, 0xb5, 0xae, 0x99, 0x69, 0x14, 0x27, 0x13, 0xc7, 0xef, 0x04, 0x7c, 0x06, 0xa3, 0x6b, 0xfe, - 0x85, 0x1c, 0x3b, 0xdd, 0x1e, 0xe3, 0xef, 0x08, 0xfe, 0xbf, 0xfb, 0x5c, 0xfc, 0x1c, 0x26, 0xd5, - 0x15, 0xbb, 0xe5, 0x7e, 0xfc, 0x0f, 0xf6, 0x07, 0x93, 0x5d, 0xb2, 0x5b, 0x4e, 0x5b, 0xca, 0xe1, - 0x9f, 0x59, 0x6d, 0xfc, 0xd4, 0xff, 0x86, 0x7f, 0x64, 0xb5, 0xa1, 0x2d, 0x65, 0xf1, 0xb5, 0x62, - 0xa5, 0x21, 0xa3, 0x43, 0xf8, 0x1b, 0x7b, 0x4d, 0x5b, 0xca, 0xe2, 0x52, 0x35, 0x35, 0x27, 0xe3, - 0x43, 0xf8, 0xd2, 0x5e, 0xd3, 0x96, 0x8a, 0x2f, 0x21, 0x1a, 0x7a, 0xec, 0x3f, 0x22, 0x5b, 0xb8, - 0x29, 0x77, 0x1f, 0x91, 0x2d, 0xf0, 0x14, 0x60, 0xd3, 0x3e, 0x38, 0x5b, 0x68, 0xe7, 0x3d, 0xa4, - 0x03, 0x25, 0x4e, 0x77, 0x95, 0xac, 0xfd, 0x3f, 0x78, 0xb4, 0xc7, 0x27, 0x3d, 0xef, 0xfc, 0x1f, - 0xee, 0x1c, 0x6f, 0x7a, 0xd2, 0x59, 0xff, 0x87, 0xc7, 0x27, 0x30, 0x91, 0x9c, 0x2b, 0xed, 0x47, - 0x7b, 0x6f, 0xf0, 0xf8, 0x25, 0xe7, 0x2a, 0xab, 0x4b, 0x41, 0x5b, 0xc2, 0x16, 0xc9, 0x59, 0x71, - 0x2d, 0xca, 0xd2, 0x6d, 0xc9, 0x98, 0x76, 0x61, 0xfc, 0x0e, 0x4e, 0x3a, 0x18, 0x9f, 0xc3, 0xb1, - 0xc5, 0x7d, 0xa7, 0x88, 0xfa, 0x08, 0x3f, 0x85, 0x33, 0xbb, 0x24, 0x7c, 0x65, 0x49, 0xca, 0x0b, - 0xa1, 0x56, 0x7e, 0x03, 0xf7, 0xf4, 0xd7, 0xd1, 0xb7, 0xed, 0x14, 0xfd, 0xd8, 0x4e, 0xd1, 0xcf, - 0xed, 0x14, 0xfd, 0x0e, 0x00, 0x00, 0xff, 0xff, 0xb2, 0xf8, 0xc4, 0x6e, 0xd2, 0x03, 0x00, 0x00, + // 511 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xcd, 0x6e, 0x13, 0x31, + 0x10, 0xc7, 0xe5, 0x7c, 0x34, 0xdd, 0xe9, 0x82, 0x22, 0x83, 0x8a, 0xf9, 0x50, 0x14, 0xed, 0x29, + 0x20, 0xd8, 0x43, 0x38, 0x21, 0x71, 0x81, 0x44, 0xa2, 0x39, 0x00, 0x91, 0x39, 0x70, 0xde, 0xdd, + 0x38, 0xe9, 0xaa, 0x8d, 0x6d, 0x6c, 0x6f, 0x11, 0x4f, 0xc0, 0x89, 0xf7, 0xe2, 0xc8, 0x23, 0xa0, + 0xdc, 0x78, 0x0b, 0xe4, 0x59, 0xe7, 0xa3, 0x4d, 0x03, 0x37, 0xcf, 0xf8, 0x37, 0xfe, 0xff, 0x67, + 0xc6, 0x10, 0x19, 0x5d, 0xa4, 0xda, 0x28, 0xa7, 0x68, 0xa4, 0xab, 0xdc, 0x56, 0x79, 0xaa, 0xf3, + 0xe4, 0x0f, 0x81, 0x26, 0x9f, 0x8e, 0xe8, 0x6b, 0xb8, 0x63, 0xab, 0xdc, 0x16, 0xa6, 0xd4, 0xae, + 0x54, 0xd2, 0x32, 0xd2, 0x6f, 0x0e, 0x4e, 0x86, 0xa7, 0xe9, 0x06, 0x4d, 0xf9, 0x74, 0x94, 0x7e, + 0xaa, 0xf2, 0x8f, 0xda, 0x59, 0x7e, 0x1d, 0xa6, 0xcf, 0xa1, 0xa3, 0xab, 0xfc, 0xb2, 0xb4, 0xe7, + 0xac, 0x81, 0x75, 0x74, 0xa7, 0xee, 0xbd, 0xb0, 0x36, 0x5b, 0x08, 0xbe, 0x46, 0xe8, 0x4b, 0xe8, + 0x14, 0x4a, 0x3a, 0xa3, 0x2e, 0x59, 0xb3, 0x4f, 0x06, 0x27, 0xc3, 0x87, 0x3b, 0xf4, 0xa8, 0xbe, + 0xd9, 0x14, 0x05, 0xf2, 0xd1, 0x1b, 0xe8, 0x04, 0x71, 0xfa, 0x04, 0xa2, 0x20, 0x9f, 0x0b, 0x46, + 0xfa, 0x64, 0x70, 0xcc, 0xb7, 0x09, 0xca, 0xa0, 0xe3, 0x94, 0x2e, 0x8b, 0x72, 0xc6, 0x1a, 0x7d, + 0x32, 0x88, 0xf8, 0x3a, 0x4c, 0x7e, 0x10, 0xe8, 0x84, 0x77, 0x29, 0x85, 0xd6, 0xdc, 0xa8, 0x25, + 0x96, 0xc7, 0x1c, 0xcf, 0x3e, 0x37, 0xcb, 0x5c, 0x86, 0x65, 0x31, 0xc7, 0x33, 0xbd, 0x0f, 0x6d, + 0x2b, 0xbe, 0x48, 0x85, 0x4e, 0x63, 0x5e, 0x07, 0x3e, 0x8b, 0x8f, 0xb2, 0x16, 0x2a, 0xd4, 0x01, + 0xfa, 0x2a, 0x17, 0x32, 0x73, 0x95, 0x11, 0xac, 0x8d, 0xfc, 0x36, 0x41, 0xbb, 0xd0, 0xbc, 0x10, + 0xdf, 0xd8, 0x11, 0xe6, 0xfd, 0x31, 0xf9, 0xde, 0x80, 0xbb, 0xd7, 0xdb, 0xa5, 0x2f, 0xa0, 0x5d, + 0x9e, 0x67, 0x57, 0x22, 0x8c, 0xff, 
0xc1, 0xfe, 0x60, 0x26, 0x67, 0xd9, 0x95, 0xe0, 0x35, 0x85, + 0xf8, 0xd7, 0x4c, 0xba, 0x30, 0xf5, 0xdb, 0xf0, 0xcf, 0x99, 0x74, 0xbc, 0xa6, 0x3c, 0xbe, 0x30, + 0xd9, 0xdc, 0xb1, 0xe6, 0x21, 0xfc, 0x9d, 0xbf, 0xe6, 0x35, 0xe5, 0x71, 0x6d, 0x2a, 0x29, 0x58, + 0xeb, 0x10, 0x3e, 0xf5, 0xd7, 0xbc, 0xa6, 0xe8, 0x2b, 0x88, 0xca, 0x99, 0x92, 0x0e, 0x0d, 0xb5, + 0xb1, 0xe4, 0xf1, 0x2d, 0x86, 0xc6, 0x4a, 0x3a, 0x34, 0xb5, 0xa5, 0x93, 0x33, 0x88, 0x77, 0xdb, + 0xdb, 0xec, 0x70, 0x32, 0xc6, 0x05, 0xad, 0x77, 0x38, 0x19, 0xd3, 0x1e, 0xc0, 0xb2, 0x9e, 0xd5, + 0x64, 0x6c, 0xb1, 0xed, 0x88, 0xef, 0x64, 0x92, 0x74, 0xfb, 0x92, 0x17, 0xb9, 0xc1, 0x93, 0x3d, + 0x7e, 0xb0, 0xe1, 0xb1, 0xf5, 0xc3, 0xca, 0xc9, 0x72, 0x43, 0x62, 0xd7, 0xff, 0xf0, 0xf8, 0x14, + 0xda, 0x5a, 0x08, 0x63, 0xc3, 0x56, 0xee, 0xed, 0x0c, 0x61, 0x2a, 0x84, 0x99, 0xc8, 0xb9, 0xe2, + 0x35, 0xe1, 0x1f, 0xc9, 0xb3, 0xe2, 0x42, 0xcd, 0xe7, 0xf8, 0xc1, 0x5a, 0x7c, 0x1d, 0x26, 0x43, + 0xe8, 0xde, 0x9c, 0xd8, 0x7f, 0x9b, 0xf9, 0x00, 0xc7, 0x6b, 0x01, 0x7a, 0x0a, 0x47, 0x5e, 0x22, + 0xb8, 0x8b, 0x79, 0x88, 0xe8, 0x33, 0xe8, 0xfa, 0x3f, 0x29, 0x66, 0x9e, 0xe4, 0xa2, 0x50, 0x66, + 0x16, 0x3e, 0xfc, 0x5e, 0xfe, 0x6d, 0xfc, 0x73, 0xd5, 0x23, 0xbf, 0x56, 0x3d, 0xf2, 0x7b, 0xd5, + 0x23, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xba, 0x73, 0x8e, 0xbf, 0x41, 0x04, 0x00, 0x00, } func (m *RPC) Marshal() (dAtA []byte, err error) { @@ -819,6 +879,20 @@ func (m *ControlMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Idontwant) > 0 { + for iNdEx := len(m.Idontwant) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Idontwant[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } if len(m.Prune) > 0 { for iNdEx := len(m.Prune) - 1; iNdEx >= 0; iNdEx-- { { @@ -1044,6 +1118,42 @@ func (m *ControlPrune) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ControlIDontWant) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ControlIDontWant) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControlIDontWant) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.MessageIDs) > 0 { + for iNdEx := len(m.MessageIDs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MessageIDs[iNdEx]) + copy(dAtA[i:], m.MessageIDs[iNdEx]) + i = encodeVarintRpc(dAtA, i, uint64(len(m.MessageIDs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *PeerInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1209,6 +1319,12 @@ func (m *ControlMessage) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } + if len(m.Idontwant) > 0 { + for _, e := range m.Idontwant { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -1296,6 +1412,24 @@ func (m *ControlPrune) Size() (n int) { return n } +func (m *ControlIDontWant) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.MessageIDs) > 0 { + for _, s := range m.MessageIDs { + l = len(s) + n += 1 + l + 
sovRpc(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *PeerInfo) Size() (n int) { if m == nil { return 0 @@ -2001,6 +2135,40 @@ func (m *ControlMessage) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Idontwant", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Idontwant = append(m.Idontwant, &ControlIDontWant{}) + if err := m.Idontwant[len(m.Idontwant)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -2444,6 +2612,89 @@ func (m *ControlPrune) Unmarshal(dAtA []byte) error { } return nil } +func (m *ControlIDontWant) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ControlIDontWant: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ControlIDontWant: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MessageIDs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MessageIDs = append(m.MessageIDs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *PeerInfo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/pb/rpc.proto b/pb/rpc.proto index e5df8401..bd0234c3 100644 --- a/pb/rpc.proto +++ b/pb/rpc.proto @@ -28,6 +28,7 @@ message ControlMessage { repeated ControlIWant iwant = 2; repeated ControlGraft graft = 3; repeated ControlPrune prune = 4; + repeated ControlIDontWant idontwant = 5; } message ControlIHave { @@ -51,7 +52,12 @@ message ControlPrune { optional uint64 backoff = 3; } +message ControlIDontWant { + // implementors from other languages should use bytes here - go protobuf emits invalid utf8 strings + repeated string messageIDs = 1; +} + message PeerInfo { optional bytes peerID = 1; optional bytes signedPeerRecord = 2; -} \ No newline at end of file +} diff --git a/pb/trace.pb.go b/pb/trace.pb.go index dd806155..9361c393 100644 --- a/pb/trace.pb.go +++ b/pb/trace.pb.go @@ -5,10 +5,11 @@ package pubsub_pb import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" io "io" math "math" math_bits "math/bits" + + proto "github.com/gogo/protobuf/proto" ) // Reference imports to suppress errors if they are not otherwise used. @@ -1159,13 +1160,14 @@ func (m *TraceEvent_SubMeta) GetTopic() string { } type TraceEvent_ControlMeta struct { - Ihave []*TraceEvent_ControlIHaveMeta `protobuf:"bytes,1,rep,name=ihave" json:"ihave,omitempty"` - Iwant []*TraceEvent_ControlIWantMeta `protobuf:"bytes,2,rep,name=iwant" json:"iwant,omitempty"` - Graft []*TraceEvent_ControlGraftMeta `protobuf:"bytes,3,rep,name=graft" json:"graft,omitempty"` - Prune []*TraceEvent_ControlPruneMeta `protobuf:"bytes,4,rep,name=prune" json:"prune,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Ihave []*TraceEvent_ControlIHaveMeta `protobuf:"bytes,1,rep,name=ihave" json:"ihave,omitempty"` + Iwant []*TraceEvent_ControlIWantMeta `protobuf:"bytes,2,rep,name=iwant" json:"iwant,omitempty"` + Graft []*TraceEvent_ControlGraftMeta `protobuf:"bytes,3,rep,name=graft" json:"graft,omitempty"` + Prune []*TraceEvent_ControlPruneMeta `protobuf:"bytes,4,rep,name=prune" json:"prune,omitempty"` + Idontwant []*TraceEvent_ControlIDontWantMeta `protobuf:"bytes,5,rep,name=idontwant" json:"idontwant,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *TraceEvent_ControlMeta) Reset() { *m = TraceEvent_ControlMeta{} } @@ -1229,6 +1231,13 @@ func (m *TraceEvent_ControlMeta) GetPrune() []*TraceEvent_ControlPruneMeta { return nil } +func (m *TraceEvent_ControlMeta) GetIdontwant() []*TraceEvent_ControlIDontWantMeta { + if m != nil { + return m.Idontwant + } + return nil +} + type TraceEvent_ControlIHaveMeta struct { Topic *string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"` MessageIDs [][]byte `protobuf:"bytes,2,rep,name=messageIDs" json:"messageIDs,omitempty"` @@ -1433,6 +1442,53 @@ func (m *TraceEvent_ControlPruneMeta) GetPeers() [][]byte { return nil } +type TraceEvent_ControlIDontWantMeta struct { + MessageIDs [][]byte `protobuf:"bytes,1,rep,name=messageIDs" json:"messageIDs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TraceEvent_ControlIDontWantMeta) Reset() { *m = TraceEvent_ControlIDontWantMeta{} } +func (m *TraceEvent_ControlIDontWantMeta) String() string { return proto.CompactTextString(m) } +func 
(*TraceEvent_ControlIDontWantMeta) ProtoMessage() {} +func (*TraceEvent_ControlIDontWantMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_0571941a1d628a80, []int{0, 21} +} +func (m *TraceEvent_ControlIDontWantMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TraceEvent_ControlIDontWantMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TraceEvent_ControlIDontWantMeta.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TraceEvent_ControlIDontWantMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_TraceEvent_ControlIDontWantMeta.Merge(m, src) +} +func (m *TraceEvent_ControlIDontWantMeta) XXX_Size() int { + return m.Size() +} +func (m *TraceEvent_ControlIDontWantMeta) XXX_DiscardUnknown() { + xxx_messageInfo_TraceEvent_ControlIDontWantMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_TraceEvent_ControlIDontWantMeta proto.InternalMessageInfo + +func (m *TraceEvent_ControlIDontWantMeta) GetMessageIDs() [][]byte { + if m != nil { + return m.MessageIDs + } + return nil +} + type TraceEventBatch struct { Batch []*TraceEvent `protobuf:"bytes,1,rep,name=batch" json:"batch,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -1504,76 +1560,79 @@ func init() { proto.RegisterType((*TraceEvent_ControlIWantMeta)(nil), "pubsub.pb.TraceEvent.ControlIWantMeta") proto.RegisterType((*TraceEvent_ControlGraftMeta)(nil), "pubsub.pb.TraceEvent.ControlGraftMeta") proto.RegisterType((*TraceEvent_ControlPruneMeta)(nil), "pubsub.pb.TraceEvent.ControlPruneMeta") + proto.RegisterType((*TraceEvent_ControlIDontWantMeta)(nil), "pubsub.pb.TraceEvent.ControlIDontWantMeta") proto.RegisterType((*TraceEventBatch)(nil), "pubsub.pb.TraceEventBatch") } func init() { proto.RegisterFile("trace.proto", fileDescriptor_0571941a1d628a80) } var fileDescriptor_0571941a1d628a80 = []byte{ - // 999 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x96, 0x51, 0x6f, 0xda, 0x56, - 0x14, 0xc7, 0xe7, 0x00, 0x01, 0x0e, 0x84, 0x78, 0x77, 0x6d, 0x65, 0xb1, 0x36, 0x62, 0x59, 0x55, - 0x21, 0x4d, 0x42, 0x6a, 0xa4, 0xa9, 0x0f, 0x6b, 0xab, 0x11, 0xec, 0x26, 0x44, 0x24, 0xb1, 0x0e, - 0x24, 0x7b, 0xcc, 0x0c, 0xdc, 0x35, 0x8e, 0xc0, 0xb6, 0xec, 0x0b, 0x53, 0x9f, 0xf6, 0xb4, 0xef, - 0xd6, 0xb7, 0xed, 0x23, 0x54, 0xf9, 0x24, 0xd3, 0xbd, 0xd7, 0x36, 0x36, 0xd8, 0xb4, 0x8b, 0xfa, - 0xe6, 0x73, 0xf3, 0xff, 0x9d, 0x7b, 0xce, 0xbd, 0xe7, 0x7f, 0x03, 0xd4, 0x98, 0x6f, 0x4d, 0x68, - 0xc7, 0xf3, 0x5d, 0xe6, 0x92, 0xaa, 0xb7, 0x18, 0x07, 0x8b, 0x71, 0xc7, 0x1b, 0x1f, 0x7e, 0x7a, - 0x02, 0x30, 0xe2, 0x7f, 0x32, 0x96, 0xd4, 0x61, 0xa4, 0x03, 0x45, 0xf6, 0xc1, 0xa3, 0x9a, 0xd2, - 0x52, 0xda, 0x8d, 0xa3, 0x66, 0x27, 0x16, 0x76, 0x56, 0xa2, 0xce, 0xe8, 0x83, 0x47, 0x51, 0xe8, - 0xc8, 0x13, 0xd8, 0xf5, 0x28, 0xf5, 0xfb, 0xba, 0xb6, 0xd3, 0x52, 0xda, 0x75, 0x0c, 0x23, 0xf2, - 0x14, 0xaa, 0xcc, 0x9e, 0xd3, 0x80, 0x59, 0x73, 0x4f, 0x2b, 0xb4, 0x94, 0x76, 0x01, 0x57, 0x0b, - 0x64, 0x00, 0x0d, 0x6f, 0x31, 0x9e, 0xd9, 0xc1, 0xed, 0x39, 0x0d, 0x02, 0xeb, 0x3d, 0xd5, 0x8a, - 0x2d, 0xa5, 0x5d, 0x3b, 0x7a, 0x9e, 0xbd, 0x9f, 0x99, 0xd2, 0xe2, 0x1a, 0x4b, 0xfa, 0xb0, 0xe7, - 0xd3, 0x3b, 0x3a, 0x61, 0x51, 0xb2, 0x92, 0x48, 0xf6, 0x63, 0x76, 0x32, 0x4c, 0x4a, 0x31, 0x4d, - 0x12, 0x04, 0x75, 0xba, 0xf0, 0x66, 0xf6, 0xc4, 0x62, 0x34, 0xca, 0xb6, 0x2b, 0xb2, 0xbd, 0xc8, - 0xce, 0xa6, 0xaf, 0xa9, 0x71, 0x83, 0xe7, 
0xcd, 0x4e, 0xe9, 0xcc, 0x5e, 0x52, 0x3f, 0xca, 0x58, - 0xde, 0xd6, 0xac, 0x9e, 0xd2, 0xe2, 0x1a, 0x4b, 0x5e, 0x41, 0xd9, 0x9a, 0x4e, 0x4d, 0x4a, 0x7d, - 0xad, 0x22, 0xd2, 0x3c, 0xcb, 0x4e, 0xd3, 0x95, 0x22, 0x8c, 0xd4, 0xe4, 0x57, 0x00, 0x9f, 0xce, - 0xdd, 0x25, 0x15, 0x6c, 0x55, 0xb0, 0xad, 0xbc, 0x23, 0x8a, 0x74, 0x98, 0x60, 0xf8, 0xd6, 0x3e, - 0x9d, 0x2c, 0xd1, 0xec, 0x69, 0xb0, 0x6d, 0x6b, 0x94, 0x22, 0x8c, 0xd4, 0x1c, 0x0c, 0xa8, 0x33, - 0xe5, 0x60, 0x6d, 0x1b, 0x38, 0x94, 0x22, 0x8c, 0xd4, 0x1c, 0x9c, 0xfa, 0xae, 0xc7, 0xc1, 0xfa, - 0x36, 0x50, 0x97, 0x22, 0x8c, 0xd4, 0x7c, 0x8c, 0xef, 0x5c, 0xdb, 0xd1, 0xf6, 0x04, 0x95, 0x33, - 0xc6, 0x67, 0xae, 0xed, 0xa0, 0xd0, 0x91, 0x97, 0x50, 0x9a, 0x51, 0x6b, 0x49, 0xb5, 0x86, 0x00, - 0xbe, 0xcf, 0x06, 0x06, 0x5c, 0x82, 0x52, 0xc9, 0x91, 0xf7, 0xbe, 0xf5, 0x07, 0xd3, 0xf6, 0xb7, - 0x21, 0x27, 0x5c, 0x82, 0x52, 0xc9, 0x11, 0xcf, 0x5f, 0x38, 0x54, 0x53, 0xb7, 0x21, 0x26, 0x97, - 0xa0, 0x54, 0x36, 0x75, 0x68, 0xa4, 0xa7, 0x9f, 0x3b, 0x6b, 0x2e, 0x3f, 0xfb, 0xba, 0xb0, 0x69, - 0x1d, 0x57, 0x0b, 0xe4, 0x11, 0x94, 0x98, 0xeb, 0xd9, 0x13, 0x61, 0xc7, 0x2a, 0xca, 0xa0, 0xf9, - 0x17, 0xec, 0xa5, 0xc6, 0xfe, 0x33, 0x49, 0x0e, 0xa1, 0xee, 0xd3, 0x09, 0xb5, 0x97, 0x74, 0xfa, - 0xce, 0x77, 0xe7, 0xa1, 0xb5, 0x53, 0x6b, 0xdc, 0xf8, 0x3e, 0xb5, 0x02, 0xd7, 0x11, 0xee, 0xae, - 0x62, 0x18, 0xad, 0x0a, 0x28, 0x26, 0x0b, 0xb8, 0x03, 0x75, 0xdd, 0x29, 0x5f, 0xa1, 0x86, 0x78, - 0xaf, 0x42, 0x72, 0xaf, 0x5b, 0x68, 0xa4, 0x3d, 0xf4, 0x90, 0x23, 0xdb, 0xd8, 0xbf, 0xb0, 0xb9, - 0x7f, 0xf3, 0x15, 0x94, 0x43, 0x9b, 0x25, 0xde, 0x41, 0x25, 0xf5, 0x0e, 0x3e, 0xe2, 0x57, 0xee, - 0x32, 0x37, 0x4a, 0x2e, 0x82, 0xe6, 0x73, 0x80, 0x95, 0xc7, 0xf2, 0xd8, 0xe6, 0xef, 0x50, 0x0e, - 0xad, 0xb4, 0x51, 0x8d, 0x92, 0x71, 0x1a, 0x2f, 0xa1, 0x38, 0xa7, 0xcc, 0x12, 0x3b, 0xe5, 0x7b, - 0xd3, 0xec, 0x9d, 0x53, 0x66, 0xa1, 0x90, 0x36, 0x47, 0x50, 0x0e, 0x3d, 0xc7, 0x8b, 0xe0, 0xae, - 0x1b, 0xb9, 0x51, 0x11, 0x32, 0x7a, 0x60, 0xd6, 0xd0, 0x90, 0x5f, 0x33, 0xeb, 0x53, 0x28, 0x72, - 0xc3, 0xae, 0xae, 0x4b, 0x49, 0x5e, 0xfa, 0x33, 0x28, 0x09, 0x77, 0xe6, 0x18, 0xe0, 0x67, 0x28, - 0x09, 0x27, 0x6e, 0xbb, 0xa7, 0x6c, 0x4c, 0xb8, 0xf1, 0x7f, 0x62, 0x1f, 0x15, 0x28, 0x87, 0xc5, - 0x93, 0x37, 0x50, 0x09, 0x47, 0x2d, 0xd0, 0x94, 0x56, 0xa1, 0x5d, 0x3b, 0xfa, 0x21, 0xbb, 0xdb, - 0x70, 0x58, 0x45, 0xc7, 0x31, 0x42, 0xba, 0x50, 0x0f, 0x16, 0xe3, 0x60, 0xe2, 0xdb, 0x1e, 0xb3, - 0x5d, 0x47, 0xdb, 0x11, 0x29, 0xf2, 0xde, 0xcf, 0xc5, 0x58, 0xe0, 0x29, 0x84, 0xfc, 0x02, 0xe5, - 0x89, 0xeb, 0x30, 0xdf, 0x9d, 0x89, 0x21, 0xce, 0x2d, 0xa0, 0x27, 0x45, 0x22, 0x43, 0x44, 0x34, - 0xbb, 0x50, 0x4b, 0x14, 0xf6, 0xa0, 0xc7, 0xe7, 0x0d, 0x94, 0xc3, 0xc2, 0x38, 0x1e, 0x96, 0x36, - 0x96, 0x3f, 0x31, 0x2a, 0xb8, 0x5a, 0xc8, 0xc1, 0xff, 0xde, 0x81, 0x5a, 0xa2, 0x34, 0xf2, 0x1a, - 0x4a, 0xf6, 0x2d, 0x7f, 0xaa, 0xe5, 0x69, 0xbe, 0xd8, 0xda, 0x4c, 0xff, 0xd4, 0x5a, 0xca, 0x23, - 0x95, 0x90, 0xa0, 0xff, 0xb4, 0x1c, 0x16, 0x1e, 0xe4, 0x67, 0xe8, 0xdf, 0x2c, 0x87, 0x85, 0x34, - 0x87, 0x38, 0x2d, 0xdf, 0xfc, 0xc2, 0x17, 0xd0, 0x62, 0xe0, 0x24, 0x2d, 0x9f, 0xff, 0xd7, 0xd1, - 0xf3, 0x5f, 0xfc, 0x02, 0x5a, 0xcc, 0x9d, 0xa4, 0xe5, 0x7f, 0x82, 0x53, 0x50, 0xd7, 0x9b, 0xca, - 0xf6, 0x02, 0x39, 0x00, 0x88, 0xef, 0x24, 0x10, 0x8d, 0xd6, 0x31, 0xb1, 0xd2, 0x3c, 0x5a, 0x65, - 0x8a, 0x1a, 0x5c, 0x63, 0x94, 0x0d, 0xa6, 0x1d, 0x33, 0x71, 0x5b, 0x39, 0x4e, 0x7c, 0x1b, 0x2b, - 0xe3, 0x16, 0x72, 0xea, 0xe4, 0x6f, 0x23, 0xa5, 0x7e, 0x54, 0xa2, 0x0c, 0x0e, 0xff, 0x51, 0xa0, - 0xc8, 0x7f, 0x60, 0x92, 0xef, 0x60, 0xdf, 0xbc, 0x3a, 0x1e, 0xf4, 
0x87, 0xa7, 0x37, 0xe7, 0xc6, - 0x70, 0xd8, 0x3d, 0x31, 0xd4, 0x6f, 0x08, 0x81, 0x06, 0x1a, 0x67, 0x46, 0x6f, 0x14, 0xaf, 0x29, - 0xe4, 0x31, 0x7c, 0xab, 0x5f, 0x99, 0x83, 0x7e, 0xaf, 0x3b, 0x32, 0xe2, 0xe5, 0x1d, 0xce, 0xeb, - 0xc6, 0xa0, 0x7f, 0x6d, 0x60, 0xbc, 0x58, 0x20, 0x75, 0xa8, 0x74, 0x75, 0xfd, 0xc6, 0x34, 0x0c, - 0x54, 0x8b, 0x64, 0x1f, 0x6a, 0x68, 0x9c, 0x5f, 0x5e, 0x1b, 0x72, 0xa1, 0xc4, 0xff, 0x8c, 0x46, - 0xef, 0xfa, 0x06, 0xcd, 0x9e, 0xba, 0xcb, 0xa3, 0xa1, 0x71, 0xa1, 0x8b, 0xa8, 0xcc, 0x23, 0x1d, - 0x2f, 0x4d, 0x11, 0x55, 0x48, 0x05, 0x8a, 0x67, 0x97, 0xfd, 0x0b, 0xb5, 0x4a, 0xaa, 0x50, 0x1a, - 0x18, 0xdd, 0x6b, 0x43, 0x05, 0xfe, 0x79, 0x82, 0xdd, 0x77, 0x23, 0xb5, 0xc6, 0x3f, 0x4d, 0xbc, - 0xba, 0x30, 0xd4, 0xfa, 0xe1, 0x5b, 0xd8, 0x5f, 0xdd, 0xef, 0xb1, 0xc5, 0x26, 0xb7, 0xe4, 0x27, - 0x28, 0x8d, 0xf9, 0x47, 0x38, 0xc4, 0x8f, 0x33, 0x47, 0x01, 0xa5, 0xe6, 0xb8, 0xfe, 0xf1, 0xfe, - 0x40, 0xf9, 0xf7, 0xfe, 0x40, 0xf9, 0x74, 0x7f, 0xa0, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xdb, - 0x3a, 0x1c, 0xe4, 0xc9, 0x0b, 0x00, 0x00, + // 1027 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x96, 0xdf, 0x6e, 0xe2, 0x46, + 0x14, 0xc6, 0xeb, 0x80, 0x03, 0x1c, 0x08, 0x71, 0xa7, 0xd9, 0xd6, 0x72, 0x77, 0x23, 0x9a, 0xae, + 0x56, 0xa8, 0x95, 0x90, 0x36, 0x52, 0xbb, 0x17, 0xdd, 0x5d, 0x95, 0x60, 0x6f, 0x42, 0x44, 0x12, + 0x6b, 0x20, 0xe9, 0x65, 0x6a, 0x60, 0xba, 0x71, 0x04, 0xb6, 0x65, 0x0f, 0x54, 0x7b, 0xd5, 0xd7, + 0xdb, 0xbb, 0xed, 0x23, 0x54, 0x79, 0x92, 0x6a, 0x66, 0xfc, 0x07, 0x83, 0xed, 0xec, 0x46, 0xb9, + 0xf3, 0x19, 0xbe, 0xdf, 0x99, 0x33, 0x67, 0xce, 0x37, 0x02, 0xea, 0xd4, 0xb7, 0x26, 0xa4, 0xe3, + 0xf9, 0x2e, 0x75, 0x51, 0xcd, 0x5b, 0x8c, 0x83, 0xc5, 0xb8, 0xe3, 0x8d, 0x0f, 0xee, 0xbe, 0x03, + 0x18, 0xb1, 0x9f, 0x8c, 0x25, 0x71, 0x28, 0xea, 0x40, 0x99, 0x7e, 0xf0, 0x88, 0x2a, 0xb5, 0xa4, + 0x76, 0xf3, 0x50, 0xeb, 0xc4, 0xc2, 0x4e, 0x22, 0xea, 0x8c, 0x3e, 0x78, 0x04, 0x73, 0x1d, 0xfa, + 0x16, 0xb6, 0x3d, 0x42, 0xfc, 0xbe, 0xae, 0x6e, 0xb5, 0xa4, 0x76, 0x03, 0x87, 0x11, 0x7a, 0x0a, + 0x35, 0x6a, 0xcf, 0x49, 0x40, 0xad, 0xb9, 0xa7, 0x96, 0x5a, 0x52, 0xbb, 0x84, 0x93, 0x05, 0x34, + 0x80, 0xa6, 0xb7, 0x18, 0xcf, 0xec, 0xe0, 0xe6, 0x8c, 0x04, 0x81, 0xf5, 0x9e, 0xa8, 0xe5, 0x96, + 0xd4, 0xae, 0x1f, 0x3e, 0xcf, 0xde, 0xcf, 0x4c, 0x69, 0xf1, 0x1a, 0x8b, 0xfa, 0xb0, 0xe3, 0x93, + 0x5b, 0x32, 0xa1, 0x51, 0x32, 0x99, 0x27, 0xfb, 0x31, 0x3b, 0x19, 0x5e, 0x95, 0xe2, 0x34, 0x89, + 0x30, 0x28, 0xd3, 0x85, 0x37, 0xb3, 0x27, 0x16, 0x25, 0x51, 0xb6, 0x6d, 0x9e, 0xed, 0x45, 0x76, + 0x36, 0x7d, 0x4d, 0x8d, 0x37, 0x78, 0x76, 0xd8, 0x29, 0x99, 0xd9, 0x4b, 0xe2, 0x47, 0x19, 0x2b, + 0x45, 0x87, 0xd5, 0x53, 0x5a, 0xbc, 0xc6, 0xa2, 0x57, 0x50, 0xb1, 0xa6, 0x53, 0x93, 0x10, 0x5f, + 0xad, 0xf2, 0x34, 0xcf, 0xb2, 0xd3, 0x74, 0x85, 0x08, 0x47, 0x6a, 0xf4, 0x3b, 0x80, 0x4f, 0xe6, + 0xee, 0x92, 0x70, 0xb6, 0xc6, 0xd9, 0x56, 0x5e, 0x8b, 0x22, 0x1d, 0x5e, 0x61, 0xd8, 0xd6, 0x3e, + 0x99, 0x2c, 0xb1, 0xd9, 0x53, 0xa1, 0x68, 0x6b, 0x2c, 0x44, 0x38, 0x52, 0x33, 0x30, 0x20, 0xce, + 0x94, 0x81, 0xf5, 0x22, 0x70, 0x28, 0x44, 0x38, 0x52, 0x33, 0x70, 0xea, 0xbb, 0x1e, 0x03, 0x1b, + 0x45, 0xa0, 0x2e, 0x44, 0x38, 0x52, 0xb3, 0x31, 0xbe, 0x75, 0x6d, 0x47, 0xdd, 0xe1, 0x54, 0xce, + 0x18, 0x9f, 0xba, 0xb6, 0x83, 0xb9, 0x0e, 0xbd, 0x04, 0x79, 0x46, 0xac, 0x25, 0x51, 0x9b, 0x1c, + 0xf8, 0x3e, 0x1b, 0x18, 0x30, 0x09, 0x16, 0x4a, 0x86, 0xbc, 0xf7, 0xad, 0xbf, 0xa8, 0xba, 0x5b, + 0x84, 0x1c, 0x33, 0x09, 0x16, 0x4a, 0x86, 0x78, 0xfe, 0xc2, 0x21, 0xaa, 0x52, 0x84, 0x98, 0x4c, + 
0x82, 0x85, 0x52, 0xd3, 0xa1, 0x99, 0x9e, 0x7e, 0xe6, 0xac, 0xb9, 0xf8, 0xec, 0xeb, 0xdc, 0xa6, + 0x0d, 0x9c, 0x2c, 0xa0, 0x3d, 0x90, 0xa9, 0xeb, 0xd9, 0x13, 0x6e, 0xc7, 0x1a, 0x16, 0x81, 0xf6, + 0x0f, 0xec, 0xa4, 0xc6, 0xfe, 0x9e, 0x24, 0x07, 0xd0, 0xf0, 0xc9, 0x84, 0xd8, 0x4b, 0x32, 0x7d, + 0xe7, 0xbb, 0xf3, 0xd0, 0xda, 0xa9, 0x35, 0x66, 0x7c, 0x9f, 0x58, 0x81, 0xeb, 0x70, 0x77, 0xd7, + 0x70, 0x18, 0x25, 0x05, 0x94, 0x57, 0x0b, 0xb8, 0x05, 0x65, 0xdd, 0x29, 0x8f, 0x50, 0x43, 0xbc, + 0x57, 0x69, 0x75, 0xaf, 0x1b, 0x68, 0xa6, 0x3d, 0xf4, 0x90, 0x96, 0x6d, 0xec, 0x5f, 0xda, 0xdc, + 0x5f, 0x7b, 0x05, 0x95, 0xd0, 0x66, 0x2b, 0xef, 0xa0, 0x94, 0x7a, 0x07, 0xf7, 0xd8, 0x95, 0xbb, + 0xd4, 0x8d, 0x92, 0xf3, 0x40, 0x7b, 0x0e, 0x90, 0x78, 0x2c, 0x8f, 0xd5, 0xfe, 0x84, 0x4a, 0x68, + 0xa5, 0x8d, 0x6a, 0xa4, 0x8c, 0x6e, 0xbc, 0x84, 0xf2, 0x9c, 0x50, 0x8b, 0xef, 0x94, 0xef, 0x4d, + 0xb3, 0x77, 0x46, 0xa8, 0x85, 0xb9, 0x54, 0x1b, 0x41, 0x25, 0xf4, 0x1c, 0x2b, 0x82, 0xb9, 0x6e, + 0xe4, 0x46, 0x45, 0x88, 0xe8, 0x81, 0x59, 0x43, 0x43, 0x3e, 0x66, 0xd6, 0xa7, 0x50, 0x66, 0x86, + 0x4d, 0xae, 0x4b, 0x5a, 0xbd, 0xf4, 0x67, 0x20, 0x73, 0x77, 0xe6, 0x18, 0xe0, 0x17, 0x90, 0xb9, + 0x13, 0x8b, 0xee, 0x29, 0x1b, 0xe3, 0x6e, 0xfc, 0x42, 0xec, 0xa3, 0x04, 0x95, 0xb0, 0x78, 0xf4, + 0x06, 0xaa, 0xe1, 0xa8, 0x05, 0xaa, 0xd4, 0x2a, 0xb5, 0xeb, 0x87, 0x3f, 0x64, 0x9f, 0x36, 0x1c, + 0x56, 0x7e, 0xe2, 0x18, 0x41, 0x5d, 0x68, 0x04, 0x8b, 0x71, 0x30, 0xf1, 0x6d, 0x8f, 0xda, 0xae, + 0xa3, 0x6e, 0xf1, 0x14, 0x79, 0xef, 0xe7, 0x62, 0xcc, 0xf1, 0x14, 0x82, 0x7e, 0x83, 0xca, 0xc4, + 0x75, 0xa8, 0xef, 0xce, 0xf8, 0x10, 0xe7, 0x16, 0xd0, 0x13, 0x22, 0x9e, 0x21, 0x22, 0xb4, 0x2e, + 0xd4, 0x57, 0x0a, 0x7b, 0xd0, 0xe3, 0xf3, 0x06, 0x2a, 0x61, 0x61, 0x0c, 0x0f, 0x4b, 0x1b, 0x8b, + 0xbf, 0x18, 0x55, 0x9c, 0x2c, 0xe4, 0xe0, 0x9f, 0xb6, 0xa0, 0xbe, 0x52, 0x1a, 0x7a, 0x0d, 0xb2, + 0x7d, 0xc3, 0x9e, 0x6a, 0xd1, 0xcd, 0x17, 0x85, 0x87, 0xe9, 0x9f, 0x58, 0x4b, 0xd1, 0x52, 0x01, + 0x71, 0xfa, 0x6f, 0xcb, 0xa1, 0x61, 0x23, 0xef, 0xa1, 0xff, 0xb0, 0x1c, 0x1a, 0xd2, 0x0c, 0x62, + 0xb4, 0x78, 0xf3, 0x4b, 0x9f, 0x41, 0xf3, 0x81, 0x13, 0xb4, 0x78, 0xfe, 0x5f, 0x47, 0xcf, 0x7f, + 0xf9, 0x33, 0x68, 0x3e, 0x77, 0x82, 0xe6, 0x10, 0x3a, 0x81, 0x9a, 0x3d, 0x75, 0x1d, 0xca, 0xab, + 0x97, 0x79, 0x86, 0x9f, 0x8a, 0xab, 0xd7, 0x5d, 0x87, 0xc6, 0x27, 0x48, 0x60, 0xed, 0x04, 0x94, + 0xf5, 0xf6, 0x64, 0xbb, 0x0a, 0xed, 0x03, 0xc4, 0xb7, 0x1b, 0xf0, 0x96, 0x35, 0xf0, 0xca, 0x8a, + 0x76, 0x98, 0x64, 0x8a, 0x36, 0x5a, 0x63, 0xa4, 0x0d, 0xa6, 0x1d, 0x33, 0x71, 0x83, 0x72, 0x3c, + 0xfd, 0x36, 0x56, 0xc6, 0xcd, 0xc8, 0xa9, 0x93, 0xbd, 0xb2, 0x84, 0xf8, 0x51, 0x89, 0x22, 0xd0, + 0x7e, 0x85, 0xbd, 0xac, 0x56, 0xdc, 0x57, 0xe1, 0xc1, 0x27, 0x09, 0xca, 0xec, 0x2f, 0x2e, 0xfa, + 0x06, 0x76, 0xcd, 0xcb, 0xa3, 0x41, 0x7f, 0x78, 0x72, 0x7d, 0x66, 0x0c, 0x87, 0xdd, 0x63, 0x43, + 0xf9, 0x0a, 0x21, 0x68, 0x62, 0xe3, 0xd4, 0xe8, 0x8d, 0xe2, 0x35, 0x09, 0x3d, 0x81, 0xaf, 0xf5, + 0x4b, 0x73, 0xd0, 0xef, 0x75, 0x47, 0x46, 0xbc, 0xbc, 0xc5, 0x78, 0xdd, 0x18, 0xf4, 0xaf, 0x0c, + 0x1c, 0x2f, 0x96, 0x50, 0x03, 0xaa, 0x5d, 0x5d, 0xbf, 0x36, 0x0d, 0x03, 0x2b, 0x65, 0xb4, 0x0b, + 0x75, 0x6c, 0x9c, 0x5d, 0x5c, 0x19, 0x62, 0x41, 0x66, 0x3f, 0x63, 0xa3, 0x77, 0x75, 0x8d, 0xcd, + 0x9e, 0xb2, 0xcd, 0xa2, 0xa1, 0x71, 0xae, 0xf3, 0xa8, 0xc2, 0x22, 0x1d, 0x5f, 0x98, 0x3c, 0xaa, + 0xa2, 0x2a, 0x94, 0x4f, 0x2f, 0xfa, 0xe7, 0x4a, 0x0d, 0xd5, 0x40, 0x1e, 0x18, 0xdd, 0x2b, 0x43, + 0x01, 0xf6, 0x79, 0x8c, 0xbb, 0xef, 0x46, 0x4a, 0x9d, 0x7d, 0x9a, 0xf8, 0xf2, 0xdc, 0x50, 0x1a, + 0x07, 0x6f, 0x61, 0x37, 
0x99, 0x8f, 0x23, 0x8b, 0x4e, 0x6e, 0xd0, 0xcf, 0x20, 0x8f, 0xd9, 0x47, + 0x68, 0xa3, 0x27, 0x99, 0xa3, 0x84, 0x85, 0xe6, 0xa8, 0xf1, 0xf1, 0x6e, 0x5f, 0xfa, 0xf7, 0x6e, + 0x5f, 0xfa, 0xef, 0x6e, 0x5f, 0xfa, 0x3f, 0x00, 0x00, 0xff, 0xff, 0x17, 0x7f, 0xbd, 0x0d, 0x4b, + 0x0c, 0x00, 0x00, } func (m *TraceEvent) Marshal() (dAtA []byte, err error) { @@ -2509,6 +2568,20 @@ func (m *TraceEvent_ControlMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Idontwant) > 0 { + for iNdEx := len(m.Idontwant) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Idontwant[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } if len(m.Prune) > 0 { for iNdEx := len(m.Prune) - 1; iNdEx >= 0; iNdEx-- { { @@ -2724,6 +2797,42 @@ func (m *TraceEvent_ControlPruneMeta) MarshalToSizedBuffer(dAtA []byte) (int, er return len(dAtA) - i, nil } +func (m *TraceEvent_ControlIDontWantMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TraceEvent_ControlIDontWantMeta) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TraceEvent_ControlIDontWantMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.MessageIDs) > 0 { + for iNdEx := len(m.MessageIDs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MessageIDs[iNdEx]) + copy(dAtA[i:], m.MessageIDs[iNdEx]) + i = encodeVarintTrace(dAtA, i, uint64(len(m.MessageIDs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *TraceEventBatch) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3211,6 +3320,12 @@ func (m *TraceEvent_ControlMeta) Size() (n int) { n += 1 + l + sovTrace(uint64(l)) } } + if len(m.Idontwant) > 0 { + for _, e := range m.Idontwant { + l = e.Size() + n += 1 + l + sovTrace(uint64(l)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -3295,6 +3410,24 @@ func (m *TraceEvent_ControlPruneMeta) Size() (n int) { return n } +func (m *TraceEvent_ControlIDontWantMeta) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.MessageIDs) > 0 { + for _, b := range m.MessageIDs { + l = len(b) + n += 1 + l + sovTrace(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *TraceEventBatch) Size() (n int) { if m == nil { return 0 @@ -6032,6 +6165,40 @@ func (m *TraceEvent_ControlMeta) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Idontwant", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Idontwant = append(m.Idontwant, 
&TraceEvent_ControlIDontWantMeta{}) + if err := m.Idontwant[len(m.Idontwant)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTrace(dAtA[iNdEx:]) @@ -6453,6 +6620,89 @@ func (m *TraceEvent_ControlPruneMeta) Unmarshal(dAtA []byte) error { } return nil } +func (m *TraceEvent_ControlIDontWantMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ControlIDontWantMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ControlIDontWantMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MessageIDs", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MessageIDs = append(m.MessageIDs, make([]byte, postIndex-iNdEx)) + copy(m.MessageIDs[len(m.MessageIDs)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTrace(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTrace + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *TraceEventBatch) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/pb/trace.proto b/pb/trace.proto index 7f834020..5ee8401c 100644 --- a/pb/trace.proto +++ b/pb/trace.proto @@ -124,6 +124,7 @@ message TraceEvent { repeated ControlIWantMeta iwant = 2; repeated ControlGraftMeta graft = 3; repeated ControlPruneMeta prune = 4; + repeated ControlIDontWantMeta idontwant = 5; } message ControlIHaveMeta { @@ -143,6 +144,10 @@ message TraceEvent { optional string topic = 1; repeated bytes peers = 2; } + + message ControlIDontWantMeta { + repeated bytes messageIDs = 1; + } } message TraceEventBatch { diff --git a/peer_notify.go b/peer_notify.go new file mode 100644 index 00000000..44aceeef --- /dev/null +++ b/peer_notify.go @@ -0,0 +1,112 @@ +package pubsub + +import ( + "context" + + "github.com/libp2p/go-libp2p/core/event" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" +) + +func (ps *PubSub) watchForNewPeers(ctx context.Context) { + // We don't bother subscribing to "connectivity" events because we always run identify after + // every new connection. 
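+	// Subscribe before snapshotting the already-connected peers below, so a peer
+	// that finishes identify in the meantime is not missed.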
+ sub, err := ps.host.EventBus().Subscribe([]interface{}{ + &event.EvtPeerIdentificationCompleted{}, + &event.EvtPeerProtocolsUpdated{}, + }) + if err != nil { + log.Errorf("failed to subscribe to peer identification events: %v", err) + return + } + defer sub.Close() + + ps.newPeersPrioLk.RLock() + ps.newPeersMx.Lock() + for _, pid := range ps.host.Network().Peers() { + if ps.host.Network().Connectedness(pid) != network.Connected { + continue + } + ps.newPeersPend[pid] = struct{}{} + } + ps.newPeersMx.Unlock() + ps.newPeersPrioLk.RUnlock() + + select { + case ps.newPeers <- struct{}{}: + default: + } + + var supportsProtocol func(protocol.ID) bool + if ps.protoMatchFunc != nil { + var supportedProtocols []func(protocol.ID) bool + for _, proto := range ps.rt.Protocols() { + + supportedProtocols = append(supportedProtocols, ps.protoMatchFunc(proto)) + } + supportsProtocol = func(proto protocol.ID) bool { + for _, fn := range supportedProtocols { + if (fn)(proto) { + return true + } + } + return false + } + } else { + supportedProtocols := make(map[protocol.ID]struct{}) + for _, proto := range ps.rt.Protocols() { + supportedProtocols[proto] = struct{}{} + } + supportsProtocol = func(proto protocol.ID) bool { + _, ok := supportedProtocols[proto] + return ok + } + } + + for ctx.Err() == nil { + var ev any + select { + case <-ctx.Done(): + return + case ev = <-sub.Out(): + } + + var protos []protocol.ID + var peer peer.ID + switch ev := ev.(type) { + case event.EvtPeerIdentificationCompleted: + peer = ev.Peer + protos = ev.Protocols + case event.EvtPeerProtocolsUpdated: + peer = ev.Peer + protos = ev.Added + default: + continue + } + + // We don't bother checking connectivity (connected and non-"limited") here because + // we'll check when actually handling the new peer. + + for _, p := range protos { + if supportsProtocol(p) { + ps.notifyNewPeer(peer) + break + } + } + } + +} + +func (ps *PubSub) notifyNewPeer(peer peer.ID) { + ps.newPeersPrioLk.RLock() + ps.newPeersMx.Lock() + ps.newPeersPend[peer] = struct{}{} + ps.newPeersMx.Unlock() + ps.newPeersPrioLk.RUnlock() + + select { + case ps.newPeers <- struct{}{}: + default: + } +} diff --git a/pubsub.go b/pubsub.go index c4ecae65..3ca14abb 100644 --- a/pubsub.go +++ b/pubsub.go @@ -147,7 +147,7 @@ type PubSub struct { blacklist Blacklist blacklistPeer chan peer.ID - peers map[peer.ID]chan *RPC + peers map[peer.ID]*rpcQueue inboundStreamsMx sync.Mutex inboundStreams map[peer.ID]network.Stream @@ -196,11 +196,14 @@ type PubSubRouter interface { // EnoughPeers returns whether the router needs more peers before it's ready to publish new records. // Suggested (if greater than 0) is a suggested number of peers that the router should need. EnoughPeers(topic string, suggested int) bool - // AcceptFrom is invoked on any incoming message before pushing it to the validation pipeline + // AcceptFrom is invoked on any RPC envelope before pushing it to the validation pipeline // or processing control information. // Allows routers with internal scoring to vet peers before committing any processing resources // to the message and implement an effective graylist and react to validation queue overload. AcceptFrom(peer.ID) AcceptStatus + // PreValidation is invoked on messages in the RPC envelope right before pushing it to + // the validation pipeline + PreValidation([]*Message) // HandleRPC is invoked to process control messages in the RPC envelope. // It is invoked after subscriptions and payload messages have been processed. 
HandleRPC(*RPC) @@ -285,7 +288,7 @@ func NewPubSub(ctx context.Context, h host.Host, rt PubSubRouter, opts ...Option mySubs: make(map[string]map[*Subscription]struct{}), myRelays: make(map[string]int), topics: make(map[string]map[peer.ID]struct{}), - peers: make(map[peer.ID]chan *RPC), + peers: make(map[peer.ID]*rpcQueue), inboundStreams: make(map[peer.ID]network.Stream), blacklist: NewMapBlacklist(), blacklistPeer: make(chan peer.ID), @@ -327,14 +330,12 @@ func NewPubSub(ctx context.Context, h host.Host, rt PubSubRouter, opts ...Option h.SetStreamHandler(id, ps.handleNewStream) } } - h.Network().Notify((*PubSubNotif)(ps)) + go ps.watchForNewPeers(ctx) ps.val.Start(ps) go ps.processLoop(ctx) - (*PubSubNotif)(ps).Initialize() - return ps, nil } @@ -561,8 +562,8 @@ func WithAppSpecificRpcInspector(inspector func(peer.ID, *RPC) error) Option { func (p *PubSub) processLoop(ctx context.Context) { defer func() { // Clean up go routines. - for _, ch := range p.peers { - close(ch) + for _, queue := range p.peers { + queue.Close() } p.peers = nil p.topics = nil @@ -577,7 +578,7 @@ func (p *PubSub) processLoop(ctx context.Context) { case s := <-p.newPeerStream: pid := s.Conn().RemotePeer() - ch, ok := p.peers[pid] + q, ok := p.peers[pid] if !ok { log.Warn("new stream for unknown peer: ", pid) s.Reset() @@ -586,7 +587,7 @@ func (p *PubSub) processLoop(ctx context.Context) { if p.blacklist.Contains(pid) { log.Warn("closing stream for blacklisted peer: ", pid) - close(ch) + q.Close() delete(p.peers, pid) s.Reset() continue @@ -654,9 +655,9 @@ func (p *PubSub) processLoop(ctx context.Context) { log.Infof("Blacklisting peer %s", pid) p.blacklist.Add(pid) - ch, ok := p.peers[pid] + q, ok := p.peers[pid] if ok { - close(ch) + q.Close() delete(p.peers, pid) for t, tmap := range p.topics { if _, ok := tmap[pid]; ok { @@ -687,6 +688,8 @@ func (p *PubSub) handlePendingPeers() { p.newPeersPrioLk.Unlock() for pid := range newPeers { + // Make sure we have a non-limited connection. We do this late because we may have + // disconnected in the meantime. if p.host.Network().Connectedness(pid) != network.Connected { continue } @@ -701,10 +704,10 @@ func (p *PubSub) handlePendingPeers() { continue } - messages := make(chan *RPC, p.peerOutboundQueueSize) - messages <- p.getHelloPacket() - go p.handleNewPeer(p.ctx, pid, messages) - p.peers[pid] = messages + rpcQueue := newRpcQueue(p.peerOutboundQueueSize) + rpcQueue.Push(p.getHelloPacket(), true) + go p.handleNewPeer(p.ctx, pid, rpcQueue) + p.peers[pid] = rpcQueue } } @@ -721,12 +724,12 @@ func (p *PubSub) handleDeadPeers() { p.peerDeadPrioLk.Unlock() for pid := range deadPeers { - ch, ok := p.peers[pid] + q, ok := p.peers[pid] if !ok { continue } - close(ch) + q.Close() delete(p.peers, pid) for t, tmap := range p.topics { @@ -748,10 +751,10 @@ func (p *PubSub) handleDeadPeers() { // still connected, must be a duplicate connection being closed. 
// we respawn the writer as we need to ensure there is a stream active log.Debugf("peer declared dead but still connected; respawning writer: %s", pid) - messages := make(chan *RPC, p.peerOutboundQueueSize) - messages <- p.getHelloPacket() - p.peers[pid] = messages - go p.handleNewPeerWithBackoff(p.ctx, pid, backoffDelay, messages) + rpcQueue := newRpcQueue(p.peerOutboundQueueSize) + rpcQueue.Push(p.getHelloPacket(), true) + p.peers[pid] = rpcQueue + go p.handleNewPeerWithBackoff(p.ctx, pid, backoffDelay, rpcQueue) } } } @@ -915,14 +918,14 @@ func (p *PubSub) announce(topic string, sub bool) { out := rpcWithSubs(subopt) for pid, peer := range p.peers { - select { - case peer <- out: - p.tracer.SendRPC(out, pid) - default: + err := peer.Push(out, false) + if err != nil { log.Infof("Can't send announce message to peer %s: queue full; scheduling retry", pid) p.tracer.DropRPC(out, pid) go p.announceRetry(pid, topic, sub) + continue } + p.tracer.SendRPC(out, pid) } } @@ -958,14 +961,14 @@ func (p *PubSub) doAnnounceRetry(pid peer.ID, topic string, sub bool) { } out := rpcWithSubs(subopt) - select { - case peer <- out: - p.tracer.SendRPC(out, pid) - default: + err := peer.Push(out, false) + if err != nil { log.Infof("Can't send announce message to peer %s: queue full; scheduling retry", pid) p.tracer.DropRPC(out, pid) go p.announceRetry(pid, topic, sub) + return } + p.tracer.SendRPC(out, pid) } // notifySubs sends a given message to all corresponding subscribers. @@ -1091,13 +1094,21 @@ func (p *PubSub) handleIncomingRPC(rpc *RPC) { p.tracer.ThrottlePeer(rpc.from) case AcceptAll: + var toPush []*Message for _, pmsg := range rpc.GetPublish() { if !(p.subscribedToMsg(pmsg) || p.canRelayMsg(pmsg)) { log.Debug("received message in topic we didn't subscribe to; ignoring message") continue } - p.pushMsg(&Message{pmsg, "", rpc.from, nil, false}) + msg := &Message{pmsg, "", rpc.from, nil, false} + if p.shouldPush(msg) { + toPush = append(toPush, msg) + } + } + p.rt.PreValidation(toPush) + for _, msg := range toPush { + p.pushMsg(msg) } } @@ -1114,27 +1125,28 @@ func DefaultPeerFilter(pid peer.ID, topic string) bool { return true } -// pushMsg pushes a message performing validation as necessary -func (p *PubSub) pushMsg(msg *Message) { +// shouldPush filters a message before validating and pushing it +// It returns true if the message can be further validated and pushed +func (p *PubSub) shouldPush(msg *Message) bool { src := msg.ReceivedFrom // reject messages from blacklisted peers if p.blacklist.Contains(src) { log.Debugf("dropping message from blacklisted peer %s", src) p.tracer.RejectMessage(msg, RejectBlacklstedPeer) - return + return false } // even if they are forwarded by good peers if p.blacklist.Contains(msg.GetFrom()) { log.Debugf("dropping message from blacklisted source %s", src) p.tracer.RejectMessage(msg, RejectBlacklistedSource) - return + return false } err := p.checkSigningPolicy(msg) if err != nil { log.Debugf("dropping message from %s: %s", src, err) - return + return false } // reject messages claiming to be from ourselves but not locally published @@ -1142,16 +1154,24 @@ func (p *PubSub) pushMsg(msg *Message) { if peer.ID(msg.GetFrom()) == self && src != self { log.Debugf("dropping message claiming to be from self but forwarded from %s", src) p.tracer.RejectMessage(msg, RejectSelfOrigin) - return + return false } // have we already seen and validated this message? 
id := p.idGen.ID(msg) if p.seenMessage(id) { p.tracer.DuplicateMessage(msg) - return + return false } + return true +} + +// pushMsg pushes a message performing validation as necessary +func (p *PubSub) pushMsg(msg *Message) { + src := msg.ReceivedFrom + id := p.idGen.ID(msg) + if !p.val.Push(src, msg) { return } diff --git a/pubsub_test.go b/pubsub_test.go index 4a033159..245a69df 100644 --- a/pubsub_test.go +++ b/pubsub_test.go @@ -4,13 +4,32 @@ import ( "context" "testing" "time" + + "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" ) +func getDefaultHosts(t *testing.T, n int) []host.Host { + var out []host.Host + + for i := 0; i < n; i++ { + h, err := libp2p.New(libp2p.ResourceManager(&network.NullResourceManager{})) + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { h.Close() }) + out = append(out, h) + } + + return out +} + // See https://github.com/libp2p/go-libp2p-pubsub/issues/426 func TestPubSubRemovesBlacklistedPeer(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) bl := NewMapBlacklist() diff --git a/randomsub.go b/randomsub.go index f29b923f..4e410f5f 100644 --- a/randomsub.go +++ b/randomsub.go @@ -94,6 +94,8 @@ func (rs *RandomSubRouter) AcceptFrom(peer.ID) AcceptStatus { return AcceptAll } +func (rs *RandomSubRouter) PreValidation([]*Message) {} + func (rs *RandomSubRouter) HandleRPC(rpc *RPC) {} func (rs *RandomSubRouter) Publish(msg *Message) { @@ -144,18 +146,18 @@ func (rs *RandomSubRouter) Publish(msg *Message) { out := rpcWithMessages(msg.Message) for p := range tosend { - mch, ok := rs.p.peers[p] + q, ok := rs.p.peers[p] if !ok { continue } - select { - case mch <- out: - rs.tracer.SendRPC(out, p) - default: + err := q.Push(out, false) + if err != nil { log.Infof("dropping message to peer %s: queue full", p) rs.tracer.DropRPC(out, p) + continue } + rs.tracer.SendRPC(out, p) } } diff --git a/randomsub_test.go b/randomsub_test.go index 8eb640ea..5c817b7c 100644 --- a/randomsub_test.go +++ b/randomsub_test.go @@ -40,7 +40,7 @@ func TestRandomsubSmall(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 10) + hosts := getDefaultHosts(t, 10) psubs := getRandomsubs(ctx, hosts, 10) connectAll(t, hosts) @@ -77,7 +77,7 @@ func TestRandomsubBig(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 50) + hosts := getDefaultHosts(t, 50) psubs := getRandomsubs(ctx, hosts, 50) connectSome(t, hosts, 12) @@ -114,7 +114,7 @@ func TestRandomsubMixed(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 40) + hosts := getDefaultHosts(t, 40) fsubs := getPubsubs(ctx, hosts[:10]) rsubs := getRandomsubs(ctx, hosts[10:], 30) psubs := append(fsubs, rsubs...) @@ -153,7 +153,7 @@ func TestRandomsubEnoughPeers(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 40) + hosts := getDefaultHosts(t, 40) fsubs := getPubsubs(ctx, hosts[:10]) rsubs := getRandomsubs(ctx, hosts[10:], 30) psubs := append(fsubs, rsubs...) 
diff --git a/rpc_queue.go b/rpc_queue.go
new file mode 100644
index 00000000..e5c22935
--- /dev/null
+++ b/rpc_queue.go
@@ -0,0 +1,147 @@
+package pubsub
+
+import (
+	"context"
+	"errors"
+	"sync"
+)
+
+var (
+	ErrQueueCancelled    = errors.New("rpc queue operation cancelled")
+	ErrQueueClosed       = errors.New("rpc queue closed")
+	ErrQueueFull         = errors.New("rpc queue full")
+	ErrQueuePushOnClosed = errors.New("push on closed rpc queue")
+)
+
+type priorityQueue struct {
+	normal   []*RPC
+	priority []*RPC
+}
+
+func (q *priorityQueue) Len() int {
+	return len(q.normal) + len(q.priority)
+}
+
+func (q *priorityQueue) NormalPush(rpc *RPC) {
+	q.normal = append(q.normal, rpc)
+}
+
+func (q *priorityQueue) PriorityPush(rpc *RPC) {
+	q.priority = append(q.priority, rpc)
+}
+
+func (q *priorityQueue) Pop() *RPC {
+	var rpc *RPC
+
+	if len(q.priority) > 0 {
+		rpc = q.priority[0]
+		q.priority[0] = nil
+		q.priority = q.priority[1:]
+	} else if len(q.normal) > 0 {
+		rpc = q.normal[0]
+		q.normal[0] = nil
+		q.normal = q.normal[1:]
+	}
+
+	return rpc
+}
+
+type rpcQueue struct {
+	dataAvailable  sync.Cond
+	spaceAvailable sync.Cond
+	// Mutex guarding access to the queue
+	queueMu sync.Mutex
+	queue   priorityQueue
+
+	closed  bool
+	maxSize int
+}
+
+func newRpcQueue(maxSize int) *rpcQueue {
+	q := &rpcQueue{maxSize: maxSize}
+	q.dataAvailable.L = &q.queueMu
+	q.spaceAvailable.L = &q.queueMu
+	return q
+}
+
+func (q *rpcQueue) Push(rpc *RPC, block bool) error {
+	return q.push(rpc, false, block)
+}
+
+func (q *rpcQueue) UrgentPush(rpc *RPC, block bool) error {
+	return q.push(rpc, true, block)
+}
+
+func (q *rpcQueue) push(rpc *RPC, urgent bool, block bool) error {
+	q.queueMu.Lock()
+	defer q.queueMu.Unlock()
+
+	if q.closed {
+		panic(ErrQueuePushOnClosed)
+	}
+
+	for q.queue.Len() == q.maxSize {
+		if block {
+			q.spaceAvailable.Wait()
+			// We may have been woken because the queue was closed.
+			if q.closed {
+				panic(ErrQueuePushOnClosed)
+			}
+		} else {
+			return ErrQueueFull
+		}
+	}
+	if urgent {
+		q.queue.PriorityPush(rpc)
+	} else {
+		q.queue.NormalPush(rpc)
+	}
+
+	q.dataAvailable.Signal()
+	return nil
+}
+
+// Note that when the queue is empty and there are two blocked Pop calls, the
+// first Pop is not guaranteed to get the item from the next Push; the second
+// Pop will probably get it instead.
+func (q *rpcQueue) Pop(ctx context.Context) (*RPC, error) {
+	q.queueMu.Lock()
+	defer q.queueMu.Unlock()
+
+	if q.closed {
+		return nil, ErrQueueClosed
+	}
+
+	unregisterAfterFunc := context.AfterFunc(ctx, func() {
+		// Wake up all the waiting routines. The only routine that corresponds
+		// to this Pop call will return from the function. Note that this can
+		// be expensive if there are too many waiting routines.
+		q.dataAvailable.Broadcast()
+	})
+	defer unregisterAfterFunc()
+
+	for q.queue.Len() == 0 {
+		select {
+		case <-ctx.Done():
+			return nil, ErrQueueCancelled
+		default:
+		}
+		q.dataAvailable.Wait()
+		// We may have been woken because the queue was closed.
+		if q.closed {
+			return nil, ErrQueueClosed
+		}
+	}
+	rpc := q.queue.Pop()
+	q.spaceAvailable.Signal()
+	return rpc, nil
+}
+
+func (q *rpcQueue) Close() {
+	q.queueMu.Lock()
+	defer q.queueMu.Unlock()
+
+	q.closed = true
+	q.dataAvailable.Broadcast()
+	q.spaceAvailable.Broadcast()
+}
diff --git a/rpc_queue_test.go b/rpc_queue_test.go
new file mode 100644
index 00000000..6e92ee56
--- /dev/null
+++ b/rpc_queue_test.go
@@ -0,0 +1,229 @@
+package pubsub
+
+import (
+	"context"
+	"testing"
+	"time"
+)
+
+func TestNewRpcQueue(t *testing.T) {
+	maxSize := 32
+	q := newRpcQueue(maxSize)
+	if q.maxSize != maxSize {
+		t.Fatalf("rpc queue has wrong max size, expected %d but got %d", maxSize, q.maxSize)
+	}
+	if q.dataAvailable.L != &q.queueMu {
+		t.Fatalf("the dataAvailable field of rpc queue has an incorrect mutex")
+	}
+	if q.spaceAvailable.L != &q.queueMu {
+		t.Fatalf("the spaceAvailable field of rpc queue has an incorrect mutex")
+	}
+}
+
+func TestRpcQueueUrgentPush(t *testing.T) {
+	maxSize := 32
+	q := newRpcQueue(maxSize)
+
+	rpc1 := &RPC{}
+	rpc2 := &RPC{}
+	rpc3 := &RPC{}
+	rpc4 := &RPC{}
+	q.Push(rpc1, true)
+	q.UrgentPush(rpc2, true)
+	q.Push(rpc3, true)
+	q.UrgentPush(rpc4, true)
+	pop1, err := q.Pop(context.Background())
+	if err != nil {
+		t.Fatal(err)
+	}
+	pop2, err := q.Pop(context.Background())
+	if err != nil {
+		t.Fatal(err)
+	}
+	pop3, err := q.Pop(context.Background())
+	if err != nil {
+		t.Fatal(err)
+	}
+	pop4, err := q.Pop(context.Background())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if pop1 != rpc2 {
+		t.Fatalf("got wrong item from rpc queue Pop")
+	}
+	if pop2 != rpc4 {
+		t.Fatalf("got wrong item from rpc queue Pop")
+	}
+	if pop3 != rpc1 {
+		t.Fatalf("got wrong item from rpc queue Pop")
+	}
+	if pop4 != rpc3 {
+		t.Fatalf("got wrong item from rpc queue Pop")
+	}
+}
+
+func TestRpcQueuePushThenPop(t *testing.T) {
+	maxSize := 32
+	q := newRpcQueue(maxSize)
+
+	rpc1 := &RPC{}
+	rpc2 := &RPC{}
+	q.Push(rpc1, true)
+	q.Push(rpc2, true)
+	pop1, err := q.Pop(context.Background())
+	if err != nil {
+		t.Fatal(err)
+	}
+	pop2, err := q.Pop(context.Background())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if pop1 != rpc1 {
+		t.Fatalf("got wrong item from rpc queue Pop")
+	}
+	if pop2 != rpc2 {
+		t.Fatalf("got wrong item from rpc queue Pop")
+	}
+}
+
+func TestRpcQueuePopThenPush(t *testing.T) {
+	maxSize := 32
+	q := newRpcQueue(maxSize)
+
+	rpc1 := &RPC{}
+	rpc2 := &RPC{}
+	go func() {
+		// Wait to make sure the main goroutine is blocked.
+		time.Sleep(1 * time.Millisecond)
+		q.Push(rpc1, true)
+		q.Push(rpc2, true)
+	}()
+	pop1, err := q.Pop(context.Background())
+	if err != nil {
+		t.Fatal(err)
+	}
+	pop2, err := q.Pop(context.Background())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if pop1 != rpc1 {
+		t.Fatalf("got wrong item from rpc queue Pop")
+	}
+	if pop2 != rpc2 {
+		t.Fatalf("got wrong item from rpc queue Pop")
+	}
+}
+
+func TestRpcQueueBlockPushWhenFull(t *testing.T) {
+	maxSize := 1
+	q := newRpcQueue(maxSize)
+
+	finished := make(chan struct{})
+	q.Push(&RPC{}, true)
+	go func() {
+		q.Push(&RPC{}, true)
+		finished <- struct{}{}
+	}()
+	// Wait to make sure the goroutine is blocked.
+	time.Sleep(1 * time.Millisecond)
+	select {
+	case <-finished:
+		t.Fatalf("blocking rpc queue Push is not blocked when it is full")
+	default:
+	}
+}
+
+func TestRpcQueueNonblockPushWhenFull(t *testing.T) {
+	maxSize := 1
+	q := newRpcQueue(maxSize)
+
+	q.Push(&RPC{}, true)
+	err := q.Push(&RPC{}, false)
+	if err != ErrQueueFull {
+		t.Fatalf("non-blocking rpc queue Push returns wrong error when it is full")
+	}
+}
+
+func TestRpcQueuePushAfterClose(t *testing.T) {
+	maxSize := 32
+	q := newRpcQueue(maxSize)
+	q.Close()
+
+	defer func() {
+		if r := recover(); r == nil {
+			t.Fatalf("rpc queue Push does not panic after the queue is closed")
+		}
+	}()
+	q.Push(&RPC{}, true)
+}
+
+func TestRpcQueuePopAfterClose(t *testing.T) {
+	maxSize := 32
+	q := newRpcQueue(maxSize)
+	q.Close()
+	_, err := q.Pop(context.Background())
+	if err != ErrQueueClosed {
+		t.Fatalf("rpc queue Pop returns wrong error after the queue is closed")
+	}
+}
+
+func TestRpcQueueCloseWhilePush(t *testing.T) {
+	maxSize := 1
+	q := newRpcQueue(maxSize)
+	q.Push(&RPC{}, true)
+
+	defer func() {
+		if r := recover(); r == nil {
+			t.Fatalf("rpc queue Push does not panic when it's closed on the fly")
+		}
+	}()
+
+	go func() {
+		// Wait to make sure the main goroutine is blocked.
+		time.Sleep(1 * time.Millisecond)
+		q.Close()
+	}()
+	q.Push(&RPC{}, true)
+}
+
+func TestRpcQueueCloseWhilePop(t *testing.T) {
+	maxSize := 32
+	q := newRpcQueue(maxSize)
+	go func() {
+		// Wait to make sure the main goroutine is blocked.
+		time.Sleep(1 * time.Millisecond)
+		q.Close()
+	}()
+	_, err := q.Pop(context.Background())
+	if err != ErrQueueClosed {
+		t.Fatalf("rpc queue Pop returns wrong error when it's closed on the fly")
+	}
+}
+
+func TestRpcQueuePushWhenFullThenPop(t *testing.T) {
+	maxSize := 1
+	q := newRpcQueue(maxSize)
+
+	q.Push(&RPC{}, true)
+	go func() {
+		// Wait to make sure the main goroutine is blocked.
+		time.Sleep(1 * time.Millisecond)
+		q.Pop(context.Background())
+	}()
+	q.Push(&RPC{}, true)
+}
+
+func TestRpcQueueCancelPop(t *testing.T) {
+	maxSize := 32
+	q := newRpcQueue(maxSize)
+	ctx, cancel := context.WithCancel(context.Background())
+	go func() {
+		// Wait to make sure the main goroutine is blocked.
+ time.Sleep(1 * time.Millisecond) + cancel() + }() + _, err := q.Pop(ctx) + if err != ErrQueueCancelled { + t.Fatalf("rpc queue Pop returns wrong error when it's cancelled") + } +} diff --git a/subscription_filter_test.go b/subscription_filter_test.go index 8a4fe4db..0057cdcf 100644 --- a/subscription_filter_test.go +++ b/subscription_filter_test.go @@ -19,15 +19,15 @@ func TestBasicSubscriptionFilter(t *testing.T) { topic3 := "test3" yes := true subs := []*pb.RPC_SubOpts{ - &pb.RPC_SubOpts{ + { Topicid: &topic1, Subscribe: &yes, }, - &pb.RPC_SubOpts{ + { Topicid: &topic2, Subscribe: &yes, }, - &pb.RPC_SubOpts{ + { Topicid: &topic3, Subscribe: &yes, }, @@ -108,24 +108,24 @@ func TestSubscriptionFilterDeduplication(t *testing.T) { yes := true no := false subs := []*pb.RPC_SubOpts{ - &pb.RPC_SubOpts{ + { Topicid: &topic1, Subscribe: &yes, }, - &pb.RPC_SubOpts{ + { Topicid: &topic1, Subscribe: &yes, }, - &pb.RPC_SubOpts{ + { Topicid: &topic2, Subscribe: &yes, }, - &pb.RPC_SubOpts{ + { Topicid: &topic2, Subscribe: &no, }, - &pb.RPC_SubOpts{ + { Topicid: &topic3, Subscribe: &yes, }, @@ -150,7 +150,7 @@ func TestSubscriptionFilterRPC(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) ps1 := getPubsub(ctx, hosts[0], WithSubscriptionFilter(NewAllowlistSubscriptionFilter("test1", "test2"))) ps2 := getPubsub(ctx, hosts[1], WithSubscriptionFilter(NewAllowlistSubscriptionFilter("test2", "test3"))) diff --git a/timecache/time_cache.go b/timecache/time_cache.go index e33bc354..ee34fd5b 100644 --- a/timecache/time_cache.go +++ b/timecache/time_cache.go @@ -2,12 +2,8 @@ package timecache import ( "time" - - logger "github.com/ipfs/go-log/v2" ) -var log = logger.Logger("pubsub/timecache") - // Stategy is the TimeCache expiration strategy to use. 
type Strategy uint8 diff --git a/topic_test.go b/topic_test.go index 9ad3146d..ef05feb4 100644 --- a/topic_test.go +++ b/topic_test.go @@ -99,7 +99,7 @@ func testTopicCloseWithOpenResource(t *testing.T, openResource func(topic *Topic const numHosts = 1 topicID := "foobar" - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) ps := getPubsub(ctx, hosts[0]) // Try create and cancel topic @@ -139,7 +139,7 @@ func TestTopicReuse(t *testing.T) { const numHosts = 2 topicID := "foobar" - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) sender := getPubsub(ctx, hosts[0], WithDiscovery(&dummyDiscovery{})) receiver := getPubsub(ctx, hosts[1]) @@ -233,7 +233,7 @@ func TestTopicEventHandlerCancel(t *testing.T) { const numHosts = 5 topicID := "foobar" - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) ps := getPubsub(ctx, hosts[0]) // Try create and cancel topic @@ -265,7 +265,7 @@ func TestSubscriptionJoinNotification(t *testing.T) { const numLateSubscribers = 10 const numHosts = 20 - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) topics := getTopics(getPubsubs(ctx, hosts), "foobar") evts := getTopicEvts(topics) @@ -331,7 +331,7 @@ func TestSubscriptionLeaveNotification(t *testing.T) { defer cancel() const numHosts = 20 - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) psubs := getPubsubs(ctx, hosts) topics := getTopics(psubs, "foobar") evts := getTopicEvts(topics) @@ -416,7 +416,7 @@ func TestSubscriptionManyNotifications(t *testing.T) { const topic = "foobar" const numHosts = 33 - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) topics := getTopics(getPubsubs(ctx, hosts), topic) evts := getTopicEvts(topics) @@ -521,7 +521,7 @@ func TestSubscriptionNotificationSubUnSub(t *testing.T) { const topic = "foobar" const numHosts = 35 - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) topics := getTopics(getPubsubs(ctx, hosts), topic) for i := 1; i < numHosts; i++ { @@ -539,7 +539,7 @@ func TestTopicRelay(t *testing.T) { const topic = "foobar" const numHosts = 5 - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) topics := getTopics(getPubsubs(ctx, hosts), topic) // [0.Rel] - [1.Rel] - [2.Sub] @@ -603,7 +603,7 @@ func TestTopicRelayReuse(t *testing.T) { const topic = "foobar" const numHosts = 1 - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) pubsubs := getPubsubs(ctx, hosts) topics := getTopics(pubsubs, topic) @@ -670,7 +670,7 @@ func TestTopicRelayOnClosedTopic(t *testing.T) { const topic = "foobar" const numHosts = 1 - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) topics := getTopics(getPubsubs(ctx, hosts), topic) err := topics[0].Close() @@ -690,7 +690,7 @@ func TestProducePanic(t *testing.T) { const numHosts = 5 topicID := "foobar" - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) ps := getPubsub(ctx, hosts[0]) // Create topic @@ -743,7 +743,7 @@ func notifSubThenUnSub(ctx context.Context, t *testing.T, topics []*Topic) { } // Wait for the unsubscribe messages to reach the primary peer - for len(primaryTopic.ListPeers()) < 0 { + for len(primaryTopic.ListPeers()) > 0 { time.Sleep(time.Millisecond * 100) } @@ -792,7 +792,7 @@ func TestMinTopicSizeNoDiscovery(t *testing.T) { const numHosts = 3 topicID := "foobar" - hosts := getNetHosts(t, ctx, numHosts) + hosts := 
getDefaultHosts(t, numHosts) sender := getPubsub(ctx, hosts[0]) receiver1 := getPubsub(ctx, hosts[1]) @@ -872,7 +872,7 @@ func TestWithTopicMsgIdFunction(t *testing.T) { const topicA, topicB = "foobarA", "foobarB" const numHosts = 2 - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) pubsubs := getPubsubs(ctx, hosts, WithMessageIdFn(func(pmsg *pb.Message) string { hash := sha256.Sum256(pmsg.Data) return string(hash[:]) @@ -932,7 +932,7 @@ func TestTopicPublishWithKeyInvalidParameters(t *testing.T) { const numHosts = 5 virtualPeer := tnet.RandPeerNetParamsOrFatal(t) - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) topics := getTopics(getPubsubs(ctx, hosts), topic) t.Run("nil sign private key should error", func(t *testing.T) { @@ -959,7 +959,7 @@ func TestTopicRelayPublishWithKey(t *testing.T) { const numHosts = 5 virtualPeer := tnet.RandPeerNetParamsOrFatal(t) - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) topics := getTopics(getPubsubs(ctx, hosts), topic) // [0.Rel] - [1.Rel] - [2.Sub] @@ -1026,7 +1026,7 @@ func TestWithLocalPublication(t *testing.T) { const topic = "test" - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) pubsubs := getPubsubs(ctx, hosts) topics := getTopics(pubsubs, topic) connectAll(t, hosts) diff --git a/trace.go b/trace.go index 27fac289..7dbb5409 100644 --- a/trace.go +++ b/trace.go @@ -402,11 +402,23 @@ func (t *pubsubTracer) traceRPCMeta(rpc *RPC) *pb.TraceEvent_RPCMeta { }) } + var idontwant []*pb.TraceEvent_ControlIDontWantMeta + for _, ctl := range rpc.Control.Idontwant { + var mids [][]byte + for _, mid := range ctl.MessageIDs { + mids = append(mids, []byte(mid)) + } + idontwant = append(idontwant, &pb.TraceEvent_ControlIDontWantMeta{ + MessageIDs: mids, + }) + } + rpcMeta.Control = &pb.TraceEvent_ControlMeta{ - Ihave: ihave, - Iwant: iwant, - Graft: graft, - Prune: prune, + Ihave: ihave, + Iwant: iwant, + Graft: graft, + Prune: prune, + Idontwant: idontwant, } } diff --git a/trace_test.go b/trace_test.go index fb8cb56d..287216f1 100644 --- a/trace_test.go +++ b/trace_test.go @@ -17,9 +17,7 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peerstore" - bhost "github.com/libp2p/go-libp2p/p2p/host/blank" - swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing" - + //lint:ignore SA1019 "github.com/libp2p/go-msgio/protoio" is deprecated "github.com/libp2p/go-msgio/protoio" ) @@ -27,7 +25,7 @@ func testWithTracer(t *testing.T, tracer EventTracer) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getGossipsubs(ctx, hosts, WithEventTracer(tracer), // to bootstrap from star topology @@ -302,10 +300,9 @@ func TestRemoteTracer(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - h1 := bhost.NewBlankHost(swarmt.GenSwarm(t)) - h2 := bhost.NewBlankHost(swarmt.GenSwarm(t)) - defer h1.Close() - defer h2.Close() + hosts := getDefaultHosts(t, 2) + h1 := hosts[0] + h2 := hosts[1] mrt := &mockRemoteTracer{} h1.SetStreamHandler(RemoteTracerProtoID, mrt.handleStream) diff --git a/tracer.go b/tracer.go index 8e744c91..cbb92ad7 100644 --- a/tracer.go +++ b/tracer.go @@ -17,6 +17,7 @@ import ( "github.com/libp2p/go-libp2p/core/peerstore" "github.com/libp2p/go-libp2p/core/protocol" + //lint:ignore SA1019 "github.com/libp2p/go-msgio/protoio" is deprecated "github.com/libp2p/go-msgio/protoio" ) diff 
--git a/validation_builtin_test.go b/validation_builtin_test.go index df406f26..bca8774c 100644 --- a/validation_builtin_test.go +++ b/validation_builtin_test.go @@ -38,7 +38,7 @@ func testBasicSeqnoValidator(t *testing.T, ttl time.Duration) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getPubsubsWithOptionC(ctx, hosts, func(i int) Option { return WithDefaultValidator(NewBasicSeqnoValidator(newMockPeerMetadataStore())) @@ -86,7 +86,7 @@ func TestBasicSeqnoValidatorReplay(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getPubsubsWithOptionC(ctx, hosts[:19], func(i int) Option { return WithDefaultValidator(NewBasicSeqnoValidator(newMockPeerMetadataStore())) @@ -246,7 +246,7 @@ func (r *replayActor) replay(msg *pb.Message) { var peers []peer.ID r.mx.Lock() - for p, _ := range r.out { + for p := range r.out { if rng.Intn(2) > 0 { peers = append(peers, p) } diff --git a/validation_test.go b/validation_test.go index b56e7677..0a09f70b 100644 --- a/validation_test.go +++ b/validation_test.go @@ -15,7 +15,7 @@ func TestRegisterUnregisterValidator(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 1) + hosts := getDefaultHosts(t, 1) psubs := getPubsubs(ctx, hosts) err := psubs[0].RegisterTopicValidator("foo", func(context.Context, peer.ID, *Message) bool { @@ -40,7 +40,7 @@ func TestRegisterValidatorEx(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 3) + hosts := getDefaultHosts(t, 3) psubs := getPubsubs(ctx, hosts) err := psubs[0].RegisterTopicValidator("test", @@ -69,7 +69,7 @@ func TestValidate(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) psubs := getPubsubs(ctx, hosts) connect(t, hosts[0], hosts[1]) @@ -123,7 +123,7 @@ func TestValidate2(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 1) + hosts := getDefaultHosts(t, 1) psubs := getPubsubs(ctx, hosts) topic := "foobar" @@ -201,7 +201,7 @@ func TestValidateOverload(t *testing.T) { for tci, tc := range tcs { t.Run(fmt.Sprintf("%d", tci), func(t *testing.T) { - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) psubs := getPubsubs(ctx, hosts) connect(t, hosts[0], hosts[1]) @@ -273,7 +273,7 @@ func TestValidateAssortedOptions(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 10) + hosts := getDefaultHosts(t, 10) psubs := getPubsubs(ctx, hosts, WithValidateQueueSize(10), WithValidateThrottle(10), diff --git a/version.json b/version.json new file mode 100644 index 00000000..ea22ea59 --- /dev/null +++ b/version.json @@ -0,0 +1,3 @@ +{ + "version": "v0.11.0" +}
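
A minimal usage sketch (not part of the patch) of the rpcQueue added in rpc_queue.go, assuming code living in the pubsub package: urgent pushes are drained before normal ones, a non-blocking push fails fast with ErrQueueFull, and Pop blocks until an item arrives, the queue is closed, or the context is cancelled. The function name and values below are illustrative only.

// Illustrative only; mirrors the semantics of rpc_queue.go above.
func exampleRpcQueueUsage(ctx context.Context) error {
	q := newRpcQueue(2) // room for two RPCs

	hello := &RPC{}
	urgent := &RPC{}

	// Non-blocking push: returns ErrQueueFull instead of waiting when the queue is full.
	if err := q.Push(hello, false); err != nil {
		return err
	}
	// Urgent push: this RPC will be popped before any normal RPC already queued.
	if err := q.UrgentPush(urgent, true); err != nil {
		return err
	}

	// Pop blocks until an RPC is available, the queue is closed (ErrQueueClosed),
	// or ctx is cancelled (ErrQueueCancelled).
	first, err := q.Pop(ctx)
	if err != nil {
		return err
	}
	_ = first // this is `urgent`, because priority items are drained first

	q.Close() // wakes any blocked Pop/Push; further pushes panic
	return nil
}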
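Because PubSubRouter now requires a PreValidation([]*Message) method, any out-of-tree router has to add one when picking up this change. A no-op is sufficient when the router has nothing to do before messages enter the validation pipeline, mirroring the RandomSubRouter stub in randomsub.go above; MyRouter here is a hypothetical type, not part of the patch.

// Hypothetical out-of-tree router; only the new method is shown.
type MyRouter struct{}

// PreValidation receives the accepted messages of an incoming RPC right
// before they are pushed into the validation pipeline. Routers that do not
// need this hook can leave it empty.
func (r *MyRouter) PreValidation([]*Message) {}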
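And since rpcWithControl and the tracer now carry the new IDONTWANT control entry, here is a hedged sketch of constructing such an RPC, using the pb.ControlIDontWant field name seen in the trace.go changes; when a router should actually send IDONTWANT is outside the scope of this patch, and the helper name is made up for illustration.

// Builds an RPC telling a peer we do not want the given message IDs.
// Sketch only: assumes the updated rpcWithControl signature from comm.go
// and the pb.ControlIDontWant message referenced in trace.go.
func idontwantRPC(mids []string) *RPC {
	idontwant := []*pb.ControlIDontWant{{MessageIDs: mids}}
	// No payload messages and no other control entries, only IDONTWANT.
	return rpcWithControl(nil, nil, nil, nil, nil, idontwant)
}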