Skip to content

Commit 676e557

Browse files
committed
test: port peering test from sharness to Go
This is the slowest test in the sharness test suite, because it has very long sleeps. It usually takes 2+ minutes to run. This new impl runs all peering tests in about 20 seconds, since it polls for conditions instead of sleeping, and runs the tests in parallel. This also has an additional test case for a peer that was never online and then connects.
1 parent 7f7a5ab commit 676e557

File tree

7 files changed

+358
-145
lines changed

7 files changed

+358
-145
lines changed

Diff for: test/cli/harness/harness.go

+21
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,8 @@ import (
1111

1212
logging "github.com/ipfs/go-log/v2"
1313
. "github.com/ipfs/kubo/test/cli/testutils"
14+
"github.com/libp2p/go-libp2p/core/peer"
15+
"github.com/multiformats/go-multiaddr"
1416
)
1517

1618
// Harness tracks state for a test, such as temp dirs and IPFS nodes, and cleans them up after the test.
@@ -188,3 +190,22 @@ func (h *Harness) Cleanup() {
188190
log.Panicf("removing temp dir %s: %s", h.Dir, err)
189191
}
190192
}
193+
194+
// ExtractPeerID extracts a peer ID from the given multiaddr, and fatals if it does not contain a peer ID.
195+
func (h *Harness) ExtractPeerID(m multiaddr.Multiaddr) peer.ID {
196+
var peerIDStr string
197+
multiaddr.ForEach(m, func(c multiaddr.Component) bool {
198+
if c.Protocol().Code == multiaddr.P_P2P {
199+
peerIDStr = c.Value()
200+
}
201+
return true
202+
})
203+
if peerIDStr == "" {
204+
panic(multiaddr.ErrProtocolNotFound)
205+
}
206+
peerID, err := peer.Decode(peerIDStr)
207+
if err != nil {
208+
panic(err)
209+
}
210+
return peerID
211+
}

Diff for: test/cli/harness/log.go

+155
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,155 @@
1+
package harness
2+
3+
import (
4+
"fmt"
5+
"path/filepath"
6+
"runtime"
7+
"sort"
8+
"strings"
9+
"sync"
10+
"testing"
11+
"time"
12+
)
13+
14+
// event is a single buffered log entry together with the time it was recorded.
type event struct {
	timestamp time.Time
	msg       string
}

// events implements sort.Interface, ordering entries chronologically by timestamp.
type events []*event

func (e events) Len() int           { return len(e) }
func (e events) Less(i, j int) bool { return e[i].timestamp.Before(e[j].timestamp) }
func (e events) Swap(i, j int)      { e[i], e[j] = e[j], e[i] }
24+
25+
// TestLogger is a logger for tests.
// It buffers output and only writes the output if the test fails or output is explicitly turned on.
// The purpose of this logger is to allow Go test to run with the verbose flag without printing logs.
// The verbose flag is useful since it streams test progress, but also printing logs makes the output too verbose.
//
// You can also add prefixes that are prepended to each log message, for extra logging context.
//
// This is implemented as a hierarchy of loggers, with children flushing log entries back to parents.
// This works because t.Cleanup() processes entries in LIFO order, so children always flush first.
//
// Obviously this logger should never be used in production systems.
type TestLogger struct {
	parent        *TestLogger   // nil for the root logger
	children      []*TestLogger // loggers created from this one via AddPrefix
	prefixes      []string      // prefixes prepended (colon-joined) to each message
	prefixesIface []any         // same prefixes, pre-boxed as []any
	t             *testing.T
	buf           events     // buffered entries, emitted (or dropped) by flush
	m             sync.Mutex // guards buf, children, and logsEnabled
	logsEnabled   bool       // when true, flush prints even if the test passed
}
46+
47+
func NewTestLogger(t *testing.T) *TestLogger {
48+
l := &TestLogger{t: t, buf: make(events, 0)}
49+
t.Cleanup(l.flush)
50+
return l
51+
}
52+
53+
func (t *TestLogger) buildPrefix(timestamp time.Time) string {
54+
d := timestamp.Format("2006-01-02T15:04:05.999999")
55+
_, file, lineno, _ := runtime.Caller(2)
56+
file = filepath.Base(file)
57+
caller := fmt.Sprintf("%s:%d", file, lineno)
58+
59+
if len(t.prefixes) == 0 {
60+
return fmt.Sprintf("%s\t%s\t", d, caller)
61+
}
62+
63+
prefixes := strings.Join(t.prefixes, ":")
64+
return fmt.Sprintf("%s\t%s\t%s: ", d, caller, prefixes)
65+
}
66+
67+
func (t *TestLogger) Log(args ...any) {
68+
timestamp := time.Now()
69+
e := t.buildPrefix(timestamp) + fmt.Sprint(args...)
70+
t.add(&event{timestamp: timestamp, msg: e})
71+
}
72+
73+
func (t *TestLogger) Logf(format string, args ...any) {
74+
timestamp := time.Now()
75+
e := t.buildPrefix(timestamp) + fmt.Sprintf(format, args...)
76+
t.add(&event{timestamp: timestamp, msg: e})
77+
}
78+
79+
func (t *TestLogger) Fatal(args ...any) {
80+
timestamp := time.Now()
81+
e := t.buildPrefix(timestamp) + fmt.Sprint(append([]any{"fatal: "}, args...)...)
82+
t.add(&event{timestamp: timestamp, msg: e})
83+
t.t.FailNow()
84+
}
85+
86+
func (t *TestLogger) Fatalf(format string, args ...any) {
87+
timestamp := time.Now()
88+
e := t.buildPrefix(timestamp) + fmt.Sprintf(fmt.Sprintf("fatal: %s", format), args...)
89+
t.add(&event{timestamp: timestamp, msg: e})
90+
t.t.FailNow()
91+
}
92+
93+
func (t *TestLogger) add(e *event) {
94+
t.m.Lock()
95+
defer t.m.Unlock()
96+
t.buf = append(t.buf, e)
97+
}
98+
99+
func (t *TestLogger) AddPrefix(prefix string) *TestLogger {
100+
l := &TestLogger{
101+
prefixes: append(t.prefixes, prefix),
102+
prefixesIface: append(t.prefixesIface, prefix),
103+
t: t.t,
104+
parent: t,
105+
logsEnabled: t.logsEnabled,
106+
}
107+
t.m.Lock()
108+
defer t.m.Unlock()
109+
110+
t.children = append(t.children, l)
111+
t.t.Cleanup(l.flush)
112+
113+
return l
114+
}
115+
116+
func (t *TestLogger) EnableLogs() {
117+
t.m.Lock()
118+
defer t.m.Unlock()
119+
t.logsEnabled = true
120+
if t.parent != nil {
121+
if t.parent.logsEnabled {
122+
t.parent.EnableLogs()
123+
}
124+
}
125+
fmt.Printf("enabling %d children\n", len(t.children))
126+
for _, c := range t.children {
127+
if !c.logsEnabled {
128+
c.EnableLogs()
129+
}
130+
}
131+
}
132+
133+
// flush emits this logger's buffered entries, but only if the test failed or
// logs were explicitly enabled; otherwise the buffer is silently discarded.
// flush is registered via t.Cleanup, which runs in LIFO order, so children
// always flush (into their parent) before the root flushes and prints.
func (t *TestLogger) flush() {
	if t.t.Failed() || t.logsEnabled {
		t.m.Lock()
		defer t.m.Unlock()
		// if this is a child, send the events to the parent
		// the root parent will print all the events in sorted order
		if t.parent != nil {
			for _, e := range t.buf {
				t.parent.add(e)
			}
		} else {
			// we're the root, sort all the events and then print them
			sort.Sort(t.buf)
			fmt.Println()
			fmt.Printf("Logs for test %q:\n\n", t.t.Name())
			for _, e := range t.buf {
				fmt.Println(e.msg)
			}
			fmt.Println()
		}
		// drop the buffer so a second flush cannot re-emit entries
		t.buf = nil
	}
}

Diff for: test/cli/harness/node.go

+23-2
Original file line numberDiff line numberDiff line change
@@ -453,9 +453,8 @@ func (n *Node) Peers() []multiaddr.Multiaddr {
453453
Path: n.IPFSBin,
454454
Args: []string{"swarm", "peers"},
455455
})
456-
lines := strings.Split(strings.TrimSpace(res.Stdout.String()), "\n")
457456
var addrs []multiaddr.Multiaddr
458-
for _, line := range lines {
457+
for _, line := range res.Stdout.Lines() {
459458
ma, err := multiaddr.NewMultiaddr(line)
460459
if err != nil {
461460
panic(err)
@@ -465,6 +464,28 @@ func (n *Node) Peers() []multiaddr.Multiaddr {
465464
return addrs
466465
}
467466

467+
func (n *Node) PeerWith(other *Node) {
468+
n.UpdateConfig(func(cfg *config.Config) {
469+
var addrs []multiaddr.Multiaddr
470+
for _, addrStr := range other.ReadConfig().Addresses.Swarm {
471+
ma, err := multiaddr.NewMultiaddr(addrStr)
472+
if err != nil {
473+
panic(err)
474+
}
475+
addrs = append(addrs, ma)
476+
}
477+
478+
cfg.Peering.Peers = append(cfg.Peering.Peers, peer.AddrInfo{
479+
ID: other.PeerID(),
480+
Addrs: addrs,
481+
})
482+
})
483+
}
484+
485+
func (n *Node) Disconnect(other *Node) {
486+
n.IPFS("swarm", "disconnect", "/p2p/"+other.PeerID().String())
487+
}
488+
468489
// GatewayURL waits for the gateway file and then returns its contents or times out.
469490
func (n *Node) GatewayURL() string {
470491
timer := time.NewTimer(1 * time.Second)

Diff for: test/cli/harness/nodes.go

+4-16
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@ package harness
33
import (
44
"sync"
55

6+
. "github.com/ipfs/kubo/test/cli/testutils"
67
"github.com/multiformats/go-multiaddr"
78
"golang.org/x/sync/errgroup"
89
)
@@ -11,9 +12,7 @@ import (
1112
type Nodes []*Node
1213

1314
// Init initializes every node in parallel, forwarding args to each node's Init.
func (n Nodes) Init(args ...string) Nodes {
	ForEachPar(n, func(node *Node) { node.Init(args...) })
	return n
}
1918

@@ -59,22 +58,11 @@ func (n Nodes) Connect() Nodes {
5958
}
6059

6160
// StartDaemons starts every node's daemon in parallel, returning once all have started.
func (n Nodes) StartDaemons() Nodes {
	ForEachPar(n, func(node *Node) { node.StartDaemon() })
	return n
}
7464

7565
// StopDaemons stops every node's daemon in parallel, returning once all have stopped.
func (n Nodes) StopDaemons() Nodes {
	ForEachPar(n, func(node *Node) { node.StopDaemon() })
	return n
}

0 commit comments

Comments
 (0)