Merge pull request #953 from ipfs/fix/tests2

Multiple fixes to tests:

* Travis: run the main test suite with crdt consensus by default and move raft to its own job, using travis_wait and a 15m timeout like the stateless-tracker job.
* lbclient test: give the failing clients a valid (if unreachable) multiaddr and declare err per goroutine instead of racing on a shared variable.
* ipfs-cluster-ctl graph: iterate cluster edges in one stable sorted order so the DOT output is deterministic, and compare test output line by line without sorting.
* Testing configs: relax monitor, raft, crdt and disk-informer timings to reduce flakiness.
* ipfscluster_test: compute the ttlDelay() sleep once in TestMain from the disk informer's MetricTTL, and disable QUIC listen addresses for test hosts as QUIC is proving a bit unstable.
commit 9db16092cb
Hector Sanjuan 2019-11-08 12:47:03 +01:00, committed by GitHub (GPG Key ID: 4AEE18F83AFDEB23)
8 changed files with 55 additions and 67 deletions


@@ -27,12 +27,12 @@ jobs:
       - travis_wait go test -v -timeout 15m -coverprofile=coverage.txt -covermode=atomic ./...
     after_success:
       - bash <(curl -s https://codecov.io/bash)
-  - name: "Main Tests with crdt consensus"
+  - name: "Main Tests with raft consensus"
     script:
-      - go test -v -failfast -consensus crdt .
+      - travis_wait go test -v -timeout 15m -failfast -consensus raft .
   - name: "Main Tests with stateless tracker"
     script:
-      - go test -v -failfast -tracker stateless .
+      - travis_wait go test -v -timeout 15m -failfast -tracker stateless .
   - name: "Golint and go vet"
     script:
       - go get -u golang.org/x/lint/golint


@@ -13,9 +13,9 @@ func TestLBClient(t *testing.T) {
 	// say we want to retry the request for at most 5 times
 	cfgs := make([]*Config, 10)

-	// 5 empty clients
+	// 5 clients with an invalid api address
 	for i := 0; i < 5; i++ {
-		maddr, _ := ma.NewMultiaddr("")
+		maddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0")
 		cfgs[i] = &Config{
 			APIAddr:           maddr,
 			DisableKeepAlives: true,
@@ -51,7 +51,7 @@ func testRunManyRequestsConcurrently(t *testing.T, cfgs []*Config, strategy LBSt
 		go func() {
 			defer wg.Done()
 			ctx := context.Background()
-			_, err = c.ID(ctx)
+			_, err := c.ID(ctx)
 			if err != nil && pass {
 				t.Error(err)
 			}
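A note on the second hunk: assigning with `=` to a single err variable captured by every goroutine is a data race, while `:=` declares a fresh, goroutine-local variable. A minimal runnable sketch of the difference, with a hypothetical doRequest standing in for c.ID:

package main

import (
	"errors"
	"sync"
)

// doRequest is an illustrative stand-in for c.ID(ctx).
func doRequest() (int, error) { return 0, errors.New("boom") }

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// With a shared `var err error` outside the loop and
			// `_, err = doRequest()`, every goroutine writes the same
			// variable concurrently (flagged by `go test -race`).
			// `:=` gives each goroutine its own err, as in the fix above.
			_, err := doRequest()
			_ = err
		}()
	}
	wg.Wait()
}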


@@ -167,7 +167,8 @@ func (dW *dotWriter) print() error {
 	// Write cluster nodes, use sorted order for consistent labels
 	sGraphCluster := dot.NewGraph("")
 	sGraphCluster.IsSubGraph = true
-	for _, k := range sortedKeys(dW.clusterEdges) {
+	sortedClusterEdges := sortedKeys(dW.clusterEdges)
+	for _, k := range sortedClusterEdges {
 		var err error
 		if k == dW.self {
 			err = dW.addNode(&sGraphCluster, k, tSelfCluster)
@@ -187,7 +188,7 @@ func (dW *dotWriter) print() error {
 	sGraphIPFS := dot.NewGraph("")
 	sGraphIPFS.IsSubGraph = true
 	// Write ipfs nodes, use sorted order for consistent labels
-	for _, k := range sortedKeys(dW.clusterEdges) {
+	for _, k := range sortedClusterEdges {
 		err := dW.addNode(&sGraphIPFS, k, tIPFS)
 		if err != nil {
 			return err
@@ -199,7 +200,8 @@ func (dW *dotWriter) print() error {
 	dW.dotGraph.AddComment("Edges representing active connections in the cluster")
 	dW.dotGraph.AddComment("The connections among cluster-service peers")
 	// Write cluster edges
-	for k, v := range dW.clusterEdges {
+	for _, k := range sortedClusterEdges {
+		v := dW.clusterEdges[k]
 		for _, id := range v {
 			toNode := dW.clusterNodes[k]
 			fromNode := dW.clusterNodes[peer.IDB58Encode(id)]
@@ -210,7 +212,7 @@ func (dW *dotWriter) print() error {

 	dW.dotGraph.AddComment("The connections between cluster peers and their ipfs daemons")
 	// Write cluster to ipfs edges
-	for k := range dW.clusterEdges {
+	for _, k := range sortedClusterEdges {
 		var fromNode *dot.VertexDescription
 		toNode := dW.clusterNodes[k]
 		ipfsID, ok := dW.clusterIpfsEdges[k]
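Background on this change: Go randomizes map iteration order, so ranging over dW.clusterEdges directly emitted nodes and edges in a different order on every run, which is why the test below had to sort output lines before comparing. Computing the sorted key slice once and reusing it for every loop makes the DOT output fully deterministic. A minimal sketch of what a sortedKeys helper like the one used here does (signature assumed; the real helper lives in graph.go):

package main

import (
	"fmt"
	"sort"
)

// sortedKeys returns a map's keys in lexicographic order, so that a
// traversal driven by the returned slice is reproducible.
func sortedKeys(m map[string][]string) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}

func main() {
	edges := map[string][]string{"QmB": {"QmA"}, "QmA": {"QmB"}}
	for _, k := range sortedKeys(edges) {
		fmt.Println(k, "->", edges[k]) // always prints QmA before QmB
	}
}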


@@ -3,7 +3,6 @@ package main
 import (
 	"bytes"
 	"fmt"
-	"sort"
 	"strings"
 	"testing"

@@ -18,15 +17,13 @@ func verifyOutput(t *testing.T, outStr string, trueStr string) {
 	// line.
 	outLines := strings.Split(outStr, "\n")
 	trueLines := strings.Split(trueStr, "\n")
-	sort.Strings(outLines)
-	sort.Strings(trueLines)
 	if len(outLines) != len(trueLines) {
-		fmt.Printf("expected: %s\n actual: %s", trueStr, outStr)
+		fmt.Printf("expected:\n-%s-\n\n\nactual:\n-%s-", trueStr, outStr)
 		t.Fatal("Number of output lines does not match expectation")
 	}
 	for i := range outLines {
 		if outLines[i] != trueLines[i] {
-			t.Errorf("Difference in sorted outputs: %s vs %s", outLines[i], trueLines[i])
+			t.Errorf("Difference in sorted outputs (%d): %s vs %s", i, outLines[i], trueLines[i])
 		}
 	}
 }
@@ -36,21 +33,19 @@ var simpleIpfs = `digraph cluster {
 /* The cluster-service peers */
 subgraph {
 rank="min"
-C0 [label=< <B> </B> <BR/> <B> Qm*EhD </B> > group="QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD" color="11" style="filled" colorscheme="brbg11" fontcolor="6" fontname="Ariel" shape="ellipse" peripheries="2" ]
-C1 [label=< <B> </B> <BR/> <B> Qm*DQJ </B> > group="QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ" color="9" style="filled" colorscheme="brbg11" fontcolor="6" fontname="Ariel" shape="ellipse" ]
-C2 [label=< <B> </B> <BR/> <B> Qm*mJu </B> > group="QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu" color="9" style="filled" colorscheme="brbg11" fontcolor="6" fontname="Ariel" shape="ellipse" ]
+C0 [label=< <B> </B> <BR/> <B> Qm*eqhEhD </B> > group="QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD" color="orange" style="filled" colorscheme="x11" fontcolor="black" fontname="Ariel" shape="box3d" peripheries="2" ]
+C1 [label=< <B> </B> <BR/> <B> Qm*cgHDQJ </B> > group="QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ" color="darkorange3" style="filled" colorscheme="x11" fontcolor="black" fontname="Ariel" shape="box3d" ]
+C2 [label=< <B> </B> <BR/> <B> Qm*6MQmJu </B> > group="QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu" color="darkorange3" style="filled" colorscheme="x11" fontcolor="black" fontname="Ariel" shape="box3d" ]
 }

 /* The ipfs peers linked to cluster peers */
 subgraph {
 rank="max"
-I0 [label=< <B> IPFS </B> <BR/> <B> Qm*ZDV </B> > group="QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD" color="1" style="filled" colorscheme="brbg11" fontcolor="6" fontname="Ariel" shape="box" ]
-I1 [label=< <B> IPFS </B> <BR/> <B> Qm*Ssq </B> > group="QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ" color="1" style="filled" colorscheme="brbg11" fontcolor="6" fontname="Ariel" shape="box" ]
-I2 [label=< <B> IPFS </B> <BR/> <B> Qm*suL </B> > group="QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu" color="1" style="filled" colorscheme="brbg11" fontcolor="6" fontname="Ariel" shape="box" ]
+I0 [label=< <B> IPFS </B> <BR/> <B> Qm*R3DZDV </B> > group="QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Ariel" shape="cylinder" ]
+I1 [label=< <B> IPFS </B> <BR/> <B> Qm*N5LSsq </B> > group="QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Ariel" shape="cylinder" ]
+I2 [label=< <B> IPFS </B> <BR/> <B> Qm*wbBsuL </B> > group="QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Ariel" shape="cylinder" ]
 }

-/* The ipfs swarm peers */
-
 /* Edges representing active connections in the cluster */
 /* The connections among cluster-service peers */
 C0 -> C1
@@ -137,32 +132,27 @@ var allIpfs = `digraph cluster {
 /* The cluster-service peers */
 subgraph {
 rank="min"
-C0 [label=< <B> </B> <BR/> <B> Qm*EhD </B> > group="QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD" color="11" style="filled" colorscheme="brbg11" fontcolor="6" fontname="Ariel" shape="ellipse" peripheries="2" ]
-C1 [label=< <B> </B> <BR/> <B> Qm*DQJ </B> > group="QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ" color="9" style="filled" colorscheme="brbg11" fontcolor="6" fontname="Ariel" shape="ellipse" ]
-C2 [label=< <B> </B> <BR/> <B> Qm*mJu </B> > group="QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu" color="9" style="filled" colorscheme="brbg11" fontcolor="6" fontname="Ariel" shape="ellipse" ]
+C0 [label=< <B> </B> <BR/> <B> Qm*eqhEhD </B> > group="QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD" color="orange" style="filled" colorscheme="x11" fontcolor="black" fontname="Ariel" shape="box3d" peripheries="2" ]
+C1 [label=< <B> </B> <BR/> <B> Qm*cgHDQJ </B> > group="QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ" color="darkorange3" style="filled" colorscheme="x11" fontcolor="black" fontname="Ariel" shape="box3d" ]
+C2 [label=< <B> </B> <BR/> <B> Qm*6MQmJu </B> > group="QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu" color="darkorange3" style="filled" colorscheme="x11" fontcolor="black" fontname="Ariel" shape="box3d" ]
 }

 /* The ipfs peers linked to cluster peers */
 subgraph {
 rank="max"
-I0 [label=< <B> IPFS </B> <BR/> <B> Qm*ZDV </B> > group="QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD" color="1" style="filled" colorscheme="brbg11" fontcolor="6" fontname="Ariel" shape="box" ]
-I1 [label=< <B> IPFS </B> <BR/> <B> Qm*Ssq </B> > group="QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ" color="1" style="filled" colorscheme="brbg11" fontcolor="6" fontname="Ariel" shape="box" ]
-I2 [label=< <B> IPFS </B> <BR/> <B> Qm*suL </B> > group="QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu" color="1" style="filled" colorscheme="brbg11" fontcolor="6" fontname="Ariel" shape="box" ]
+I0 [label=< <B> IPFS </B> <BR/> <B> Qm*R3DZDV </B> > group="QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Ariel" shape="cylinder" ]
+I1 [label=< <B> IPFS </B> <BR/> <B> Qm*N5LSsq </B> > group="QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Ariel" shape="cylinder" ]
+I2 [label=< <B> IPFS </B> <BR/> <B> Qm*wbBsuL </B> > group="QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Ariel" shape="cylinder" ]
 }

-/* The ipfs swarm peers */
-I3 [label=< <B> IPFS </B> <BR/> <B> Qm*ccb </B> > group="QmQsdAdCHs4PRLi5tcoLfasYppryqQENxgAy4b2aS8xccb" color="5" style="filled" colorscheme="brbg11" fontcolor="1" fontname="Ariel" shape="box" ]
-I4 [label=< <B> IPFS </B> <BR/> <B> Qm*nM8 </B> > group="QmVV2enwXqqQf5esx4v36UeaFQvFehSPzNfi8aaaaaanM8" color="5" style="filled" colorscheme="brbg11" fontcolor="1" fontname="Ariel" shape="box" ]
-I5 [label=< <B> IPFS </B> <BR/> <B> Qm*deD </B> > group="QmfCHNQ2vbUmAuJZhE2hEpgiJq4sL1XScWEKnUrVtWZdeD" color="5" style="filled" colorscheme="brbg11" fontcolor="1" fontname="Ariel" shape="box" ]
-
 /* Edges representing active connections in the cluster */
 /* The connections among cluster-service peers */
-C2 -> C0
-C2 -> C1
 C0 -> C1
 C0 -> C2
 C1 -> C0
 C1 -> C2
+C2 -> C0
+C2 -> C1

 /* The connections between cluster peers and their ipfs daemons */
 C0 -> I0
@@ -172,19 +162,10 @@ C2 -> I2
 /* The swarm peer connections among ipfs daemons in the cluster */
 I1 -> I0
 I1 -> I2
-I1 -> I3
-I1 -> I4
-I1 -> I5
 I0 -> I1
 I0 -> I2
-I0 -> I3
-I0 -> I4
-I0 -> I5
 I2 -> I0
 I2 -> I1
-I2 -> I3
-I2 -> I4
-I2 -> I5
 }`

 func TestIpfsAllGraphs(t *testing.T) {


@@ -34,33 +34,33 @@ var testingClusterCfg = []byte(`{
     "state_sync_interval": "1m0s",
     "ipfs_sync_interval": "2m10s",
     "replication_factor": -1,
-    "monitor_ping_interval": "350ms",
-    "peer_watch_interval": "200ms",
+    "monitor_ping_interval": "1s",
+    "peer_watch_interval": "1s",
     "disable_repinning": false,
     "mdns_interval": "0s"
 }`)

 var testingRaftCfg = []byte(`{
     "data_folder": "raftFolderFromTests",
-    "wait_for_leader_timeout": "10s",
+    "wait_for_leader_timeout": "5s",
     "commit_retries": 2,
     "commit_retry_delay": "50ms",
     "backups_rotate": 2,
     "network_timeout": "5s",
-    "heartbeat_timeout": "200ms",
-    "election_timeout": "200ms",
-    "commit_timeout": "150ms",
+    "heartbeat_timeout": "700ms",
+    "election_timeout": "1s",
+    "commit_timeout": "250ms",
     "max_append_entries": 256,
     "trailing_logs": 10240,
     "snapshot_interval": "2m0s",
     "snapshot_threshold": 8192,
-    "leader_lease_timeout": "200ms"
+    "leader_lease_timeout": "500ms"
 }`)

 var testingCrdtCfg = []byte(`{
     "cluster_name": "crdt-test",
     "trusted_peers": ["*"],
-    "rebroadcast_interval": "150ms"
+    "rebroadcast_interval": "250ms"
 }`)

 var testingBadgerCfg = []byte(`{
@@ -114,12 +114,12 @@ var testingTrackerCfg = []byte(`
 `)

 var testingMonCfg = []byte(`{
-    "check_interval": "550ms",
+    "check_interval": "800ms",
     "failure_threshold": 6
 }`)

 var testingDiskInfCfg = []byte(`{
-    "metric_ttl": "350ms",
+    "metric_ttl": "900ms",
     "metric_type": "freespace"
 }`)
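For reference, these JSON fixtures are consumed through each component config's LoadJSON method, which parses the duration strings into time.Duration fields. A rough sketch mirroring the disk.Config usage added to TestMain further down (import path assumed from the repository layout; error handling is illustrative):

package main

import (
	"fmt"
	"time"

	disk "github.com/ipfs/ipfs-cluster/informer/disk"
)

var testingDiskInfCfg = []byte(`{
	"metric_ttl": "900ms",
	"metric_type": "freespace"
}`)

func main() {
	cfg := &disk.Config{}
	if err := cfg.LoadJSON(testingDiskInfCfg); err != nil {
		panic(err)
	}
	// Duration strings are parsed into time.Duration fields:
	fmt.Println(cfg.MetricTTL == 900*time.Millisecond) // true
}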


@@ -56,9 +56,10 @@ var (
 	customLogLvlFacilities = logFacilities{}

 	ptracker  = "map"
-	consensus = "raft"
+	consensus = "crdt"

+	ttlDelayTime = 2 * time.Second // set on Main to diskInf.MetricTTL
 	testsFolder = "clusterTestsFolder"

 	// When testing with fixed ports...
 	// clusterPort = 10000
@@ -124,6 +125,10 @@ func TestMain(m *testing.M) {
 		}
 	}

+	diskInfCfg := &disk.Config{}
+	diskInfCfg.LoadJSON(testingDiskInfCfg)
+	ttlDelayTime = diskInfCfg.MetricTTL * 2
+
 	os.Exit(m.Run())
 }

@@ -298,14 +303,15 @@ func createHosts(t *testing.T, clusterSecret []byte, nClusters int) ([]host.Host
 	dhts := make([]*dht.IpfsDHT, nClusters, nClusters)

 	tcpaddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0")
-	quicAddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/udp/0/quic")
+	// Disable quic as it is proving a bit unstable
+	//quicAddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/udp/0/quic")
 	for i := range hosts {
 		priv, _, err := crypto.GenerateKeyPair(crypto.RSA, 2048)
 		if err != nil {
 			t.Fatal(err)
 		}

-		h, p, d := createHost(t, priv, clusterSecret, []ma.Multiaddr{quicAddr, tcpaddr})
+		h, p, d := createHost(t, priv, clusterSecret, []ma.Multiaddr{tcpaddr})
 		hosts[i] = h
 		dhts[i] = d
 		pubsubs[i] = p
@@ -382,6 +388,7 @@ func createClusters(t *testing.T) ([]*Cluster, []*test.IpfsMock) {
 	clusters[0] = createCluster(t, hosts[0], dhts[0], cfgs[0], stores[0], cons[0], apis[0], ipfss[0], trackers[0], mons[0], allocs[0], infs[0], tracers[0])
 	<-clusters[0].Ready()
 	bootstrapAddr := clusterAddr(clusters[0])

+	// Start the rest and join
 	for i := 1; i < nClusters; i++ {
 		clusters[i] = createCluster(t, hosts[i], dhts[i], cfgs[i], stores[i], cons[i], apis[i], ipfss[i], trackers[i], mons[i], allocs[i], infs[i], tracers[i])
@@ -481,9 +488,7 @@ func pinDelay() {
 }

 func ttlDelay() {
-	diskInfCfg := &disk.Config{}
-	diskInfCfg.LoadJSON(testingDiskInfCfg)
-	time.Sleep(diskInfCfg.MetricTTL * 3)
+	time.Sleep(ttlDelayTime)
 }

 // Like waitForLeader but letting metrics expire before waiting, and
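The net effect of the last two hunks: ttlDelay() now sleeps a duration computed once in TestMain (twice the disk informer's MetricTTL, i.e. 1.8s with the 900ms fixture above) instead of re-parsing the JSON config on every call. A self-contained sketch of the pattern (values copied from the fixtures; names illustrative):

package main

import (
	"fmt"
	"time"
)

// Mirrors the new TestMain flow: derive the test sleep once from the
// configured metric TTL rather than loading the config per call.
var ttlDelayTime = 2 * time.Second // default; overwritten in main

func ttlDelay() { time.Sleep(ttlDelayTime) }

func main() {
	metricTTL := 900 * time.Millisecond // "metric_ttl" from testingDiskInfCfg
	ttlDelayTime = 2 * metricTTL        // 1.8s: old metrics expire, fresh ones arrive
	fmt.Println("ttlDelay sleeps for", ttlDelayTime)
	ttlDelay()
}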


@@ -111,7 +111,7 @@ func TestClustersPeerAdd(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	pinDelay()
+	ttlDelay()

 	f := func(t *testing.T, c *Cluster) {
 		ids := c.Peers(ctx)
@@ -420,7 +420,7 @@ func TestClustersPeerRemoveReallocsPins(t *testing.T) {
 	// We choose to remove the leader, to make things even more interesting
 	chosenID, err := clusters[0].consensus.Leader(ctx)
 	if err != nil {
-		// choose a random peer
+		// choose a random peer - crdt
 		i := rand.Intn(nClusters)
 		chosenID = clusters[i].host.ID()
 	}
@@ -527,7 +527,7 @@ func TestClustersPeerJoin(t *testing.T) {
 	h := test.Cid1
 	clusters[0].Pin(ctx, h, api.PinOptions{})

-	pinDelay()
+	ttlDelay()

 	for _, p := range clusters {
 		t.Log(p.id.String())
@@ -574,7 +574,7 @@ func TestClustersPeerJoinAllAtOnce(t *testing.T) {
 	h := test.Cid1
 	clusters[0].Pin(ctx, h, api.PinOptions{})

-	pinDelay()
+	ttlDelay()

 	f2 := func(t *testing.T, c *Cluster) {
 		peers := c.Peers(ctx)


@@ -342,7 +342,7 @@ func (pm *Manager) SetPriority(pid peer.ID, prio int) error {
 	return pm.host.Peerstore().Put(pid, PriorityTag, prio)
 }

-// HandlePeerFound implements the Notifee interface for discovery.
+// HandlePeerFound implements the Notifee interface for discovery (mdns).
 func (pm *Manager) HandlePeerFound(p peer.AddrInfo) {
 	addrs, err := peer.AddrInfoToP2pAddrs(&p)
 	if err != nil {
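Context for the comment fix: HandlePeerFound is what makes Manager satisfy the go-libp2p discovery Notifee interface, and it is only invoked through mdns discovery. A hedged sketch of how a Notifee is typically wired to the mdns service (go-libp2p API of this era, from memory; the interval and service tag are illustrative, not the values ipfs-cluster uses):

package example

import (
	"context"
	"time"

	"github.com/libp2p/go-libp2p-core/host"
	discovery "github.com/libp2p/go-libp2p/p2p/discovery"
)

// setupMDNS registers a Notifee (such as the Manager above) so its
// HandlePeerFound runs for every peer found on the local network.
func setupMDNS(ctx context.Context, h host.Host, pm discovery.Notifee) error {
	svc, err := discovery.NewMdnsService(ctx, h, 10*time.Second, "example-tag")
	if err != nil {
		return err
	}
	svc.RegisterNotifee(pm)
	return nil
}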