Merge pull request #953 from ipfs/fix/tests2

Multiple fixes to tests
Hector Sanjuan 2019-11-08 12:47:03 +01:00 committed by GitHub
commit 9db16092cb
8 changed files with 55 additions and 67 deletions


@ -27,12 +27,12 @@ jobs:
- travis_wait go test -v -timeout 15m -coverprofile=coverage.txt -covermode=atomic ./...
after_success:
- bash <(curl -s https://codecov.io/bash)
- name: "Main Tests with crdt consensus"
- name: "Main Tests with raft consensus"
script:
- go test -v -failfast -consensus crdt .
- travis_wait go test -v -timeout 15m -failfast -consensus raft .
- name: "Main Tests with stateless tracker"
script:
- go test -v -failfast -tracker stateless .
- travis_wait go test -v -timeout 15m -failfast -tracker stateless .
- name: "Golint and go vet"
script:
- go get -u golang.org/x/lint/golint


@ -13,9 +13,9 @@ func TestLBClient(t *testing.T) {
// say we want to retry the request for at most 5 times
cfgs := make([]*Config, 10)
// 5 empty clients
// 5 clients with an invalid api address
for i := 0; i < 5; i++ {
maddr, _ := ma.NewMultiaddr("")
maddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0")
cfgs[i] = &Config{
APIAddr: maddr,
DisableKeepAlives: true,
@ -51,7 +51,7 @@ func testRunManyRequestsConcurrently(t *testing.T, cfgs []*Config, strategy LBSt
go func() {
defer wg.Done()
ctx := context.Background()
_, err = c.ID(ctx)
_, err := c.ID(ctx)
if err != nil && pass {
t.Error(err)
}
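
A note on the `:=` in the new line above: with plain `=`, every goroutine in this test wrote to a single `err` captured from the enclosing scope, which is a data race (and what `go test -race` flags); declaring the variable inside the goroutine makes it goroutine-local. A minimal standalone sketch of the pattern, where doRequest is a hypothetical stand-in for `c.ID(ctx)`:

package main

import (
	"errors"
	"log"
	"sync"
)

// doRequest stands in for the client call made by each goroutine in the test.
func doRequest(i int) error {
	if i%3 == 0 {
		return errors.New("request failed")
	}
	return nil
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			// Goroutine-local error: no two goroutines write the same variable.
			// Assigning to an outer `err` with `=` here would race.
			err := doRequest(i)
			if err != nil {
				log.Printf("request %d failed: %v", i, err)
			}
		}(i)
	}
	wg.Wait()
}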


@ -167,7 +167,8 @@ func (dW *dotWriter) print() error {
// Write cluster nodes, use sorted order for consistent labels
sGraphCluster := dot.NewGraph("")
sGraphCluster.IsSubGraph = true
for _, k := range sortedKeys(dW.clusterEdges) {
sortedClusterEdges := sortedKeys(dW.clusterEdges)
for _, k := range sortedClusterEdges {
var err error
if k == dW.self {
err = dW.addNode(&sGraphCluster, k, tSelfCluster)
@ -187,7 +188,7 @@ func (dW *dotWriter) print() error {
sGraphIPFS := dot.NewGraph("")
sGraphIPFS.IsSubGraph = true
// Write ipfs nodes, use sorted order for consistent labels
for _, k := range sortedKeys(dW.clusterEdges) {
for _, k := range sortedClusterEdges {
err := dW.addNode(&sGraphIPFS, k, tIPFS)
if err != nil {
return err
@ -199,7 +200,8 @@ func (dW *dotWriter) print() error {
dW.dotGraph.AddComment("Edges representing active connections in the cluster")
dW.dotGraph.AddComment("The connections among cluster-service peers")
// Write cluster edges
for k, v := range dW.clusterEdges {
for _, k := range sortedClusterEdges {
v := dW.clusterEdges[k]
for _, id := range v {
toNode := dW.clusterNodes[k]
fromNode := dW.clusterNodes[peer.IDB58Encode(id)]
@ -210,7 +212,7 @@ func (dW *dotWriter) print() error {
dW.dotGraph.AddComment("The connections between cluster peers and their ipfs daemons")
// Write cluster to ipfs edges
for k := range dW.clusterEdges {
for _, k := range sortedClusterEdges {
var fromNode *dot.VertexDescription
toNode := dW.clusterNodes[k]
ipfsID, ok := dW.clusterIpfsEdges[k]
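
The changes in this file all replace direct iteration over dW.clusterEdges with iteration over a sorted key slice computed once. Go randomizes map iteration order, so writing nodes and edges straight from the map produced a differently ordered .dot file on every run, which is why the graph test previously had to sort output lines before comparing them. A standalone sketch of the idea, with sortedKeys re-implemented here purely for illustration:

package main

import (
	"fmt"
	"sort"
)

// sortedKeys returns the map's keys in a stable, sorted order.
func sortedKeys(m map[string][]string) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}

func main() {
	edges := map[string][]string{
		"C2": {"C0", "C1"},
		"C0": {"C1", "C2"},
		"C1": {"C0", "C2"},
	}
	// Ranging over the map directly yields a different order on each run;
	// ranging over the sorted keys makes the emitted graph reproducible.
	for _, from := range sortedKeys(edges) {
		for _, to := range edges[from] {
			fmt.Println(from, "->", to)
		}
	}
}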


@ -3,7 +3,6 @@ package main
import (
"bytes"
"fmt"
"sort"
"strings"
"testing"
@ -18,15 +17,13 @@ func verifyOutput(t *testing.T, outStr string, trueStr string) {
// line.
outLines := strings.Split(outStr, "\n")
trueLines := strings.Split(trueStr, "\n")
sort.Strings(outLines)
sort.Strings(trueLines)
if len(outLines) != len(trueLines) {
fmt.Printf("expected: %s\n actual: %s", trueStr, outStr)
fmt.Printf("expected:\n-%s-\n\n\nactual:\n-%s-", trueStr, outStr)
t.Fatal("Number of output lines does not match expectation")
}
for i := range outLines {
if outLines[i] != trueLines[i] {
t.Errorf("Difference in sorted outputs: %s vs %s", outLines[i], trueLines[i])
t.Errorf("Difference in sorted outputs (%d): %s vs %s", i, outLines[i], trueLines[i])
}
}
}
@ -36,21 +33,19 @@ var simpleIpfs = `digraph cluster {
/* The cluster-service peers */
subgraph {
rank="min"
C0 [label=< <B> </B> <BR/> <B> Qm*EhD </B> > group="QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD" color="11" style="filled" colorscheme="brbg11" fontcolor="6" fontname="Ariel" shape="ellipse" peripheries="2" ]
C1 [label=< <B> </B> <BR/> <B> Qm*DQJ </B> > group="QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ" color="9" style="filled" colorscheme="brbg11" fontcolor="6" fontname="Ariel" shape="ellipse" ]
C2 [label=< <B> </B> <BR/> <B> Qm*mJu </B> > group="QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu" color="9" style="filled" colorscheme="brbg11" fontcolor="6" fontname="Ariel" shape="ellipse" ]
C0 [label=< <B> </B> <BR/> <B> Qm*eqhEhD </B> > group="QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD" color="orange" style="filled" colorscheme="x11" fontcolor="black" fontname="Ariel" shape="box3d" peripheries="2" ]
C1 [label=< <B> </B> <BR/> <B> Qm*cgHDQJ </B> > group="QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ" color="darkorange3" style="filled" colorscheme="x11" fontcolor="black" fontname="Ariel" shape="box3d" ]
C2 [label=< <B> </B> <BR/> <B> Qm*6MQmJu </B> > group="QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu" color="darkorange3" style="filled" colorscheme="x11" fontcolor="black" fontname="Ariel" shape="box3d" ]
}
/* The ipfs peers linked to cluster peers */
subgraph {
rank="max"
I0 [label=< <B> IPFS </B> <BR/> <B> Qm*ZDV </B> > group="QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD" color="1" style="filled" colorscheme="brbg11" fontcolor="6" fontname="Ariel" shape="box" ]
I1 [label=< <B> IPFS </B> <BR/> <B> Qm*Ssq </B> > group="QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ" color="1" style="filled" colorscheme="brbg11" fontcolor="6" fontname="Ariel" shape="box" ]
I2 [label=< <B> IPFS </B> <BR/> <B> Qm*suL </B> > group="QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu" color="1" style="filled" colorscheme="brbg11" fontcolor="6" fontname="Ariel" shape="box" ]
I0 [label=< <B> IPFS </B> <BR/> <B> Qm*R3DZDV </B> > group="QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Ariel" shape="cylinder" ]
I1 [label=< <B> IPFS </B> <BR/> <B> Qm*N5LSsq </B> > group="QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Ariel" shape="cylinder" ]
I2 [label=< <B> IPFS </B> <BR/> <B> Qm*wbBsuL </B> > group="QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Ariel" shape="cylinder" ]
}
/* The ipfs swarm peers */
/* Edges representing active connections in the cluster */
/* The connections among cluster-service peers */
C0 -> C1
@ -137,32 +132,27 @@ var allIpfs = `digraph cluster {
/* The cluster-service peers */
subgraph {
rank="min"
C0 [label=< <B> </B> <BR/> <B> Qm*EhD </B> > group="QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD" color="11" style="filled" colorscheme="brbg11" fontcolor="6" fontname="Ariel" shape="ellipse" peripheries="2" ]
C1 [label=< <B> </B> <BR/> <B> Qm*DQJ </B> > group="QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ" color="9" style="filled" colorscheme="brbg11" fontcolor="6" fontname="Ariel" shape="ellipse" ]
C2 [label=< <B> </B> <BR/> <B> Qm*mJu </B> > group="QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu" color="9" style="filled" colorscheme="brbg11" fontcolor="6" fontname="Ariel" shape="ellipse" ]
C0 [label=< <B> </B> <BR/> <B> Qm*eqhEhD </B> > group="QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD" color="orange" style="filled" colorscheme="x11" fontcolor="black" fontname="Ariel" shape="box3d" peripheries="2" ]
C1 [label=< <B> </B> <BR/> <B> Qm*cgHDQJ </B> > group="QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ" color="darkorange3" style="filled" colorscheme="x11" fontcolor="black" fontname="Ariel" shape="box3d" ]
C2 [label=< <B> </B> <BR/> <B> Qm*6MQmJu </B> > group="QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu" color="darkorange3" style="filled" colorscheme="x11" fontcolor="black" fontname="Ariel" shape="box3d" ]
}
/* The ipfs peers linked to cluster peers */
subgraph {
rank="max"
I0 [label=< <B> IPFS </B> <BR/> <B> Qm*ZDV </B> > group="QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD" color="1" style="filled" colorscheme="brbg11" fontcolor="6" fontname="Ariel" shape="box" ]
I1 [label=< <B> IPFS </B> <BR/> <B> Qm*Ssq </B> > group="QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ" color="1" style="filled" colorscheme="brbg11" fontcolor="6" fontname="Ariel" shape="box" ]
I2 [label=< <B> IPFS </B> <BR/> <B> Qm*suL </B> > group="QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu" color="1" style="filled" colorscheme="brbg11" fontcolor="6" fontname="Ariel" shape="box" ]
I0 [label=< <B> IPFS </B> <BR/> <B> Qm*R3DZDV </B> > group="QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Ariel" shape="cylinder" ]
I1 [label=< <B> IPFS </B> <BR/> <B> Qm*N5LSsq </B> > group="QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Ariel" shape="cylinder" ]
I2 [label=< <B> IPFS </B> <BR/> <B> Qm*wbBsuL </B> > group="QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Ariel" shape="cylinder" ]
}
/* The ipfs swarm peers */
I3 [label=< <B> IPFS </B> <BR/> <B> Qm*ccb </B> > group="QmQsdAdCHs4PRLi5tcoLfasYppryqQENxgAy4b2aS8xccb" color="5" style="filled" colorscheme="brbg11" fontcolor="1" fontname="Ariel" shape="box" ]
I4 [label=< <B> IPFS </B> <BR/> <B> Qm*nM8 </B> > group="QmVV2enwXqqQf5esx4v36UeaFQvFehSPzNfi8aaaaaanM8" color="5" style="filled" colorscheme="brbg11" fontcolor="1" fontname="Ariel" shape="box" ]
I5 [label=< <B> IPFS </B> <BR/> <B> Qm*deD </B> > group="QmfCHNQ2vbUmAuJZhE2hEpgiJq4sL1XScWEKnUrVtWZdeD" color="5" style="filled" colorscheme="brbg11" fontcolor="1" fontname="Ariel" shape="box" ]
/* Edges representing active connections in the cluster */
/* The connections among cluster-service peers */
C2 -> C0
C2 -> C1
C0 -> C1
C0 -> C2
C1 -> C0
C1 -> C2
C2 -> C0
C2 -> C1
/* The connections between cluster peers and their ipfs daemons */
C0 -> I0
@ -172,19 +162,10 @@ C2 -> I2
/* The swarm peer connections among ipfs daemons in the cluster */
I1 -> I0
I1 -> I2
I1 -> I3
I1 -> I4
I1 -> I5
I0 -> I1
I0 -> I2
I0 -> I3
I0 -> I4
I0 -> I5
I2 -> I0
I2 -> I1
I2 -> I3
I2 -> I4
I2 -> I5
}`
func TestIpfsAllGraphs(t *testing.T) {


@ -34,33 +34,33 @@ var testingClusterCfg = []byte(`{
"state_sync_interval": "1m0s",
"ipfs_sync_interval": "2m10s",
"replication_factor": -1,
"monitor_ping_interval": "350ms",
"peer_watch_interval": "200ms",
"monitor_ping_interval": "1s",
"peer_watch_interval": "1s",
"disable_repinning": false,
"mdns_interval": "0s"
}`)
var testingRaftCfg = []byte(`{
"data_folder": "raftFolderFromTests",
"wait_for_leader_timeout": "10s",
"wait_for_leader_timeout": "5s",
"commit_retries": 2,
"commit_retry_delay": "50ms",
"backups_rotate": 2,
"network_timeout": "5s",
"heartbeat_timeout": "200ms",
"election_timeout": "200ms",
"commit_timeout": "150ms",
"heartbeat_timeout": "700ms",
"election_timeout": "1s",
"commit_timeout": "250ms",
"max_append_entries": 256,
"trailing_logs": 10240,
"snapshot_interval": "2m0s",
"snapshot_threshold": 8192,
"leader_lease_timeout": "200ms"
"leader_lease_timeout": "500ms"
}`)
var testingCrdtCfg = []byte(`{
"cluster_name": "crdt-test",
"trusted_peers": ["*"],
"rebroadcast_interval": "150ms"
"rebroadcast_interval": "250ms"
}`)
var testingBadgerCfg = []byte(`{
@ -114,12 +114,12 @@ var testingTrackerCfg = []byte(`
`)
var testingMonCfg = []byte(`{
"check_interval": "550ms",
"check_interval": "800ms",
"failure_threshold": 6
}`)
var testingDiskInfCfg = []byte(`{
"metric_ttl": "350ms",
"metric_ttl": "900ms",
"metric_type": "freespace"
}`)
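
These fixtures are raw JSON handed to each component's LoadJSON method; the timings were relaxed (metric_ttl 350ms → 900ms, raft heartbeat 200ms → 700ms, and so on) so that slow CI machines do not miss heartbeats or let metrics expire mid-test. A minimal sketch of how one of them is parsed, assuming the informer/disk import path ipfs-cluster uses at this point:

package main

import (
	"fmt"
	"time"

	disk "github.com/ipfs/ipfs-cluster/informer/disk"
)

func main() {
	// Same shape as testingDiskInfCfg above; LoadJSON parses the duration
	// string "900ms" into a time.Duration field.
	fixture := []byte(`{"metric_ttl": "900ms", "metric_type": "freespace"}`)
	cfg := &disk.Config{}
	if err := cfg.LoadJSON(fixture); err != nil {
		panic(err)
	}
	// The integration tests derive their wait time from this TTL
	// (see ttlDelayTime in the next file).
	fmt.Println(cfg.MetricTTL, cfg.MetricTTL >= 500*time.Millisecond)
}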


@ -56,9 +56,10 @@ var (
customLogLvlFacilities = logFacilities{}
ptracker = "map"
consensus = "raft"
consensus = "crdt"
testsFolder = "clusterTestsFolder"
ttlDelayTime = 2 * time.Second // set on Main to diskInf.MetricTTL
testsFolder = "clusterTestsFolder"
// When testing with fixed ports...
// clusterPort = 10000
@ -124,6 +125,10 @@ func TestMain(m *testing.M) {
}
}
diskInfCfg := &disk.Config{}
diskInfCfg.LoadJSON(testingDiskInfCfg)
ttlDelayTime = diskInfCfg.MetricTTL * 2
os.Exit(m.Run())
}
@ -298,14 +303,15 @@ func createHosts(t *testing.T, clusterSecret []byte, nClusters int) ([]host.Host
dhts := make([]*dht.IpfsDHT, nClusters, nClusters)
tcpaddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0")
quicAddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/udp/0/quic")
// Disable quic as it is proving a bit unstable
//quicAddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/udp/0/quic")
for i := range hosts {
priv, _, err := crypto.GenerateKeyPair(crypto.RSA, 2048)
if err != nil {
t.Fatal(err)
}
h, p, d := createHost(t, priv, clusterSecret, []ma.Multiaddr{quicAddr, tcpaddr})
h, p, d := createHost(t, priv, clusterSecret, []ma.Multiaddr{tcpaddr})
hosts[i] = h
dhts[i] = d
pubsubs[i] = p
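
With the QUIC listener commented out, the test hosts now listen on TCP only. A rough standalone sketch of building such a host; the real createHost helper also wires the cluster secret, DHT and pubsub, all omitted here, and the import paths assume the go-libp2p packages in use at the time:

package main

import (
	"context"
	"fmt"

	libp2p "github.com/libp2p/go-libp2p"
	crypto "github.com/libp2p/go-libp2p-core/crypto"
	ma "github.com/multiformats/go-multiaddr"
)

func main() {
	ctx := context.Background()
	priv, _, err := crypto.GenerateKeyPair(crypto.RSA, 2048)
	if err != nil {
		panic(err)
	}
	// Only a TCP listen address is passed; the QUIC one stays disabled.
	tcpaddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0")
	h, err := libp2p.New(ctx,
		libp2p.Identity(priv),
		libp2p.ListenAddrs(tcpaddr),
	)
	if err != nil {
		panic(err)
	}
	defer h.Close()
	fmt.Println("listening as", h.ID().Pretty(), "on", h.Addrs())
}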
@ -382,6 +388,7 @@ func createClusters(t *testing.T) ([]*Cluster, []*test.IpfsMock) {
clusters[0] = createCluster(t, hosts[0], dhts[0], cfgs[0], stores[0], cons[0], apis[0], ipfss[0], trackers[0], mons[0], allocs[0], infs[0], tracers[0])
<-clusters[0].Ready()
bootstrapAddr := clusterAddr(clusters[0])
// Start the rest and join
for i := 1; i < nClusters; i++ {
clusters[i] = createCluster(t, hosts[i], dhts[i], cfgs[i], stores[i], cons[i], apis[i], ipfss[i], trackers[i], mons[i], allocs[i], infs[i], tracers[i])
@ -481,9 +488,7 @@ func pinDelay() {
}
func ttlDelay() {
diskInfCfg := &disk.Config{}
diskInfCfg.LoadJSON(testingDiskInfCfg)
time.Sleep(diskInfCfg.MetricTTL * 3)
time.Sleep(ttlDelayTime)
}
// Like waitForLeader but letting metrics expire before waiting, and


@ -111,7 +111,7 @@ func TestClustersPeerAdd(t *testing.T) {
if err != nil {
t.Fatal(err)
}
pinDelay()
ttlDelay()
f := func(t *testing.T, c *Cluster) {
ids := c.Peers(ctx)
@ -420,7 +420,7 @@ func TestClustersPeerRemoveReallocsPins(t *testing.T) {
// We choose to remove the leader, to make things even more interesting
chosenID, err := clusters[0].consensus.Leader(ctx)
if err != nil {
// choose a random peer
// choose a random peer - crdt
i := rand.Intn(nClusters)
chosenID = clusters[i].host.ID()
}
@ -527,7 +527,7 @@ func TestClustersPeerJoin(t *testing.T) {
h := test.Cid1
clusters[0].Pin(ctx, h, api.PinOptions{})
pinDelay()
ttlDelay()
for _, p := range clusters {
t.Log(p.id.String())
@ -574,7 +574,7 @@ func TestClustersPeerJoinAllAtOnce(t *testing.T) {
h := test.Cid1
clusters[0].Pin(ctx, h, api.PinOptions{})
pinDelay()
ttlDelay()
f2 := func(t *testing.T, c *Cluster) {
peers := c.Peers(ctx)


@ -342,7 +342,7 @@ func (pm *Manager) SetPriority(pid peer.ID, prio int) error {
return pm.host.Peerstore().Put(pid, PriorityTag, prio)
}
// HandlePeerFound implements the Notifee interface for discovery.
// HandlePeerFound implements the Notifee interface for discovery (mdns).
func (pm *Manager) HandlePeerFound(p peer.AddrInfo) {
addrs, err := peer.AddrInfoToP2pAddrs(&p)
if err != nil {
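
For context on the comment change: HandlePeerFound is the callback of the mDNS discovery notifee, invoked once for every peer announced on the local network. A minimal sketch of such a notifee — a hypothetical implementation that simply dials whatever it is told about, whereas the Manager above additionally converts the discovered AddrInfo into full p2p multiaddrs (peer.AddrInfoToP2pAddrs) before handling it:

// Package mdnsnotifee is a hypothetical example package.
package mdnsnotifee

import (
	"context"
	"log"
	"time"

	host "github.com/libp2p/go-libp2p-core/host"
	peer "github.com/libp2p/go-libp2p-core/peer"
)

// ConnectNotifee implements HandlePeerFound and connects to each
// peer discovered over mDNS.
type ConnectNotifee struct {
	Host host.Host
}

// HandlePeerFound dials the discovered peer with a bounded timeout.
func (n *ConnectNotifee) HandlePeerFound(pi peer.AddrInfo) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if err := n.Host.Connect(ctx, pi); err != nil {
		log.Printf("mdns: could not connect to %s: %s", pi.ID, err)
	}
}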