From 7b4d647267e43f6496d7b8cb34646a2a0feac570 Mon Sep 17 00:00:00 2001
From: Hector Sanjuan
Date: Thu, 7 Nov 2019 20:37:03 +0100
Subject: [PATCH] Tests: multiple fixes, increase timings

---
 .travis.yml          |  4 ++--
 config_test.go       | 20 ++++++++++----------
 ipfscluster_test.go  | 17 +++++++++++------
 peer_manager_test.go |  8 ++++----
 4 files changed, 27 insertions(+), 22 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index eb3c521c..04db3c85 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -29,10 +29,10 @@ jobs:
         - bash <(curl -s https://codecov.io/bash)
     - name: "Main Tests with raft consensus"
       script:
-        - go test -v -failfast -consensus raft .
+        - travis_wait go test -v -timeout 15m -failfast -consensus raft .
     - name: "Main Tests with stateless tracker"
       script:
-        - go test -v -failfast -tracker stateless .
+        - travis_wait go test -v -timeout 15m -failfast -tracker stateless .
     - name: "Golint and go vet"
       script:
         - go get -u golang.org/x/lint/golint
diff --git a/config_test.go b/config_test.go
index 2185bf92..c6746b7d 100644
--- a/config_test.go
+++ b/config_test.go
@@ -34,33 +34,33 @@ var testingClusterCfg = []byte(`{
     "state_sync_interval": "1m0s",
     "ipfs_sync_interval": "2m10s",
     "replication_factor": -1,
-    "monitor_ping_interval": "350ms",
-    "peer_watch_interval": "200ms",
+    "monitor_ping_interval": "1s",
+    "peer_watch_interval": "1s",
     "disable_repinning": false,
     "mdns_interval": "0s"
 }`)
 
 var testingRaftCfg = []byte(`{
     "data_folder": "raftFolderFromTests",
-    "wait_for_leader_timeout": "10s",
+    "wait_for_leader_timeout": "5s",
     "commit_retries": 2,
     "commit_retry_delay": "50ms",
     "backups_rotate": 2,
     "network_timeout": "5s",
-    "heartbeat_timeout": "200ms",
-    "election_timeout": "200ms",
-    "commit_timeout": "150ms",
+    "heartbeat_timeout": "700ms",
+    "election_timeout": "1s",
+    "commit_timeout": "250ms",
     "max_append_entries": 256,
     "trailing_logs": 10240,
     "snapshot_interval": "2m0s",
     "snapshot_threshold": 8192,
-    "leader_lease_timeout": "200ms"
+    "leader_lease_timeout": "500ms"
 }`)
 
 var testingCrdtCfg = []byte(`{
     "cluster_name": "crdt-test",
     "trusted_peers": ["*"],
-    "rebroadcast_interval": "150ms"
+    "rebroadcast_interval": "250ms"
 }`)
 
 var testingBadgerCfg = []byte(`{
@@ -114,12 +114,12 @@ var testingTrackerCfg = []byte(`
 `)
 
 var testingMonCfg = []byte(`{
-    "check_interval": "550ms",
+    "check_interval": "800ms",
     "failure_threshold": 6
 }`)
 
 var testingDiskInfCfg = []byte(`{
-    "metric_ttl": "350ms",
+    "metric_ttl": "900ms",
     "metric_type": "freespace"
 }`)
 
diff --git a/ipfscluster_test.go b/ipfscluster_test.go
index 5dc86404..d13f3a22 100644
--- a/ipfscluster_test.go
+++ b/ipfscluster_test.go
@@ -58,7 +58,8 @@ var (
 	ptracker  = "map"
 	consensus = "crdt"
 
-	testsFolder = "clusterTestsFolder"
+	ttlDelayTime = 2 * time.Second // set on Main to diskInf.MetricTTL
+	testsFolder  = "clusterTestsFolder"
 
 	// When testing with fixed ports...
 	// clusterPort = 10000
@@ -124,6 +125,10 @@ func TestMain(m *testing.M) {
 		}
 	}
 
+	diskInfCfg := &disk.Config{}
+	diskInfCfg.LoadJSON(testingDiskInfCfg)
+	ttlDelayTime = diskInfCfg.MetricTTL * 2
+
 	os.Exit(m.Run())
 }
 
@@ -298,14 +303,15 @@ func createHosts(t *testing.T, clusterSecret []byte, nClusters int) ([]host.Host
 	dhts := make([]*dht.IpfsDHT, nClusters, nClusters)
 
 	tcpaddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0")
-	quicAddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/udp/0/quic")
+	// Disable quic as it is proving a bit unstable
+	//quicAddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/udp/0/quic")
 	for i := range hosts {
 		priv, _, err := crypto.GenerateKeyPair(crypto.RSA, 2048)
 		if err != nil {
 			t.Fatal(err)
 		}
 
-		h, p, d := createHost(t, priv, clusterSecret, []ma.Multiaddr{quicAddr, tcpaddr})
+		h, p, d := createHost(t, priv, clusterSecret, []ma.Multiaddr{tcpaddr})
 		hosts[i] = h
 		dhts[i] = d
 		pubsubs[i] = p
@@ -382,6 +388,7 @@ func createClusters(t *testing.T) ([]*Cluster, []*test.IpfsMock) {
 	clusters[0] = createCluster(t, hosts[0], dhts[0], cfgs[0], stores[0], cons[0], apis[0], ipfss[0], trackers[0], mons[0], allocs[0], infs[0], tracers[0])
 	<-clusters[0].Ready()
 	bootstrapAddr := clusterAddr(clusters[0])
+
 	// Start the rest and join
 	for i := 1; i < nClusters; i++ {
 		clusters[i] = createCluster(t, hosts[i], dhts[i], cfgs[i], stores[i], cons[i], apis[i], ipfss[i], trackers[i], mons[i], allocs[i], infs[i], tracers[i])
@@ -481,9 +488,7 @@ func pinDelay() {
 }
 
 func ttlDelay() {
-	diskInfCfg := &disk.Config{}
-	diskInfCfg.LoadJSON(testingDiskInfCfg)
-	time.Sleep(diskInfCfg.MetricTTL * 3)
+	time.Sleep(ttlDelayTime)
 }
 
 // Like waitForLeader but letting metrics expire before waiting, and
diff --git a/peer_manager_test.go b/peer_manager_test.go
index 881c02d5..de8ee3d8 100644
--- a/peer_manager_test.go
+++ b/peer_manager_test.go
@@ -111,7 +111,7 @@ func TestClustersPeerAdd(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	pinDelay()
+	ttlDelay()
 
 	f := func(t *testing.T, c *Cluster) {
 		ids := c.Peers(ctx)
@@ -420,7 +420,7 @@ func TestClustersPeerRemoveReallocsPins(t *testing.T) {
 	// We choose to remove the leader, to make things even more interesting
 	chosenID, err := clusters[0].consensus.Leader(ctx)
 	if err != nil {
-		// choose a random peer
+		// choose a random peer - crdt
 		i := rand.Intn(nClusters)
 		chosenID = clusters[i].host.ID()
 	}
@@ -527,7 +527,7 @@ func TestClustersPeerJoin(t *testing.T) {
 	h := test.Cid1
 	clusters[0].Pin(ctx, h, api.PinOptions{})
 
-	pinDelay()
+	ttlDelay()
 
 	for _, p := range clusters {
 		t.Log(p.id.String())
@@ -574,7 +574,7 @@ func TestClustersPeerJoinAllAtOnce(t *testing.T) {
 	h := test.Cid1
 	clusters[0].Pin(ctx, h, api.PinOptions{})
 
-	pinDelay()
+	ttlDelay()
 
 	f2 := func(t *testing.T, c *Cluster) {
 		peers := c.Peers(ctx)