Tests: multiple fixes, increase timings

Hector Sanjuan 2019-11-07 20:37:03 +01:00
parent 9649664dbd
commit 7b4d647267
4 changed files with 27 additions and 22 deletions

View File

@@ -29,10 +29,10 @@ jobs:
- bash <(curl -s https://codecov.io/bash)
- name: "Main Tests with raft consensus"
script:
- - go test -v -failfast -consensus raft .
+ - travis_wait go test -v -timeout 15m -failfast -consensus raft .
- name: "Main Tests with stateless tracker"
script:
- - go test -v -failfast -tracker stateless .
+ - travis_wait go test -v -timeout 15m -failfast -tracker stateless .
- name: "Golint and go vet"
script:
- go get -u golang.org/x/lint/golint

View File

@@ -34,33 +34,33 @@ var testingClusterCfg = []byte(`{
"state_sync_interval": "1m0s",
"ipfs_sync_interval": "2m10s",
"replication_factor": -1,
"monitor_ping_interval": "350ms",
"peer_watch_interval": "200ms",
"monitor_ping_interval": "1s",
"peer_watch_interval": "1s",
"disable_repinning": false,
"mdns_interval": "0s"
}`)
var testingRaftCfg = []byte(`{
"data_folder": "raftFolderFromTests",
"wait_for_leader_timeout": "10s",
"wait_for_leader_timeout": "5s",
"commit_retries": 2,
"commit_retry_delay": "50ms",
"backups_rotate": 2,
"network_timeout": "5s",
"heartbeat_timeout": "200ms",
"election_timeout": "200ms",
"commit_timeout": "150ms",
"heartbeat_timeout": "700ms",
"election_timeout": "1s",
"commit_timeout": "250ms",
"max_append_entries": 256,
"trailing_logs": 10240,
"snapshot_interval": "2m0s",
"snapshot_threshold": 8192,
"leader_lease_timeout": "200ms"
"leader_lease_timeout": "500ms"
}`)
var testingCrdtCfg = []byte(`{
"cluster_name": "crdt-test",
"trusted_peers": ["*"],
"rebroadcast_interval": "150ms"
"rebroadcast_interval": "250ms"
}`)
var testingBadgerCfg = []byte(`{
@@ -114,12 +114,12 @@ var testingTrackerCfg = []byte(`
`)
var testingMonCfg = []byte(`{
"check_interval": "550ms",
"check_interval": "800ms",
"failure_threshold": 6
}`)
var testingDiskInfCfg = []byte(`{
"metric_ttl": "350ms",
"metric_ttl": "900ms",
"metric_type": "freespace"
}`)
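These blobs are consumed by the test harness through each component config's LoadJSON method, which parses the duration strings ("1s", "700ms", ...) into time.Duration fields. Below is a minimal, self-contained sketch of that flow using the disk informer config, the same calls TestMain makes further down in this commit; the informer/disk import path is an assumption based on the ipfs-cluster repository layout, the rest mirrors the diff:

package main

import (
	"fmt"
	"log"

	"github.com/ipfs/ipfs-cluster/informer/disk"
)

// Same shape as the testingDiskInfCfg blob above.
var testingDiskInfCfg = []byte(`{
    "metric_ttl": "900ms",
    "metric_type": "freespace"
}`)

func main() {
	cfg := &disk.Config{}
	// LoadJSON fills the struct, turning "900ms" into a time.Duration.
	if err := cfg.LoadJSON(testingDiskInfCfg); err != nil {
		log.Fatal(err)
	}
	// Tests derive their waits from this value instead of hardcoding sleeps.
	fmt.Println(cfg.MetricTTL) // 900ms
}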

View File

@@ -58,7 +58,8 @@ var (
ptracker = "map"
consensus = "crdt"
- testsFolder = "clusterTestsFolder"
+ ttlDelayTime = 2 * time.Second // set on Main to diskInf.MetricTTL
+ testsFolder = "clusterTestsFolder"
// When testing with fixed ports...
// clusterPort = 10000
@@ -124,6 +125,10 @@ func TestMain(m *testing.M) {
}
}
+ diskInfCfg := &disk.Config{}
+ diskInfCfg.LoadJSON(testingDiskInfCfg)
+ ttlDelayTime = diskInfCfg.MetricTTL * 2
os.Exit(m.Run())
}
@@ -298,14 +303,15 @@ func createHosts(t *testing.T, clusterSecret []byte, nClusters int) ([]host.Host
dhts := make([]*dht.IpfsDHT, nClusters, nClusters)
tcpaddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0")
- quicAddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/udp/0/quic")
+ // Disable quic as it is proving a bit unstable
+ //quicAddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/udp/0/quic")
for i := range hosts {
priv, _, err := crypto.GenerateKeyPair(crypto.RSA, 2048)
if err != nil {
t.Fatal(err)
}
- h, p, d := createHost(t, priv, clusterSecret, []ma.Multiaddr{quicAddr, tcpaddr})
+ h, p, d := createHost(t, priv, clusterSecret, []ma.Multiaddr{tcpaddr})
hosts[i] = h
dhts[i] = d
pubsubs[i] = p
@@ -382,6 +388,7 @@ func createClusters(t *testing.T) ([]*Cluster, []*test.IpfsMock) {
clusters[0] = createCluster(t, hosts[0], dhts[0], cfgs[0], stores[0], cons[0], apis[0], ipfss[0], trackers[0], mons[0], allocs[0], infs[0], tracers[0])
<-clusters[0].Ready()
bootstrapAddr := clusterAddr(clusters[0])
+ // Start the rest and join
for i := 1; i < nClusters; i++ {
clusters[i] = createCluster(t, hosts[i], dhts[i], cfgs[i], stores[i], cons[i], apis[i], ipfss[i], trackers[i], mons[i], allocs[i], infs[i], tracers[i])
@@ -481,9 +488,7 @@ func pinDelay() {
}
func ttlDelay() {
- diskInfCfg := &disk.Config{}
- diskInfCfg.LoadJSON(testingDiskInfCfg)
- time.Sleep(diskInfCfg.MetricTTL * 3)
+ time.Sleep(ttlDelayTime)
}
// Like waitForLeader but letting metrics expire before waiting, and
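The new ttlDelayTime variable exists so that the metric TTL is read from the disk informer config once, in TestMain, and cached; ttlDelay() then just sleeps for that cached duration (2x the TTL, where it previously reloaded the JSON and slept 3x the TTL on every call). A rough standalone sketch of the same pattern, with an illustrative local struct standing in for the real disk.Config:

package timings

import (
	"encoding/json"
	"os"
	"testing"
	"time"
)

// Illustrative stand-in for the real disk informer config.
type diskInfConfig struct {
	MetricTTL string `json:"metric_ttl"`
}

var testingDiskInfCfg = []byte(`{"metric_ttl": "900ms"}`)

// Set once in TestMain, reused by every helper afterwards (2s is only a fallback).
var ttlDelayTime = 2 * time.Second

func TestMain(m *testing.M) {
	var c diskInfConfig
	if err := json.Unmarshal(testingDiskInfCfg, &c); err == nil {
		if ttl, err := time.ParseDuration(c.MetricTTL); err == nil {
			ttlDelayTime = 2 * ttl // two TTLs so previously pushed metrics have expired
		}
	}
	os.Exit(m.Run())
}

// ttlDelay no longer loads any config; it only sleeps for the cached duration.
func ttlDelay() { time.Sleep(ttlDelayTime) }

func TestMetricsExpired(t *testing.T) {
	ttlDelay() // metrics sent before this point are gone by now
}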

View File

@@ -111,7 +111,7 @@ func TestClustersPeerAdd(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- pinDelay()
+ ttlDelay()
f := func(t *testing.T, c *Cluster) {
ids := c.Peers(ctx)
@@ -420,7 +420,7 @@ func TestClustersPeerRemoveReallocsPins(t *testing.T) {
// We choose to remove the leader, to make things even more interesting
chosenID, err := clusters[0].consensus.Leader(ctx)
if err != nil {
- // choose a random peer
+ // choose a random peer - crdt
i := rand.Intn(nClusters)
chosenID = clusters[i].host.ID()
}
@@ -527,7 +527,7 @@ func TestClustersPeerJoin(t *testing.T) {
h := test.Cid1
clusters[0].Pin(ctx, h, api.PinOptions{})
- pinDelay()
+ ttlDelay()
for _, p := range clusters {
t.Log(p.id.String())
@@ -574,7 +574,7 @@ func TestClustersPeerJoinAllAtOnce(t *testing.T) {
h := test.Cid1
clusters[0].Pin(ctx, h, api.PinOptions{})
- pinDelay()
+ ttlDelay()
f2 := func(t *testing.T, c *Cluster) {
peers := c.Peers(ctx)