rpc auth: adjust tests to work with trusted-peer-enabled CRDT component

This commit is contained in:
Hector Sanjuan 2019-05-09 21:24:56 +02:00
parent a86c7cae2b
commit 6530808298
3 changed files with 46 additions and 1 deletion

View File

@ -317,6 +317,13 @@ func createClusters(t *testing.T) ([]*Cluster, []*test.IpfsMock) {
// Start the rest and join
for i := 1; i < nClusters; i++ {
clusters[i] = createCluster(t, hosts[i], dhts[i], cfgs[i], stores[i], cons[i], apis[i], ipfss[i], trackers[i], mons[i], allocs[i], infs[i], tracers[i])
for j := 0; j < i; j++ {
// all previous clusters trust the new one
clusters[j].consensus.Trust(ctx, hosts[i].ID())
// new cluster trusts all the previous
clusters[i].consensus.Trust(ctx, hosts[j].ID())
}
err := clusters[i].Join(ctx, bootstrapAddr)
if err != nil {
logger.Error(err)
@ -1545,6 +1552,8 @@ func TestClustersReplicationNotEnoughPeers(t *testing.T) {
c.config.ReplicationFactorMax = nClusters - 1
}
ttlDelay()
j := rand.Intn(nClusters)
h := test.Cid1
err := clusters[j].Pin(ctx, api.PinCid(h))
@ -1710,6 +1719,8 @@ func TestClustersGraphConnected(t *testing.T) {
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
ttlDelay()
j := rand.Intn(nClusters) // choose a random cluster peer to query
graph, err := clusters[j].ConnectGraph()
if err != nil {

View File

@ -90,12 +90,20 @@ func TestClustersPeerAdd(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if !containsPeer(id.ClusterPeers, clusters[0].id) {
// ClusterPeers is originally empty and contains nodes as we add them
t.Log(i, id.ClusterPeers)
t.Fatal("cluster peers should be up to date with the cluster")
}
for j := 0; j < i; j++ {
if err := clusters[j].consensus.Trust(ctx, clusters[i].id); err != nil {
t.Fatal(err)
}
if err := clusters[i].consensus.Trust(ctx, clusters[j].id); err != nil {
t.Fatal(err)
}
}
}
h := test.Cid1
@ -234,6 +242,7 @@ func TestClustersPeerAddInUnhealthyCluster(t *testing.T) {
t.Error(err)
}
ttlDelay()
ids = clusters[0].Peers(ctx)
if len(ids) != 2 {
t.Error("cluster should have 2 peers after removing and adding 1")
@ -506,11 +515,21 @@ func TestClustersPeerJoin(t *testing.T) {
}
for i := 1; i < len(clusters); i++ {
for j := 0; j < i; j++ {
if err := clusters[j].consensus.Trust(ctx, clusters[i].id); err != nil {
t.Fatal(err)
}
if err := clusters[i].consensus.Trust(ctx, clusters[j].id); err != nil {
t.Fatal(err)
}
}
err := clusters[i].Join(ctx, clusterAddr(clusters[0]))
if err != nil {
t.Fatal(err)
}
}
hash := test.Cid1
clusters[0].Pin(ctx, api.PinCid(hash))
pinDelay()
@ -551,6 +570,10 @@ func TestClustersPeerJoinAllAtOnce(t *testing.T) {
}
f := func(t *testing.T, c *Cluster) {
if err := c.consensus.Trust(ctx, clusters[0].id); err != nil {
t.Fatal(err)
}
err := c.Join(ctx, clusterAddr(clusters[0]))
if err != nil {
t.Fatal(err)
@ -643,6 +666,15 @@ func TestClustersPeerRejoin(t *testing.T) {
// add all clusters
for i := 1; i < len(clusters); i++ {
for j := 0; j < i; j++ {
if err := clusters[j].consensus.Trust(ctx, clusters[i].id); err != nil {
t.Fatal(err)
}
if err := clusters[i].consensus.Trust(ctx, clusters[j].id); err != nil {
t.Fatal(err)
}
}
err := clusters[i].Join(ctx, clusterAddr(clusters[0]))
if err != nil {
t.Fatal(err)
@ -687,6 +719,7 @@ func TestClustersPeerRejoin(t *testing.T) {
c0, m0 := createOnePeerCluster(t, 0, testingClusterSecret)
clusters[0] = c0
mocks[0] = m0
c0.consensus.Trust(ctx, clusters[1].id)
err = c0.Join(ctx, clusterAddr(clusters[1]))
if err != nil {
t.Fatal(err)

View File

@ -49,6 +49,7 @@ func TestSimplePNet(t *testing.T) {
if err != nil {
t.Fatal(err)
}
ttlDelay()
if len(clusters[0].Peers(ctx)) != len(clusters[1].Peers(ctx)) {
t.Fatal("Expected same number of peers")