ipfs-cluster/consensus/raft/consensus_test.go
Hector Sanjuan acbd7fda60 Consensus: add new "crdt" consensus component
This adds a new "crdt" consensus component using go-ds-crdt.

This implies several refactors to make cluster fully consensus-component
independent:

* Delete mapstate and fully adopt dsstate (after people have migrated).
* Return errors from state methods rather than ignoring them.
* Add a new "datastore" module so that datastores can be configured in the
  main configuration like other components.
* Let the consensus components fully define the "state.State". Thus, they do
  not receive the state; they receive the storage where we put the state (a
  go-datastore).
* Allow customizing how the monitor component obtains Peers() (the current
  peerset), including not using the current peerset at all. At the moment the
  crdt consensus uses the monitoring component to define the current peerset.
  Therefore the monitor component cannot rely on the consensus component to
  produce a peerset.
* Refactor/re-implement the "ipfs-cluster-service state" operations. This
  includes the disappearance of the "migrate" one.

The CRDT consensus component creates a crdt-datastore (with ipfs-lite) and
uses it to initialize a dsstate. Thus the crdt-store is elegantly wrapped:
any modifications to the state are automatically replicated to other peers.
We store all the CRDT DAG blocks in the local datastore.
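
A minimal sketch of this wrapping, purely illustrative: the newWrappedState
helper is hypothetical, and the in-memory datastore used by the tests below
stands in for the crdt-datastore:

    package example // hypothetical package for the sketch

    import (
        "github.com/ipfs/ipfs-cluster/datastore/inmem"
        "github.com/ipfs/ipfs-cluster/state/dsstate"
    )

    // newWrappedState shows how a dsstate is initialized on top of a
    // go-datastore; the crdt consensus does the same with a crdt-datastore,
    // so writes to the state end up replicated to other peers.
    func newWrappedState() error {
        store := inmem.New() // stand-in for the crdt-datastore
        st, err := dsstate.New(store, "", dsstate.DefaultHandle())
        if err != nil {
            return err
        }
        _ = st // reads/writes on st land in the underlying datastore
        return nil
    }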

The consensus components only expose a ReadOnly state, as any modifications to
the shared state should happen through them.

DHT and PubSub facilities must now be created outside of Cluster and passed in
so they can be re-used by different components.
2019-04-17 19:14:26 +02:00

330 lines
7.4 KiB
Go

package raft

import (
	"context"
	"fmt"
	"os"
	"testing"
	"time"

	"github.com/ipfs/ipfs-cluster/api"
	"github.com/ipfs/ipfs-cluster/datastore/inmem"
	"github.com/ipfs/ipfs-cluster/state/dsstate"
	"github.com/ipfs/ipfs-cluster/test"

	cid "github.com/ipfs/go-cid"
	libp2p "github.com/libp2p/go-libp2p"
	host "github.com/libp2p/go-libp2p-host"
	peerstore "github.com/libp2p/go-libp2p-peerstore"
)
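
// cleanRaft removes the raft data folder created for the test with the
// given id.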
func cleanRaft(idn int) {
	os.RemoveAll(fmt.Sprintf("raftFolderFromTests-%d", idn))
}
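
// testPin builds a pin for the given cid with replication factors set to -1.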
func testPin(c cid.Cid) *api.Pin {
	p := api.PinCid(c)
	p.ReplicationFactorMin = -1
	p.ReplicationFactorMax = -1
	return p
}
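
// makeTestingHost creates a libp2p host listening on a random local TCP port.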
func makeTestingHost(t *testing.T) host.Host {
	h, err := libp2p.New(
		context.Background(),
		libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"),
	)
	if err != nil {
		t.Fatal(err)
	}
	return h
}
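
// testingConsensus creates a raft Consensus with its own libp2p host, an
// in-memory datastore and a per-test raft data folder, and waits until it
// is ready.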
func testingConsensus(t *testing.T, idn int) *Consensus {
	ctx := context.Background()
	cleanRaft(idn)
	h := makeTestingHost(t)

	cfg := &Config{}
	cfg.Default()
	cfg.DataFolder = fmt.Sprintf("raftFolderFromTests-%d", idn)
	cfg.hostShutdown = true

	cc, err := NewConsensus(h, cfg, inmem.New(), false)
	if err != nil {
		t.Fatal("cannot create Consensus:", err)
	}
	cc.SetClient(test.NewMockRPCClientWithHost(t, h))
	<-cc.Ready(ctx)
	return cc
}
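
// TestShutdownConsensus checks that the consensus component shuts down
// cleanly and comes up again after being re-initialized.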
func TestShutdownConsensus(t *testing.T) {
	ctx := context.Background()
	// Bring it up twice to make sure shutdown cleans up properly
	// but also to make sure raft comes up ok when re-initialized
	cc := testingConsensus(t, 1)
	defer cleanRaft(1)
	err := cc.Shutdown(ctx)
	if err != nil {
		t.Fatal("Consensus cannot shutdown:", err)
	}
	err = cc.Shutdown(ctx) // should be fine to shutdown twice
	if err != nil {
		t.Fatal("Consensus should be able to shutdown several times")
	}
	cleanRaft(1)

	cc = testingConsensus(t, 1)
	err = cc.Shutdown(ctx)
	if err != nil {
		t.Fatal("Consensus cannot shutdown:", err)
	}
	cleanRaft(1)
}
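
// TestConsensusPin logs a pin operation and verifies that it shows up in
// the state.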
func TestConsensusPin(t *testing.T) {
	ctx := context.Background()
	cc := testingConsensus(t, 1)
	defer cleanRaft(1) // Remember defer runs in LIFO order
	defer cc.Shutdown(ctx)

	err := cc.LogPin(ctx, testPin(test.Cid1))
	if err != nil {
		t.Error("the operation did not make it to the log:", err)
	}

	time.Sleep(250 * time.Millisecond)
	st, err := cc.State(ctx)
	if err != nil {
		t.Fatal("error getting state:", err)
	}

	pins, err := st.List(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if len(pins) != 1 || !pins[0].Cid.Equals(test.Cid1) {
		t.Error("the added pin should be in the state")
	}
}
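
// TestConsensusUnpin logs an unpin operation.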
func TestConsensusUnpin(t *testing.T) {
	ctx := context.Background()
	cc := testingConsensus(t, 1)
	defer cleanRaft(1)
	defer cc.Shutdown(ctx)

	err := cc.LogUnpin(ctx, api.PinCid(test.Cid1))
	if err != nil {
		t.Error("the operation did not make it to the log:", err)
	}
}
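
// TestConsensusUpdate pins a cid and then logs an updated pin for the same
// cid, checking that the state reflects the update.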
func TestConsensusUpdate(t *testing.T) {
	ctx := context.Background()
	cc := testingConsensus(t, 1)
	defer cleanRaft(1)
	defer cc.Shutdown(ctx)

	// Pin first
	pin := testPin(test.Cid1)
	pin.Type = api.ShardType
	err := cc.LogPin(ctx, pin)
	if err != nil {
		t.Fatal("the initial operation did not make it to the log:", err)
	}
	time.Sleep(250 * time.Millisecond)

	// Update pin
	pin.Reference = &test.Cid2
	err = cc.LogPin(ctx, pin)
	if err != nil {
		t.Error("the update op did not make it to the log:", err)
	}
	time.Sleep(250 * time.Millisecond)

	st, err := cc.State(ctx)
	if err != nil {
		t.Fatal("error getting state:", err)
	}
	pins, err := st.List(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if len(pins) != 1 || !pins[0].Cid.Equals(test.Cid1) {
		t.Error("the added pin should be in the state")
	}
	if !pins[0].Reference.Equals(test.Cid2) {
		t.Error("pin updated incorrectly")
	}
}
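
// TestConsensusAddPeer adds a second peer to the raft cluster and checks
// that it joins the peerset.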
func TestConsensusAddPeer(t *testing.T) {
	ctx := context.Background()
	cc := testingConsensus(t, 1)
	cc2 := testingConsensus(t, 2)
	t.Log(cc.host.ID().Pretty())
	t.Log(cc2.host.ID().Pretty())
	defer cleanRaft(1)
	defer cleanRaft(2)
	defer cc.Shutdown(ctx)
	defer cc2.Shutdown(ctx)

	cc.host.Peerstore().AddAddrs(cc2.host.ID(), cc2.host.Addrs(), peerstore.PermanentAddrTTL)
	err := cc.AddPeer(ctx, cc2.host.ID())
	if err != nil {
		t.Error("the operation did not make it to the log:", err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	err = cc2.raft.WaitForPeer(ctx, cc.host.ID().Pretty(), false)
	if err != nil {
		t.Fatal(err)
	}

	peers, err := cc2.raft.Peers(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if len(peers) != 2 {
		t.Error("peer was not added")
	}
}
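
// TestConsensusRmPeer adds a second peer and then removes peers from the
// raft cluster, checking that the removal is reflected in the peerset.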
func TestConsensusRmPeer(t *testing.T) {
	ctx := context.Background()
	cc := testingConsensus(t, 1)
	cc2 := testingConsensus(t, 2)
	defer cleanRaft(1)
	defer cleanRaft(2)
	defer cc.Shutdown(ctx)
	defer cc2.Shutdown(ctx)

	cc.host.Peerstore().AddAddrs(cc2.host.ID(), cc2.host.Addrs(), peerstore.PermanentAddrTTL)

	err := cc.AddPeer(ctx, cc2.host.ID())
	if err != nil {
		t.Error("could not add peer:", err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()
	err = cc.raft.WaitForPeer(ctx, cc2.host.ID().Pretty(), false)
	if err != nil {
		t.Fatal(err)
	}
	cc.raft.WaitForLeader(ctx)

	err = cc.LogPin(ctx, testPin(test.Cid1))
	if err != nil {
		t.Error("could not pin after adding peer:", err)
	}

	time.Sleep(2 * time.Second)

	// Remove a non-existent peer
	err = cc.RmPeer(ctx, test.PeerID1)
	if err != nil {
		t.Fatal("the operation did not make it to the log:", err)
	}

	// Remove real peer. At least the leader can succeed
	err = cc2.RmPeer(ctx, cc.host.ID())
	err2 := cc.RmPeer(ctx, cc2.host.ID())
	if err != nil && err2 != nil {
		t.Fatal("could not remove peer:", err, err2)
	}

	err = cc.raft.WaitForPeer(ctx, cc2.host.ID().Pretty(), true)
	if err != nil {
		t.Fatal(err)
	}
}
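
// TestConsensusLeader checks that a single raft peer reports itself as
// leader.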
func TestConsensusLeader(t *testing.T) {
	ctx := context.Background()
	cc := testingConsensus(t, 1)
	pID := cc.host.ID()
	defer cleanRaft(1)
	defer cc.Shutdown(ctx)

	l, err := cc.Leader(ctx)
	if err != nil {
		t.Fatal("No leader:", err)
	}

	if l != pID {
		t.Errorf("expected %s but the leader appears as %s", pID, l)
	}
}
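
// TestRaftLatestSnapshot logs several pins, taking a raft snapshot after
// each one, and verifies that the latest snapshot can be restored into a
// dsstate containing all of them.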
func TestRaftLatestSnapshot(t *testing.T) {
	ctx := context.Background()
	cc := testingConsensus(t, 1)
	defer cleanRaft(1)
	defer cc.Shutdown(ctx)

	// Make pin 1
	err := cc.LogPin(ctx, testPin(test.Cid1))
	if err != nil {
		t.Error("the first pin did not make it to the log:", err)
	}
	time.Sleep(250 * time.Millisecond)
	err = cc.raft.Snapshot()
	if err != nil {
		t.Error("the first snapshot was not taken successfully")
	}

	// Make pin 2
	err = cc.LogPin(ctx, testPin(test.Cid2))
	if err != nil {
		t.Error("the second pin did not make it to the log:", err)
	}
	time.Sleep(250 * time.Millisecond)
	err = cc.raft.Snapshot()
	if err != nil {
		t.Error("the second snapshot was not taken successfully")
	}

	// Make pin 3
	err = cc.LogPin(ctx, testPin(test.Cid3))
	if err != nil {
		t.Error("the third pin did not make it to the log:", err)
	}
	time.Sleep(250 * time.Millisecond)
	err = cc.raft.Snapshot()
	if err != nil {
		t.Error("the third snapshot was not taken successfully")
	}

	// Call raft.LastState and ensure we get the correct state
	snapState, err := dsstate.New(inmem.New(), "", dsstate.DefaultHandle())
	if err != nil {
		t.Fatal(err)
	}
	r, snapExists, err := LastStateRaw(cc.config)
	if !snapExists {
		t.Fatal("No snapshot found by LastStateRaw")
	}
	if err != nil {
		t.Fatal("Error while taking snapshot", err)
	}
	err = snapState.Unmarshal(r)
	if err != nil {
		t.Fatal("Snapshot bytes returned could not restore to state: ", err)
	}
	pins, err := snapState.List(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if len(pins) != 3 {
		t.Fatal("Latest snapshot not read")
	}
}