ipfs-cluster/consensus/raft/consensus_test.go
Hector Sanjuan d57b81490f State: Use go-datastore to implement the state interface
Since the beginning, we have used a Go map to store the shared state (pinset)
in memory. The mapstate knew how to serialize itself so that libp2p-raft could
write it to disk when it:

* Saved snapshots of the state on shutdown
* Sent the state to a newcomer peer

hashicorp.Raft assumes an in-memory state which is snapshotted from time to
time and read from disk on boot.

This commit adds a `dsstate` implementation of the state interface using
`go-datastore`. This allows us to switch effortlessly to a disk-backed state in
the future (as we will need), and also puts the different Datastore
implementations and utilities at our disposal for fine-tuning (caching,
batching, etc.).

`mapstate` has been reworked to use dsstate. Ideally, we would not even need
`mapstate`, as it would suffice to initialize `dsstate` with a
`MapDatastore`. However, we still need it as a separate type to be able to
auto-migrate to the new format.
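
For illustration only, here is a minimal sketch of what initializing the
datastore-backed state with an in-memory `MapDatastore` could look like. The
`dsstate.New` signature and the "pins" namespace are assumptions made for the
sake of the example, not taken verbatim from this commit:

// Hypothetical sketch: building the state directly on an in-memory go-datastore.
package main

import (
	ds "github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"

	"github.com/ipfs/ipfs-cluster/state/dsstate"
)

func newInMemoryState() (*dsstate.State, error) {
	// A MapDatastore keeps everything in memory; swapping in a disk-backed
	// datastore here would persist the state without touching the callers.
	store := dssync.MutexWrap(ds.NewMapDatastore())
	// Assumed constructor: datastore, key namespace, codec handle (nil = default).
	return dsstate.New(store, "pins", nil)
}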

This will be the last migration with the current system. Once this has been
released and users have been able to upgrade, we will simply remove `mapstate`
in its current form.

License: MIT
Signed-off-by: Hector Sanjuan <code@hector.link>
2019-02-19 18:31:14 +00:00

package raft

import (
	"context"
	"fmt"
	"os"
	"testing"
	"time"

	"github.com/ipfs/ipfs-cluster/api"
	"github.com/ipfs/ipfs-cluster/state/mapstate"
	"github.com/ipfs/ipfs-cluster/test"

	cid "github.com/ipfs/go-cid"
	libp2p "github.com/libp2p/go-libp2p"
	host "github.com/libp2p/go-libp2p-host"
	peerstore "github.com/libp2p/go-libp2p-peerstore"
)

// cleanRaft removes the raft data folder used by the test instance idn.
func cleanRaft(idn int) {
	os.RemoveAll(fmt.Sprintf("raftFolderFromTests-%d", idn))
}

// testPin returns a pin for c with replication factors disabled.
func testPin(c cid.Cid) api.Pin {
	p := api.PinCid(c)
	p.ReplicationFactorMin = -1
	p.ReplicationFactorMax = -1
	return p
}

// makeTestingHost creates a libp2p host listening on a random local TCP port.
func makeTestingHost(t *testing.T) host.Host {
	h, err := libp2p.New(
		context.Background(),
		libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"),
	)
	if err != nil {
		t.Fatal(err)
	}
	return h
}

// testingConsensus creates a Consensus component with a fresh raft folder
// and waits until it is ready before returning.
func testingConsensus(t *testing.T, idn int) *Consensus {
	ctx := context.Background()
	cleanRaft(idn)
	h := makeTestingHost(t)
	st := mapstate.NewMapState()
	cfg := &Config{}
	cfg.Default()
	cfg.DataFolder = fmt.Sprintf("raftFolderFromTests-%d", idn)
	cfg.hostShutdown = true
	cc, err := NewConsensus(h, cfg, st, false)
	if err != nil {
		t.Fatal("cannot create Consensus:", err)
	}
	cc.SetClient(test.NewMockRPCClientWithHost(t, h))
	<-cc.Ready(ctx)
	return cc
}

func TestShutdownConsensus(t *testing.T) {
	ctx := context.Background()
	// Bring it up twice to make sure shutdown cleans up properly
	// but also to make sure raft comes up ok when re-initialized
	cc := testingConsensus(t, 1)
	defer cleanRaft(1)
	err := cc.Shutdown(ctx)
	if err != nil {
		t.Fatal("Consensus cannot shutdown:", err)
	}
	err = cc.Shutdown(ctx) // should be fine to shutdown twice
	if err != nil {
		t.Fatal("Consensus should be able to shutdown several times")
	}
	cleanRaft(1)

	cc = testingConsensus(t, 1)
	err = cc.Shutdown(ctx)
	if err != nil {
		t.Fatal("Consensus cannot shutdown:", err)
	}
	cleanRaft(1)
}

func TestConsensusPin(t *testing.T) {
	ctx := context.Background()
	cc := testingConsensus(t, 1)
	defer cleanRaft(1) // Remember defer runs in LIFO order
	defer cc.Shutdown(ctx)

	c, _ := cid.Decode(test.TestCid1)
	err := cc.LogPin(ctx, testPin(c))
	if err != nil {
		t.Error("the operation did not make it to the log:", err)
	}

	// Give the log entry time to be committed and applied to the state.
	time.Sleep(250 * time.Millisecond)
	st, err := cc.State(ctx)
	if err != nil {
		t.Fatal("error getting state:", err)
	}

	pins := st.List(ctx)
	if len(pins) != 1 || pins[0].Cid.String() != test.TestCid1 {
		t.Error("the added pin should be in the state")
	}
}

func TestConsensusUnpin(t *testing.T) {
	ctx := context.Background()
	cc := testingConsensus(t, 1)
	defer cleanRaft(1)
	defer cc.Shutdown(ctx)

	c, _ := cid.Decode(test.TestCid2)
	err := cc.LogUnpin(ctx, api.PinCid(c))
	if err != nil {
		t.Error("the operation did not make it to the log:", err)
	}
}

func TestConsensusUpdate(t *testing.T) {
	ctx := context.Background()
	cc := testingConsensus(t, 1)
	defer cleanRaft(1)
	defer cc.Shutdown(ctx)

	// Pin first
	c1, _ := cid.Decode(test.TestCid1)
	pin := testPin(c1)
	pin.Type = api.ShardType
	err := cc.LogPin(ctx, pin)
	if err != nil {
		t.Fatal("the initial operation did not make it to the log:", err)
	}
	time.Sleep(250 * time.Millisecond)

	// Update pin
	c2, _ := cid.Decode(test.TestCid2)
	pin.Reference = c2
	err = cc.LogPin(ctx, pin)
	if err != nil {
		t.Error("the update op did not make it to the log:", err)
	}
	time.Sleep(250 * time.Millisecond)

	st, err := cc.State(ctx)
	if err != nil {
		t.Fatal("error getting state:", err)
	}
	pins := st.List(ctx)
	if len(pins) != 1 || pins[0].Cid.String() != test.TestCid1 {
		t.Error("the added pin should be in the state")
	}
	if !pins[0].Reference.Equals(c2) {
		t.Error("pin updated incorrectly")
	}
}

func TestConsensusAddPeer(t *testing.T) {
	ctx := context.Background()
	cc := testingConsensus(t, 1)
	cc2 := testingConsensus(t, 2)
	t.Log(cc.host.ID().Pretty())
	t.Log(cc2.host.ID().Pretty())
	defer cleanRaft(1)
	defer cleanRaft(2)
	defer cc.Shutdown(ctx)
	defer cc2.Shutdown(ctx)

	cc.host.Peerstore().AddAddrs(cc2.host.ID(), cc2.host.Addrs(), peerstore.PermanentAddrTTL)
	err := cc.AddPeer(ctx, cc2.host.ID())
	if err != nil {
		t.Error("the operation did not make it to the log:", err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	err = cc2.raft.WaitForPeer(ctx, cc.host.ID().Pretty(), false)
	if err != nil {
		t.Fatal(err)
	}

	peers, err := cc2.raft.Peers(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if len(peers) != 2 {
		t.Error("peer was not added")
	}
}

func TestConsensusRmPeer(t *testing.T) {
	ctx := context.Background()
	cc := testingConsensus(t, 1)
	cc2 := testingConsensus(t, 2)
	defer cleanRaft(1)
	defer cleanRaft(2)
	defer cc.Shutdown(ctx)
	defer cc2.Shutdown(ctx)

	cc.host.Peerstore().AddAddrs(cc2.host.ID(), cc2.host.Addrs(), peerstore.PermanentAddrTTL)
	err := cc.AddPeer(ctx, cc2.host.ID())
	if err != nil {
		t.Error("could not add peer:", err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()
	err = cc.raft.WaitForPeer(ctx, cc2.host.ID().Pretty(), false)
	if err != nil {
		t.Fatal(err)
	}
	cc.raft.WaitForLeader(ctx)

	c, _ := cid.Decode(test.TestCid1)
	err = cc.LogPin(ctx, testPin(c))
	if err != nil {
		t.Error("could not pin after adding peer:", err)
	}

	time.Sleep(2 * time.Second)

	// Remove a non-existent peer
	err = cc.RmPeer(ctx, test.TestPeerID1)
	if err != nil {
		t.Fatal("the operation did not make it to the log:", err)
	}

	// Remove real peer. At least the leader can succeed
	err = cc2.RmPeer(ctx, cc.host.ID())
	err2 := cc.RmPeer(ctx, cc2.host.ID())
	if err != nil && err2 != nil {
		t.Fatal("could not remove peer:", err, err2)
	}

	err = cc.raft.WaitForPeer(ctx, cc2.host.ID().Pretty(), true)
	if err != nil {
		t.Fatal(err)
	}
}

func TestConsensusLeader(t *testing.T) {
	ctx := context.Background()
	cc := testingConsensus(t, 1)
	pID := cc.host.ID()
	defer cleanRaft(1)
	defer cc.Shutdown(ctx)
	l, err := cc.Leader(ctx)
	if err != nil {
		t.Fatal("No leader:", err)
	}

	if l != pID {
		t.Errorf("expected %s but the leader appears as %s", pID, l)
	}
}

func TestRaftLatestSnapshot(t *testing.T) {
	ctx := context.Background()
	cc := testingConsensus(t, 1)
	defer cleanRaft(1)
	defer cc.Shutdown(ctx)

	// Make pin 1
	c1, _ := cid.Decode(test.TestCid1)
	err := cc.LogPin(ctx, testPin(c1))
	if err != nil {
		t.Error("the first pin did not make it to the log:", err)
	}
	time.Sleep(250 * time.Millisecond)
	err = cc.raft.Snapshot()
	if err != nil {
		t.Error("the first snapshot was not taken successfully")
	}

	// Make pin 2
	c2, _ := cid.Decode(test.TestCid2)
	err = cc.LogPin(ctx, testPin(c2))
	if err != nil {
		t.Error("the second pin did not make it to the log:", err)
	}
	time.Sleep(250 * time.Millisecond)
	err = cc.raft.Snapshot()
	if err != nil {
		t.Error("the second snapshot was not taken successfully")
	}

	// Make pin 3
	c3, _ := cid.Decode(test.TestCid3)
	err = cc.LogPin(ctx, testPin(c3))
	if err != nil {
		t.Error("the third pin did not make it to the log:", err)
	}
	time.Sleep(250 * time.Millisecond)
	err = cc.raft.Snapshot()
	if err != nil {
		t.Error("the third snapshot was not taken successfully")
	}

	// Call raft.LastState and ensure we get the correct state
	snapState := mapstate.NewMapState()
	r, snapExists, err := LastStateRaw(cc.config)
	if !snapExists {
		t.Fatal("No snapshot found by LastStateRaw")
	}
	if err != nil {
		t.Fatal("Error while taking snapshot", err)
	}
	err = snapState.Migrate(ctx, r)
	if err != nil {
		t.Fatal("Snapshot bytes returned could not restore to state: ", err)
	}
	pins := snapState.List(ctx)
	if len(pins) != 3 {
		t.Fatal("Latest snapshot not read")
	}
}