ipfs-cluster/consensus/crdt/consensus_test.go
Hector Sanjuan acbd7fda60 Consensus: add new "crdt" consensus component
This adds a new "crdt" consensus component using go-ds-crdt.

This implies several refactors to make cluster fully consensus-component
independent:

* Delete mapstate and fully adopt dsstate (after people have migrated).
* Return errors from state methods rather than ignoring them.
* Add a new "datastore" module so that we can configure datastores in the
  main configuration like other components.
* Let the consensus components fully define the "state.State". Thus, they do
  not receive the state, they receive the storage where we put the state (a
  go-datastore). See the sketch after this list.
* Allow customizing how the monitor component obtains Peers() (the current
  peerset), including not relying on the consensus peerset at all. At the
  moment the crdt consensus uses the monitoring component to define the
  current peerset. Therefore the monitor component cannot rely on the
  consensus component to produce a peerset.
* Re-factor/re-implement the "ipfs-cluster-service state" operations. This
  includes the disappearance of the "migrate" one.
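
As a rough sketch of what this looks like for a caller (it mirrors the
testingConsensus helper in the test file below; "h", "dht" and "psub" are a
libp2p host, DHT and PubSub created elsewhere, and inmem.New() stands in for
whatever datastore the configuration provides):

    store := inmem.New() // any go-datastore; the component builds its state on top
    cfg := &crdt.Config{}
    cfg.Default()
    cc, err := crdt.New(h, dht, psub, cfg, store)
    if err != nil {
        // handle error
    }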

The CRDT consensus component creates a crdt-datastore (with ipfs-lite)
and uses it to initialize a dsstate. Thus the crdt-store is elegantly
wrapped. Any modifications to the state get automatically replicated to other
peers. We store all the CRDT DAG blocks in the local datastore.

The consensus components only expose a ReadOnly state, as any modifications to
the shared state should happen through them.
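
In outline (as exercised by TestConsensusAddRmPeer below, where cc and cc2
are the consensus components of two connected peers):

    // peer A: all changes to the shared state go through the component and
    // replicate automatically via the CRDT store.
    err := cc.LogPin(ctx, testPin(test.Cid1))
    if err != nil {
        // handle error
    }

    // peer B: reads happen through the read-only state the component exposes.
    st, _ := cc2.State(ctx)
    pins, _ := st.List(ctx) // eventually includes test.Cid1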

DHT and PubSub facilities must now be created outside of Cluster and passed in
so they can be re-used by different components.
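
The makeTestingHost helper below shows the pattern; condensed (error handling
omitted, cfg and store as in the sketch above):

    h, _ := libp2p.New(ctx, libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"))
    psub, _ := pubsub.NewGossipSub(ctx, h)
    idht, _ := dht.New(ctx, h)
    // the same host, DHT and PubSub instances are then handed to every
    // component that needs them, e.g. the crdt consensus:
    cc, _ := crdt.New(routedhost.Wrap(h, idht), idht, psub, cfg, store)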
2019-04-17 19:14:26 +02:00

package crdt

import (
	"context"
	"fmt"
	"testing"
	"time"

	cid "github.com/ipfs/go-cid"
	"github.com/ipfs/ipfs-cluster/api"
	"github.com/ipfs/ipfs-cluster/datastore/inmem"
	"github.com/ipfs/ipfs-cluster/test"

	libp2p "github.com/libp2p/go-libp2p"
	host "github.com/libp2p/go-libp2p-host"
	dht "github.com/libp2p/go-libp2p-kad-dht"
	peerstore "github.com/libp2p/go-libp2p-peerstore"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	routedhost "github.com/libp2p/go-libp2p/p2p/host/routed"
)
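
// makeTestingHost creates a libp2p host listening on localhost, attaches
// GossipSub and a DHT to it and returns it wrapped as a routed host.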
func makeTestingHost(t *testing.T) (host.Host, *pubsub.PubSub, *dht.IpfsDHT) {
	ctx := context.Background()
	h, err := libp2p.New(
		ctx,
		libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"),
	)
	if err != nil {
		t.Fatal(err)
	}

	psub, err := pubsub.NewGossipSub(
		ctx,
		h,
		pubsub.WithMessageSigning(true),
		pubsub.WithStrictSignatureVerification(true),
	)
	if err != nil {
		h.Close()
		t.Fatal(err)
	}

	idht, err := dht.New(ctx, h)
	if err != nil {
		h.Close()
		t.Fatal(err)
	}

	btstrCfg := dht.BootstrapConfig{
		Queries: 1,
		Period:  200 * time.Millisecond,
		Timeout: 100 * time.Millisecond,
	}

	err = idht.BootstrapWithConfig(ctx, btstrCfg)
	if err != nil {
		h.Close()
		t.Fatal(err)
	}

	rHost := routedhost.Wrap(h, idht)
	return rHost, psub, idht
}
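
// testingConsensus creates a CRDT Consensus component backed by an in-memory
// datastore and a mock RPC client, and waits until it is ready.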
func testingConsensus(t *testing.T, idn int) *Consensus {
	h, psub, dht := makeTestingHost(t)

	cfg := &Config{}
	cfg.Default()
	cfg.DatastoreNamespace = fmt.Sprintf("crdttest-%d", idn)
	cfg.hostShutdown = true

	cc, err := New(h, dht, psub, cfg, inmem.New())
	if err != nil {
		t.Fatal("cannot create Consensus:", err)
	}
	cc.SetClient(test.NewMockRPCClientWithHost(t, h))
	<-cc.Ready(context.Background())
	return cc
}
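
// clean wipes the data stored by the given consensus component.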
func clean(t *testing.T, cc *Consensus) {
	err := cc.Clean(context.Background())
	if err != nil {
		t.Error(err)
	}
}
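
// testPin returns a Pin for the given Cid with replication factors set to -1
// (pin everywhere).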
func testPin(c cid.Cid) *api.Pin {
	p := api.PinCid(c)
	p.ReplicationFactorMin = -1
	p.ReplicationFactorMax = -1
	return p
}

func TestShutdownConsensus(t *testing.T) {
	ctx := context.Background()
	cc := testingConsensus(t, 1)
	defer clean(t, cc)

	err := cc.Shutdown(ctx)
	if err != nil {
		t.Fatal("Consensus cannot shutdown:", err)
	}

	err = cc.Shutdown(ctx) // should be fine to shutdown twice
	if err != nil {
		t.Fatal("Consensus should be able to shutdown several times")
	}
}

func TestConsensusPin(t *testing.T) {
	ctx := context.Background()
	cc := testingConsensus(t, 1)
	defer clean(t, cc)
	defer cc.Shutdown(ctx)

	err := cc.LogPin(ctx, testPin(test.Cid1))
	if err != nil {
		t.Error(err)
	}

	time.Sleep(250 * time.Millisecond)
	st, err := cc.State(ctx)
	if err != nil {
		t.Fatal("error getting state:", err)
	}

	pins, err := st.List(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if len(pins) != 1 || !pins[0].Cid.Equals(test.Cid1) {
		t.Error("the added pin should be in the state")
	}
}

func TestConsensusUnpin(t *testing.T) {
	ctx := context.Background()
	cc := testingConsensus(t, 1)
	defer clean(t, cc)
	defer cc.Shutdown(ctx)

	err := cc.LogPin(ctx, testPin(test.Cid1))
	if err != nil {
		t.Error(err)
	}

	err = cc.LogUnpin(ctx, api.PinCid(test.Cid1))
	if err != nil {
		t.Error(err)
	}
}

func TestConsensusUpdate(t *testing.T) {
	ctx := context.Background()
	cc := testingConsensus(t, 1)
	defer clean(t, cc)
	defer cc.Shutdown(ctx)

	// Pin first
	pin := testPin(test.Cid1)
	pin.Type = api.ShardType
	err := cc.LogPin(ctx, pin)
	if err != nil {
		t.Fatal(err)
	}
	time.Sleep(250 * time.Millisecond)

	// Update pin
	pin.Reference = &test.Cid2
	err = cc.LogPin(ctx, pin)
	if err != nil {
		t.Error(err)
	}
	time.Sleep(250 * time.Millisecond)

	st, err := cc.State(ctx)
	if err != nil {
		t.Fatal("error getting state:", err)
	}

	pins, err := st.List(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if len(pins) != 1 || !pins[0].Cid.Equals(test.Cid1) {
		t.Error("the added pin should be in the state")
	}
	if !pins[0].Reference.Equals(test.Cid2) {
		t.Error("pin updated incorrectly")
	}
}

func TestConsensusAddRmPeer(t *testing.T) {
	ctx := context.Background()
	cc := testingConsensus(t, 1)
	cc2 := testingConsensus(t, 2)
	defer clean(t, cc)
	defer clean(t, cc2)
	defer cc.Shutdown(ctx)
	defer cc2.Shutdown(ctx)

	cc.host.Peerstore().AddAddrs(cc2.host.ID(), cc2.host.Addrs(), peerstore.PermanentAddrTTL)
	_, err := cc.host.Network().DialPeer(ctx, cc2.host.ID())
	if err != nil {
		t.Fatal(err)
	}
	time.Sleep(100 * time.Millisecond)

	err = cc.AddPeer(ctx, cc2.host.ID())
	if err != nil {
		t.Error("could not add peer:", err)
	}

	// Make a pin on peer1 and check that it arrived to peer2
	err = cc.LogPin(ctx, testPin(test.Cid1))
	if err != nil {
		t.Error(err)
	}

	time.Sleep(250 * time.Millisecond)
	st, err := cc2.State(ctx)
	if err != nil {
		t.Fatal("error getting state:", err)
	}

	pins, err := st.List(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if len(pins) != 1 || !pins[0].Cid.Equals(test.Cid1) {
		t.Error("the added pin should be in the state")
	}

	err = cc2.RmPeer(ctx, cc.host.ID())
	if err == nil {
		t.Error("crdt consensus should not remove peers")
	}
}

func TestPeers(t *testing.T) {
	ctx := context.Background()
	cc := testingConsensus(t, 1)
	defer clean(t, cc)
	defer cc.Shutdown(ctx)

	peers, err := cc.Peers(ctx)
	if err != nil {
		t.Fatal(err)
	}

	// 1 is ourselves and the other comes from the
	// mock rpc PeerMonitorLatestMetrics
	if len(peers) != 2 {
		t.Error("unexpected number of peers")
	}
}
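
// TestOfflineState checks that the shared state can still be read directly
// from the datastore via OfflineState once the consensus component has been
// shut down.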
func TestOfflineState(t *testing.T) {
	ctx := context.Background()
	cc := testingConsensus(t, 1)
	defer clean(t, cc)
	defer cc.Shutdown(ctx)

	// Make pin 1
	err := cc.LogPin(ctx, testPin(test.Cid1))
	if err != nil {
		t.Error(err)
	}

	// Make pin 2
	err = cc.LogPin(ctx, testPin(test.Cid2))
	if err != nil {
		t.Error(err)
	}

	err = cc.Shutdown(ctx)
	if err != nil {
		t.Fatal(err)
	}

	offlineState, err := OfflineState(cc.config, cc.store)
	if err != nil {
		t.Fatal(err)
	}

	pins, err := offlineState.List(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if len(pins) != 2 {
		t.Error("there should be two pins in the state")
	}
}