ipfs-cluster/cluster_test.go
Wyatt 47b744f1c0 ipfs-cluster-service state upgrade cli command
    ipfs-cluster-service now has a migration subcommand that upgrades
    persistent state snapshots with an out-of-date format version to the
    newest version of the raft state. If all cluster members shut down with
    consistent state, upgrade ipfs-cluster, and run the state upgrade command,
    the new version of cluster will be compatible with the persistent storage.
    ipfs-cluster now validates its persistent state upon loading it and exits
    with a clear error when the state format version is not up to date.

    Raft snapshotting is enforced on all shutdowns and the JSON backup is no
    longer run. This commit makes use of recent changes to libp2p-raft
    allowing raft states to implement their own marshaling strategies. Now
    mapstate handles the logic for its (de)serialization. In the interest of
    supporting various potential upgrade formats, the state serialization
    begins with a varint (currently one byte) describing the version (a sketch
    of this versioning scheme follows below).

    Some Go tests are modified and a Go test is added to cover the new
    ipfs-cluster raft snapshot reading functions. Sharness tests are added to
    cover the state upgrade command.
2017-11-28 22:35:48 -05:00
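A minimal, hypothetical sketch of the version-prefixed state serialization described in the commit message above. The versionedState type, its fields, and the JSON payload encoding are invented for illustration and are not part of ipfs-cluster; the real mapstate implementation may encode the body differently. The only point shown is the varint format-version header and the clear failure on an unsupported version.

package main

import (
	"bytes"
	"encoding/binary"
	"encoding/json"
	"fmt"
)

// versionedState is a hypothetical stand-in for a cluster state type.
type versionedState struct {
	Version int                 // format version written as a varint prefix
	PinMap  map[string]struct{} // the actual state payload
}

// Marshal writes the format version as a varint, then the JSON-encoded payload.
func (st *versionedState) Marshal() ([]byte, error) {
	var buf bytes.Buffer
	vbuf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(vbuf, uint64(st.Version))
	buf.Write(vbuf[:n])
	if err := json.NewEncoder(&buf).Encode(st.PinMap); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

// Unmarshal reads the varint version first and fails with a clear error on a
// mismatch, mirroring the validate-on-load behaviour described above.
func (st *versionedState) Unmarshal(data []byte) error {
	r := bytes.NewReader(data)
	v, err := binary.ReadUvarint(r)
	if err != nil {
		return err
	}
	if int(v) != st.Version {
		return fmt.Errorf("unsupported state format version %d (want %d)", v, st.Version)
	}
	return json.NewDecoder(r).Decode(&st.PinMap)
}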


package ipfscluster

import (
	"errors"
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/ipfs/ipfs-cluster/allocator/ascendalloc"
	"github.com/ipfs/ipfs-cluster/api"
	"github.com/ipfs/ipfs-cluster/informer/numpin"
	"github.com/ipfs/ipfs-cluster/monitor/basic"
	"github.com/ipfs/ipfs-cluster/pintracker/maptracker"
	"github.com/ipfs/ipfs-cluster/state/mapstate"
	"github.com/ipfs/ipfs-cluster/test"

	rpc "github.com/hsanjuan/go-libp2p-gorpc"
	cid "github.com/ipfs/go-cid"
)
type mockComponent struct {
	rpcClient   *rpc.Client
	returnError bool
}

func (c *mockComponent) Shutdown() error {
	return nil
}

func (c *mockComponent) SetClient(client *rpc.Client) {
	c.rpcClient = client
	return
}

type mockAPI struct {
	mockComponent
}

type mockConnector struct {
	mockComponent
}

func (ipfs *mockConnector) ID() (api.IPFSID, error) {
	if ipfs.returnError {
		return api.IPFSID{}, errors.New("")
	}
	return api.IPFSID{
		ID: test.TestPeerID1,
	}, nil
}

func (ipfs *mockConnector) Pin(c *cid.Cid) error {
	if ipfs.returnError {
		return errors.New("")
	}
	return nil
}

func (ipfs *mockConnector) Unpin(c *cid.Cid) error {
	if ipfs.returnError {
		return errors.New("")
	}
	return nil
}

func (ipfs *mockConnector) PinLsCid(c *cid.Cid) (api.IPFSPinStatus, error) {
	if ipfs.returnError {
		return api.IPFSPinStatusError, errors.New("")
	}
	return api.IPFSPinStatusRecursive, nil
}

func (ipfs *mockConnector) PinLs(filter string) (map[string]api.IPFSPinStatus, error) {
	if ipfs.returnError {
		return nil, errors.New("")
	}
	m := make(map[string]api.IPFSPinStatus)
	return m, nil
}

func (ipfs *mockConnector) ConnectSwarms() error                          { return nil }
func (ipfs *mockConnector) ConfigKey(keypath string) (interface{}, error) { return nil, nil }
func (ipfs *mockConnector) FreeSpace() (uint64, error)                    { return 100, nil }
func (ipfs *mockConnector) RepoSize() (uint64, error)                     { return 0, nil }
func testingCluster(t *testing.T) (*Cluster, *mockAPI, *mockConnector, *mapstate.MapState, *maptracker.MapPinTracker) {
	clusterCfg, _, _, consensusCfg, monCfg, _ := testingConfigs()

	api := &mockAPI{}
	ipfs := &mockConnector{}
	st := mapstate.NewMapState()
	tracker := maptracker.NewMapPinTracker(clusterCfg.ID)

	monCfg.CheckInterval = 2 * time.Second
	mon, _ := basic.NewMonitor(monCfg)

	alloc := ascendalloc.NewAllocator()
	numpinCfg := &numpin.Config{}
	numpinCfg.Default()
	inf, _ := numpin.NewInformer(numpinCfg)

	cl, err := NewCluster(
		clusterCfg,
		consensusCfg,
		api,
		ipfs,
		st,
		tracker,
		mon,
		alloc,
		inf)
	if err != nil {
		t.Fatal("cannot create cluster:", err)
	}
	<-cl.Ready()
	return cl, api, ipfs, st, tracker
}

func cleanRaft() {
	raftDirs, _ := filepath.Glob("raftFolderFromTests*")
	for _, dir := range raftDirs {
		os.RemoveAll(dir)
	}
}
func testClusterShutdown(t *testing.T) {
	cl, _, _, _, _ := testingCluster(t)
	err := cl.Shutdown()
	if err != nil {
		t.Error("cluster shutdown failed:", err)
	}
	cl.Shutdown()
	cl, _, _, _, _ = testingCluster(t)
	err = cl.Shutdown()
	if err != nil {
		t.Error("cluster shutdown failed:", err)
	}
}
func TestClusterStateSync(t *testing.T) {
	cleanRaft()
	cl, _, _, st, _ := testingCluster(t)
	defer cleanRaft()
	defer cl.Shutdown()

	_, err := cl.StateSync()
	if err == nil {
		t.Fatal("expected an error as there is no state to sync")
	}

	c, _ := cid.Decode(test.TestCid1)
	err = cl.Pin(api.PinCid(c))
	if err != nil {
		t.Fatal("pin should have worked:", err)
	}

	_, err = cl.StateSync()
	if err != nil {
		t.Fatal("sync after pinning should have worked:", err)
	}

	// Modify state on the side so the sync does not
	// happen on an empty slate
	st.Rm(c)
	_, err = cl.StateSync()
	if err != nil {
		t.Fatal("sync with recover should have worked:", err)
	}
}
func TestClusterID(t *testing.T) {
	cl, _, _, _, _ := testingCluster(t)
	defer cleanRaft()
	defer cl.Shutdown()
	id := cl.ID()
	if len(id.Addresses) == 0 {
		t.Error("expected more addresses")
	}
	if id.ID == "" {
		t.Error("expected a cluster ID")
	}
	if id.Version != Version {
		t.Error("version should match current version")
	}
	//if id.PublicKey == nil {
	//	t.Error("publicKey should not be empty")
	//}
}

func TestClusterPin(t *testing.T) {
	cl, _, _, _, _ := testingCluster(t)
	defer cleanRaft()
	defer cl.Shutdown()

	c, _ := cid.Decode(test.TestCid1)
	err := cl.Pin(api.PinCid(c))
	if err != nil {
		t.Fatal("pin should have worked:", err)
	}

	// test an error case
	cl.consensus.Shutdown()
	err = cl.Pin(api.PinCid(c))
	if err == nil {
		t.Error("expected an error but things worked")
	}
}
func TestClusterPins(t *testing.T) {
	cl, _, _, _, _ := testingCluster(t)
	defer cleanRaft()
	defer cl.Shutdown()

	c, _ := cid.Decode(test.TestCid1)
	err := cl.Pin(api.PinCid(c))
	if err != nil {
		t.Fatal("pin should have worked:", err)
	}

	pins := cl.Pins()
	if len(pins) != 1 {
		t.Fatal("pin should be part of the state")
	}
	if !pins[0].Cid.Equals(c) || pins[0].ReplicationFactor != -1 {
		t.Error("the Pin does not look as expected")
	}
}

func TestClusterPinGet(t *testing.T) {
	cl, _, _, _, _ := testingCluster(t)
	defer cleanRaft()
	defer cl.Shutdown()

	c, _ := cid.Decode(test.TestCid1)
	err := cl.Pin(api.PinCid(c))
	if err != nil {
		t.Fatal("pin should have worked:", err)
	}

	pin, err := cl.PinGet(c)
	if err != nil {
		t.Fatal(err)
	}
	if !pin.Cid.Equals(c) || pin.ReplicationFactor != -1 {
		t.Error("the Pin does not look as expected")
	}

	c2, _ := cid.Decode(test.TestCid2)
	_, err = cl.PinGet(c2)
	if err == nil {
		t.Fatal("expected an error")
	}
}
func TestClusterUnpin(t *testing.T) {
	cl, _, _, _, _ := testingCluster(t)
	defer cleanRaft()
	defer cl.Shutdown()

	c, _ := cid.Decode(test.TestCid1)
	err := cl.Unpin(c)
	if err != nil {
		t.Fatal("unpin should have worked:", err)
	}

	// test an error case
	cl.consensus.Shutdown()
	err = cl.Unpin(c)
	if err == nil {
		t.Error("expected an error but things worked")
	}
}
func TestClusterPeers(t *testing.T) {
	cl, _, _, _, _ := testingCluster(t)
	defer cleanRaft()
	defer cl.Shutdown()
	peers := cl.Peers()
	if len(peers) != 1 {
		t.Fatal("expected 1 peer")
	}

	clusterCfg := &Config{}
	clusterCfg.LoadJSON(testingClusterCfg)
	if peers[0].ID != clusterCfg.ID {
		t.Error("bad member")
	}
}

func TestVersion(t *testing.T) {
	cl, _, _, _, _ := testingCluster(t)
	defer cleanRaft()
	defer cl.Shutdown()
	if cl.Version() != Version {
		t.Error("bad Version()")
	}
}