ipfs-cluster/cluster_test.go
Hector Sanjuan 8f06baa1bf Issue #162: Rework configuration format
The following commit reimplements ipfs-cluster configuration under
the following premises:

  * Each component is initialized with a configuration object
  defined by its module
  * Each component decides what the JSON representation of its
  configuration looks like
  * Each component parses and validates its own configuration
  * Each component exposes its own defaults
  * Component configurations make up the sections of a
  central JSON configuration file (which replaces the current
  JSON format)
  * Component configurations implement a common interface
  (config.ComponentConfig) with a set of common operations
  (sketched below)
  * The central configuration file is managed by a
  config.ConfigManager which:
    * Registers ComponentConfigs
    * Assigns the corresponding sections from the JSON file to each
    component and delegates the parsing
    * Delegates the JSON generation for each section
    * Can be notified when the configuration is updated and must be
    saved to disk
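
To make the common interface concrete, here is a minimal sketch in Go,
derived from the operations listed above. The method names and
signatures are illustrative assumptions, not necessarily the exact
config.ComponentConfig API:

```go
// Illustrative sketch only: these methods are inferred from the
// operations described above, not copied from the actual API.
type ComponentConfig interface {
	// ConfigKey returns the name of this component's section in
	// the central JSON configuration file (e.g. "restapi").
	ConfigKey() string
	// LoadJSON parses and validates the component's own section.
	LoadJSON(raw []byte) error
	// ToJSON generates the JSON representation of the section.
	ToJSON() ([]byte, error)
	// Default resets the configuration to the component's defaults.
	Default() error
	// SetSaveCallback registers a function to invoke when the
	// configuration is updated and must be saved to disk.
	SetSaveCallback(save func())
}
```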

The new service.json would then look as follows:

```json
{
  "cluster": {
    "id": "QmTVW8NoRxC5wBhV7WtAYtRn7itipEESfozWN5KmXUQnk2",
    "private_key": "<...>",
    "secret": "00224102ae6aaf94f2606abf69a0e278251ecc1d64815b617ff19d6d2841f786",
    "peers": [],
    "bootstrap": [],
    "leave_on_shutdown": false,
    "listen_multiaddress": "/ip4/0.0.0.0/tcp/9096",
    "state_sync_interval": "1m0s",
    "ipfs_sync_interval": "2m10s",
    "replication_factor": -1,
    "monitor_ping_interval": "15s"
  },
  "consensus": {
    "raft": {
      "heartbeat_timeout": "1s",
      "election_timeout": "1s",
      "commit_timeout": "50ms",
      "max_append_entries": 64,
      "trailing_logs": 10240,
      "snapshot_interval": "2m0s",
      "snapshot_threshold": 8192,
      "leader_lease_timeout": "500ms"
    }
  },
  "api": {
    "restapi": {
      "listen_multiaddress": "/ip4/127.0.0.1/tcp/9094",
      "read_timeout": "30s",
      "read_header_timeout": "5s",
      "write_timeout": "1m0s",
      "idle_timeout": "2m0s"
    }
  },
  "ipfs_connector": {
    "ipfshttp": {
      "proxy_listen_multiaddress": "/ip4/127.0.0.1/tcp/9095",
      "node_multiaddress": "/ip4/127.0.0.1/tcp/5001",
      "connect_swarms_delay": "7s",
      "proxy_read_timeout": "10m0s",
      "proxy_read_header_timeout": "5s",
      "proxy_write_timeout": "10m0s",
      "proxy_idle_timeout": "1m0s"
    }
  },
  "monitor": {
    "monbasic": {
      "check_interval": "15s"
    }
  },
  "informer": {
    "disk": {
      "metric_ttl": "30s",
      "metric_type": "freespace"
    },
    "numpin": {
      "metric_ttl": "10s"
    }
  }
}
```

This new format aims to be easily extensible per component. As such,
it already surfaces quite a few new options which were hardcoded
before.
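
As an illustration of that extensibility, a new component could plug
into the format by implementing the interface for its own section.
Everything in the following sketch (the "mycomp" section name, its
fields and its types) is invented for the example:

```go
package mycomp

import (
	"encoding/json"
	"errors"
	"time"
)

// Config is a hypothetical component configuration.
type Config struct {
	saveCallback func()
	MetricTTL    time.Duration
}

// jsonConfig mirrors the component's JSON section, keeping durations
// as strings ("30s"), as in the service.json example above.
type jsonConfig struct {
	MetricTTL string `json:"metric_ttl"`
}

// ConfigKey names this component's section in the central file.
func (cfg *Config) ConfigKey() string { return "mycomp" }

// Default sets the component's default values.
func (cfg *Config) Default() error {
	cfg.MetricTTL = 30 * time.Second
	return nil
}

// LoadJSON parses and validates this component's section.
func (cfg *Config) LoadJSON(raw []byte) error {
	var j jsonConfig
	if err := json.Unmarshal(raw, &j); err != nil {
		return err
	}
	ttl, err := time.ParseDuration(j.MetricTTL)
	if err != nil {
		return err
	}
	if ttl <= 0 {
		return errors.New("mycomp: metric_ttl must be positive")
	}
	cfg.MetricTTL = ttl
	return nil
}

// ToJSON generates the JSON for this component's section.
func (cfg *Config) ToJSON() ([]byte, error) {
	return json.Marshal(&jsonConfig{MetricTTL: cfg.MetricTTL.String()})
}

// SetSaveCallback registers the hook used to notify the manager
// that the configuration changed and must be saved to disk.
func (cfg *Config) SetSaveCallback(save func()) {
	cfg.saveCallback = save
}
```

With this in place, the ConfigManager only needs to register the
component's Config, hand it the raw bytes of its section, and delegate
JSON generation back to it; it never needs to know what is inside.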

Additionally, since the Go APIs have changed, some redundant methods
have been removed and some small refactoring has been done to take
advantage of the new approach.

License: MIT
Signed-off-by: Hector Sanjuan <hector@protocol.ai>
2017-10-18 00:00:12 +02:00

package ipfscluster

import (
	"errors"
	"os"
	"testing"
	"time"

	"github.com/ipfs/ipfs-cluster/allocator/ascendalloc"
	"github.com/ipfs/ipfs-cluster/api"
	"github.com/ipfs/ipfs-cluster/informer/numpin"
	"github.com/ipfs/ipfs-cluster/monitor/basic"
	"github.com/ipfs/ipfs-cluster/pintracker/maptracker"
	"github.com/ipfs/ipfs-cluster/state/mapstate"
	"github.com/ipfs/ipfs-cluster/test"

	rpc "github.com/hsanjuan/go-libp2p-gorpc"
	cid "github.com/ipfs/go-cid"
)
// mockComponent provides the Shutdown/SetClient plumbing shared by the
// mock cluster components below. Setting returnError makes a
// component fail all of its calls.
type mockComponent struct {
	rpcClient   *rpc.Client
	returnError bool
}

func (c *mockComponent) Shutdown() error {
	return nil
}

func (c *mockComponent) SetClient(client *rpc.Client) {
	c.rpcClient = client
}

type mockAPI struct {
	mockComponent
}

// mockConnector stands in for the IPFS connector component.
type mockConnector struct {
	mockComponent
}

func (ipfs *mockConnector) ID() (api.IPFSID, error) {
	if ipfs.returnError {
		return api.IPFSID{}, errors.New("")
	}
	return api.IPFSID{
		ID: test.TestPeerID1,
	}, nil
}
func (ipfs *mockConnector) Pin(c *cid.Cid) error {
	if ipfs.returnError {
		return errors.New("")
	}
	return nil
}

func (ipfs *mockConnector) Unpin(c *cid.Cid) error {
	if ipfs.returnError {
		return errors.New("")
	}
	return nil
}

func (ipfs *mockConnector) PinLsCid(c *cid.Cid) (api.IPFSPinStatus, error) {
	if ipfs.returnError {
		return api.IPFSPinStatusError, errors.New("")
	}
	return api.IPFSPinStatusRecursive, nil
}

func (ipfs *mockConnector) PinLs(filter string) (map[string]api.IPFSPinStatus, error) {
	if ipfs.returnError {
		return nil, errors.New("")
	}
	m := make(map[string]api.IPFSPinStatus)
	return m, nil
}

func (ipfs *mockConnector) ConnectSwarms() error                          { return nil }
func (ipfs *mockConnector) ConfigKey(keypath string) (interface{}, error) { return nil, nil }
func (ipfs *mockConnector) FreeSpace() (int, error)                       { return 100, nil }
func (ipfs *mockConnector) RepoSize() (int, error)                        { return 0, nil }
// testingCluster builds a Cluster wired with mock components and waits
// until it is ready before returning it along with its parts.
func testingCluster(t *testing.T) (*Cluster, *mockAPI, *mockConnector, *mapstate.MapState, *maptracker.MapPinTracker) {
	clusterCfg, _, _, consensusCfg, monCfg, _ := testingConfigs()
	api := &mockAPI{}
	ipfs := &mockConnector{}
	st := mapstate.NewMapState()
	tracker := maptracker.NewMapPinTracker(clusterCfg.ID)
	monCfg.CheckInterval = 2 * time.Second
	mon, _ := basic.NewMonitor(monCfg)
	alloc := ascendalloc.NewAllocator()
	numpinCfg := &numpin.Config{}
	numpinCfg.Default()
	inf, _ := numpin.NewInformer(numpinCfg)

	cl, err := NewCluster(
		clusterCfg,
		consensusCfg,
		api,
		ipfs,
		st,
		tracker,
		mon,
		alloc,
		inf)
	if err != nil {
		t.Fatal("cannot create cluster:", err)
	}
	<-cl.Ready()
	return cl, api, ipfs, st, tracker
}
func cleanRaft() {
	os.RemoveAll("raftFolderFromTests")
}
func testClusterShutdown(t *testing.T) {
	cl, _, _, _, _ := testingCluster(t)
	err := cl.Shutdown()
	if err != nil {
		t.Error("cluster shutdown failed:", err)
	}
	cl.Shutdown() // a second call should be a harmless no-op
	cl, _, _, _, _ = testingCluster(t)
	err = cl.Shutdown()
	if err != nil {
		t.Error("cluster shutdown failed:", err)
	}
}
func TestClusterStateSync(t *testing.T) {
	cleanRaft()
	cl, _, _, st, _ := testingCluster(t)
	defer cleanRaft()
	defer cl.Shutdown()

	_, err := cl.StateSync()
	if err == nil {
		t.Fatal("expected an error as there is no state to sync")
	}

	c, _ := cid.Decode(test.TestCid1)
	err = cl.Pin(api.PinCid(c))
	if err != nil {
		t.Fatal("pin should have worked:", err)
	}

	_, err = cl.StateSync()
	if err != nil {
		t.Fatal("sync after pinning should have worked:", err)
	}

	// Modify the state on the side so the sync does not
	// happen on an empty slate
	st.Rm(c)
	_, err = cl.StateSync()
	if err != nil {
		t.Fatal("sync with recover should have worked:", err)
	}
}
func TestClusterID(t *testing.T) {
	cl, _, _, _, _ := testingCluster(t)
	defer cleanRaft()
	defer cl.Shutdown()
	id := cl.ID()
	if len(id.Addresses) == 0 {
		t.Error("expected more addresses")
	}
	if id.ID == "" {
		t.Error("expected a cluster ID")
	}
	if id.Version != Version {
		t.Error("version should match current version")
	}
	//if id.PublicKey == nil {
	//	t.Error("publicKey should not be empty")
	//}
}
func TestClusterPin(t *testing.T) {
	cl, _, _, _, _ := testingCluster(t)
	defer cleanRaft()
	defer cl.Shutdown()

	c, _ := cid.Decode(test.TestCid1)
	err := cl.Pin(api.PinCid(c))
	if err != nil {
		t.Fatal("pin should have worked:", err)
	}

	// test an error case
	cl.consensus.Shutdown()
	err = cl.Pin(api.PinCid(c))
	if err == nil {
		t.Error("expected an error but things worked")
	}
}
func TestClusterPins(t *testing.T) {
	cl, _, _, _, _ := testingCluster(t)
	defer cleanRaft()
	defer cl.Shutdown()

	c, _ := cid.Decode(test.TestCid1)
	err := cl.Pin(api.PinCid(c))
	if err != nil {
		t.Fatal("pin should have worked:", err)
	}

	pins := cl.Pins()
	if len(pins) != 1 {
		t.Fatal("pin should be part of the state")
	}
	if !pins[0].Cid.Equals(c) || pins[0].ReplicationFactor != -1 {
		t.Error("the Pin does not look as expected")
	}
}
func TestClusterPinGet(t *testing.T) {
	cl, _, _, _, _ := testingCluster(t)
	defer cleanRaft()
	defer cl.Shutdown()

	c, _ := cid.Decode(test.TestCid1)
	err := cl.Pin(api.PinCid(c))
	if err != nil {
		t.Fatal("pin should have worked:", err)
	}

	pin, err := cl.PinGet(c)
	if err != nil {
		t.Fatal(err)
	}
	if !pin.Cid.Equals(c) || pin.ReplicationFactor != -1 {
		t.Error("the Pin does not look as expected")
	}

	c2, _ := cid.Decode(test.TestCid2)
	_, err = cl.PinGet(c2)
	if err == nil {
		t.Fatal("expected an error")
	}
}
func TestClusterUnpin(t *testing.T) {
	cl, _, _, _, _ := testingCluster(t)
	defer cleanRaft()
	defer cl.Shutdown()

	c, _ := cid.Decode(test.TestCid1)
	err := cl.Unpin(c)
	if err != nil {
		t.Fatal("unpin should have worked:", err)
	}

	// test an error case
	cl.consensus.Shutdown()
	err = cl.Unpin(c)
	if err == nil {
		t.Error("expected an error but things worked")
	}
}
func TestClusterPeers(t *testing.T) {
	cl, _, _, _, _ := testingCluster(t)
	defer cleanRaft()
	defer cl.Shutdown()
	peers := cl.Peers()
	if len(peers) != 1 {
		t.Fatal("expected 1 peer")
	}

	clusterCfg := &Config{}
	clusterCfg.LoadJSON(testingClusterCfg)
	if peers[0].ID != clusterCfg.ID {
		t.Error("bad member")
	}
}
func TestVersion(t *testing.T) {
	cl, _, _, _, _ := testingCluster(t)
	defer cleanRaft()
	defer cl.Shutdown()
	if cl.Version() != Version {
		t.Error("bad Version()")
	}
}