Service: Select consensus on "init" (not on "daemon")

Fixes #865.

This makes the necessary changes so that the consensus component is selected on
"init" via a flag that defaults to "crdt". As a result, only a "crdt" or a
"raft" section is generated in the configuration, never both.

If the configuration file has a "raft" section, "raft" will be used to start
the daemon. If it has a "crdt" section, "crdt" will be used. If it has neither
or both sections, an error is returned.

This also affects the "state *" commands, which now automatically determine
which consensus component to operate on from the existing configuration.
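
A minimal usage sketch of the resulting workflow (the configuration path is
illustrative; the commands, flags and defaults appear in the diff below):

# "init" now selects the consensus component; the flag defaults to "crdt".
ipfs-cluster-service --config ./cluster-config init

# An explicit choice generates only the corresponding section.
ipfs-cluster-service --config ./cluster-config init --force --consensus raft

# "daemon" and the "state *" commands no longer take --consensus; they pick
# it up from whichever section exists in the configuration.
ipfs-cluster-service --config ./cluster-config daemon
ipfs-cluster-service --config ./cluster-config state export -f export.json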
Hector Sanjuan 2019-08-09 16:00:55 +02:00
parent 00e78a6b6d
commit 063c5f1b78
16 changed files with 132 additions and 100 deletions

View File

@@ -57,4 +57,4 @@ VOLUME $IPFS_CLUSTER_PATH
ENTRYPOINT ["/sbin/tini", "--", "/usr/local/bin/entrypoint.sh"]
# Defaults for ipfs-cluster-service go here
-CMD ["daemon", "--consensus raft"]
+CMD ["daemon"]

View File

@@ -58,4 +58,4 @@ VOLUME $IPFS_CLUSTER_PATH
ENTRYPOINT ["/usr/local/bin/start-daemons.sh"]
# Defaults would go here
-CMD ["daemon", "--upgrade"]
+CMD ["daemon"]

View File

@@ -46,10 +46,6 @@ func parseBootstraps(flagVal []string) (bootstraps []ma.Multiaddr) {
// Runs the cluster peer
func daemon(c *cli.Context) error {
-if c.String("consensus") == "" {
-checkErr("starting daemon", errors.New("--consensus flag must be set to \"raft\" or \"crdt\""))
-}
logger.Info("Initializing. For verbose output run with \"-l debug\". Please wait...")
ctx, cancel := context.WithCancel(context.Background())

@@ -78,7 +74,7 @@ func daemon(c *cli.Context) error {
// Cleanup state if bootstrapping
raftStaging := false
-if len(bootstraps) > 0 && c.String("consensus") == "raft" {
+if len(bootstraps) > 0 && cfgHelper.GetConsensus() == cfgs.Raft.ConfigKey() {
raft.CleanupRaft(cfgs.Raft)
raftStaging = true
}

@@ -159,14 +155,13 @@ func createCluster(
tracer, err := observations.SetupTracing(cfgs.Tracing)
checkErr("setting up Tracing", err)
-store := setupDatastore(c.String("consensus"), cfgHelper.Identity(), cfgs)
+store := setupDatastore(cfgHelper)
cons, err := setupConsensus(
-c.String("consensus"),
+cfgHelper,
host,
dht,
pubsub,
-cfgs,
store,
raftStaging,
)

@@ -176,7 +171,7 @@ func createCluster(
}
var peersF func(context.Context) ([]peer.ID, error)
-if c.String("consensus") == "raft" {
+if cfgHelper.GetConsensus() == cfgs.Raft.ConfigKey() {
peersF = cons.Peers
}

@@ -293,12 +288,8 @@ func setupPinTracker(
}
}
-func setupDatastore(
-consensus string,
-ident *config.Identity,
-cfgs *cmdutils.Configs,
-) ds.Datastore {
-stmgr, err := cmdutils.NewStateManager(consensus, ident, cfgs)
+func setupDatastore(cfgHelper *cmdutils.ConfigHelper) ds.Datastore {
+stmgr, err := cmdutils.NewStateManager(cfgHelper.GetConsensus(), cfgHelper.Identity(), cfgHelper.Configs())
checkErr("creating state manager", err)
store, err := stmgr.GetStore()
checkErr("creating datastore", err)

@@ -306,19 +297,20 @@ func setupDatastore(
}
func setupConsensus(
-name string,
+cfgHelper *cmdutils.ConfigHelper,
h host.Host,
dht *dht.IpfsDHT,
pubsub *pubsub.PubSub,
-cfgs *cmdutils.Configs,
store ds.Datastore,
raftStaging bool,
) (ipfscluster.Consensus, error) {
-switch name {
-case "raft":
+cfgs := cfgHelper.Configs()
+switch cfgHelper.GetConsensus() {
+case cfgs.Raft.ConfigKey():
rft, err := raft.NewConsensus(
h,
-cfgs.Raft,
+cfgHelper.Configs().Raft,
store,
raftStaging,
)

@@ -326,12 +318,12 @@ func setupConsensus(
return nil, errors.Wrap(err, "creating Raft component")
}
return rft, nil
-case "crdt":
+case cfgs.Crdt.ConfigKey():
convrdt, err := crdt.New(
h,
dht,
pubsub,
-cfgs.Crdt,
+cfgHelper.Configs().Crdt,
store,
)
if err != nil {

View File

@@ -29,7 +29,7 @@ func (l *lock) lock() {
}
// we should have a config folder whenever we try to lock
-cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath)
+cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath, "")
cfgHelper.MakeConfigFolder()
// set the lock file within this function

View File

@@ -4,6 +4,7 @@ package main
import (
"bufio"
"context"
+"errors"
"fmt"
"io"
"os"

@@ -30,6 +31,7 @@ const programName = `ipfs-cluster-service`
const (
defaultPinTracker = "map"
defaultLogLevel = "info"
+defaultConsensus = "crdt"
)
const (

@@ -221,26 +223,30 @@ func main() {
This command will initialize a new %s configuration file and, if it
does already exist, generate a new %s for %s.
If the optional [source-url] is given, the generated configuration file
will refer to it. The source configuration will be fetched from its source
URL during the launch of the daemon. If not, a default standard configuration
file will be created.
-In the latter case, a cluster secret will be generated as required by %s.
-Alternatively, this secret can be manually provided with --custom-secret (in
-which case it will be prompted), or by setting the CLUSTER_SECRET environment
-variable.
+In the latter case, a cluster secret will be generated as required
+by %s. Alternatively, this secret can be manually
+provided with --custom-secret (in which case it will be prompted), or
+by setting the CLUSTER_SECRET environment variable.
+The --consensus flag allows to select an alternative consensus component for
+the newly-generated configuration.
Note that the --force flag allows to overwrite an existing
configuration with default values. To generate a new identity, please
remove the %s file first and clean any Raft state.
By default, an empty peerstore file will be created too. Initial contents can
-be provided with the -peers flag. In this case, the "trusted_peers" list in
-the "crdt" configuration section and the "init_peerset" list in the "raft"
-configuration section will be prefilled to the peer IDs in the given
-multiaddresses.
+be provided with the --peers flag. Depending on the chosen consensus, the
+"trusted_peers" list in the "crdt" configuration section and the
+"init_peerset" list in the "raft" configuration section will be prefilled to
+the peer IDs in the given multiaddresses.
`,
DefaultConfigFile,
DefaultIdentityFile,
programName,

@@ -249,6 +255,11 @@ multiaddresses.
),
ArgsUsage: "[http-source-url]",
Flags: []cli.Flag{
+cli.StringFlag{
+Name: "consensus",
+Usage: "select consensus component: 'crdt' or 'raft'",
+Value: defaultConsensus,
+},
cli.BoolFlag{
Name: "custom-secret, s",
Usage: "prompt for the cluster secret (when no source specified)",

@@ -263,7 +274,12 @@ multiaddresses.
},
},
Action: func(c *cli.Context) error {
-cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath)
+consensus := c.String("consensus")
+if consensus != "raft" && consensus != "crdt" {
+checkErr("choosing consensus", errors.New("flag value must be set to 'raft' or 'crdt'"))
+}
+cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath, consensus)
defer cfgHelper.Manager().Shutdown() // wait for saves
configExists := false

@@ -375,10 +391,6 @@ multiaddresses.
Usage: "remove peer from cluster on exit. Overrides \"leave_on_shutdown\"",
Hidden: true,
},
-cli.StringFlag{
-Name: "consensus",
-Usage: "shared state management provider [raft,crdt]",
-},
cli.StringFlag{
Name: "pintracker",
Value: defaultPinTracker,

@@ -418,16 +430,12 @@ By default, the state will be printed to stdout.
Value: "",
Usage: "writes to an output file",
},
-cli.StringFlag{
-Name: "consensus",
-Usage: "consensus component to export data from [raft, crdt]",
-},
},
Action: func(c *cli.Context) error {
locker.lock()
defer locker.tryUnlock()
-mgr := getStateManager(c.String("consensus"))
+mgr := getStateManager()
var w io.WriteCloser
var err error

@@ -463,10 +471,6 @@ to import. If no argument is provided, stdin will be used.
Name: "force, f",
Usage: "skips confirmation prompt",
},
-cli.StringFlag{
-Name: "consensus",
-Usage: "consensus component to export data from [raft, crdt]",
-},
},
Action: func(c *cli.Context) error {
locker.lock()

@@ -478,7 +482,7 @@ to import. If no argument is provided, stdin will be used.
return nil
}
-mgr := getStateManager(c.String("consensus"))
+mgr := getStateManager()
// Get the importing file path
importFile := c.Args().First()

@@ -511,10 +515,6 @@ to all effects. Peers may need to bootstrap and sync from scratch after this.
Name: "force, f",
Usage: "skip confirmation prompt",
},
-cli.StringFlag{
-Name: "consensus",
-Usage: "consensus component to export data from [raft, crdt]",
-},
},
Action: func(c *cli.Context) error {
locker.lock()

@@ -528,7 +528,7 @@ to all effects. Peers may need to bootstrap and sync from scratch after this.
return nil
}
-mgr := getStateManager(c.String("consensus"))
+mgr := getStateManager()
checkErr("cleaning state", mgr.Clean())
logger.Info("data correctly cleaned up")
return nil

@@ -608,21 +608,21 @@ func yesNoPrompt(prompt string) bool {
func loadConfigHelper() *cmdutils.ConfigHelper {
// Load all the configurations and identity
-cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath)
+cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath, "")
err := cfgHelper.LoadFromDisk()
checkErr("loading identity or configurations", err)
return cfgHelper
}
-func getStateManager(consensus string) cmdutils.StateManager {
+func getStateManager() cmdutils.StateManager {
cfgHelper := loadConfigHelper()
// since we won't save configs we can shutdown
cfgHelper.Manager().Shutdown()
mgr, err := cmdutils.NewStateManager(
-consensus,
+cfgHelper.GetConsensus(),
cfgHelper.Identity(),
cfgHelper.Configs(),
)
-checkErr("creating state manager,", err)
+checkErr("creating state manager", err)
return mgr
}

View File

@@ -50,14 +50,16 @@ type ConfigHelper struct {
configPath string
identityPath string
+consensus string
}
// NewConfigHelper creates a config helper given the paths to the
// configuration and identity files.
-func NewConfigHelper(configPath, identityPath string) *ConfigHelper {
+func NewConfigHelper(configPath, identityPath, consensus string) *ConfigHelper {
ch := &ConfigHelper{
configPath: configPath,
identityPath: identityPath,
+consensus: consensus,
}
ch.init()
return ch

@@ -138,6 +140,29 @@ func (ch *ConfigHelper) Configs() *Configs {
return ch.configs
}
+// GetConsensus attempts to return the configured consensus.
+// If the ConfigHelper was initialized with a consensus string
+// then it returns that.
+//
+// Otherwise it checks whether one of the consensus configurations
+// has been loaded. If both or none have been loaded, it returns
+// an empty string.
+func (ch *ConfigHelper) GetConsensus() string {
+if ch.consensus != "" {
+return ch.consensus
+}
+crdtLoaded := ch.manager.IsLoadedFromJSON(config.Consensus, ch.configs.Crdt.ConfigKey())
+raftLoaded := ch.manager.IsLoadedFromJSON(config.Consensus, ch.configs.Raft.ConfigKey())
+if crdtLoaded == raftLoaded { // both loaded or none
+return ""
+}
+if crdtLoaded {
+return ch.configs.Crdt.ConfigKey()
+}
+return ch.configs.Raft.ConfigKey()
+}
// register all current cluster components
func (ch *ConfigHelper) init() {
man := config.NewManager()

@@ -160,8 +185,6 @@ func (ch *ConfigHelper) init() {
man.RegisterComponent(config.API, cfgs.Restapi)
man.RegisterComponent(config.API, cfgs.Ipfsproxy)
man.RegisterComponent(config.IPFSConn, cfgs.Ipfshttp)
-man.RegisterComponent(config.Consensus, cfgs.Raft)
-man.RegisterComponent(config.Consensus, cfgs.Crdt)
man.RegisterComponent(config.PinTracker, cfgs.Maptracker)
man.RegisterComponent(config.PinTracker, cfgs.Statelesstracker)
man.RegisterComponent(config.Monitor, cfgs.Pubsubmon)

@@ -170,6 +193,16 @@ func (ch *ConfigHelper) init() {
man.RegisterComponent(config.Observations, cfgs.Tracing)
man.RegisterComponent(config.Datastore, cfgs.Badger)
+switch ch.consensus {
+case cfgs.Raft.ConfigKey():
+man.RegisterComponent(config.Consensus, cfgs.Raft)
+case cfgs.Crdt.ConfigKey():
+man.RegisterComponent(config.Consensus, cfgs.Crdt)
+default:
+man.RegisterComponent(config.Consensus, cfgs.Raft)
+man.RegisterComponent(config.Consensus, cfgs.Crdt)
+}
ch.identity = &config.Identity{}
ch.manager = man
ch.configs = cfgs
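
An illustration of the fallback above (the configuration path is hypothetical;
the jq check mirrors the sharness test added in this commit): after an init
that selected "raft", service.json contains only that consensus section, so
GetConsensus can resolve it without an explicit flag.

ipfs-cluster-service --config ./cluster-config init --force --consensus raft
jq -r '.consensus | keys[]' ./cluster-config/service.json   # prints: raft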

View File

@@ -33,12 +33,12 @@ type StateManager interface {
// consensus ("raft" or "crdt"). It will need initialized configs.
func NewStateManager(consensus string, ident *config.Identity, cfgs *Configs) (StateManager, error) {
switch consensus {
-case "raft":
+case cfgs.Raft.ConfigKey():
return &raftStateManager{ident, cfgs}, nil
-case "crdt":
+case cfgs.Crdt.ConfigKey():
return &crdtStateManager{ident, cfgs}, nil
case "":
-return nil, errors.New("unspecified consensus component")
+return nil, errors.New("could not determine the consensus component")
default:
return nil, fmt.Errorf("unknown consensus component '%s'", consensus)
}

View File

@@ -423,7 +423,7 @@ func (cfg *Manager) LoadJSON(bs []byte) error {
logger.Debugf("%s component configuration loaded", name)
} else {
cfg.undefinedComps[t][name] = true
-logger.Warningf("%s component is empty, generating default", name)
+logger.Debugf("%s component is empty, generating default", name)
component.Default()
}

View File

@@ -82,14 +82,14 @@ services:
# to it.
command: >-
sh -c '
-cmd="daemon --consensus raft"
-if [ ! -d /data/ipfs-cluster/raft ]; then
+cmd="daemon"
+if [ ! -d /data/ipfs-cluster/badger ]; then
while ! ipfs-cluster-ctl --host /dns4/cluster0/tcp/9094 id; do
sleep 1
done
pid=`ipfs-cluster-ctl --host /dns4/cluster0/tcp/9094 id | grep -o -E "^(\w+)"`
sleep 10
-cmd="daemon --consensus raft --bootstrap /dns4/cluster0/tcp/9096/ipfs/$$pid"
+cmd="daemon --bootstrap /dns4/cluster0/tcp/9096/ipfs/$$pid"
fi
exec /usr/local/bin/entrypoint.sh $$cmd
'

View File

@@ -4,6 +4,6 @@
sleep 4
while true; do
export CLUSTER_SECRET=""
-pgrep ipfs-cluster-service || echo "CLUSTER RESTARTING"; ipfs-cluster-service daemon --consensus raft --debug &
+pgrep ipfs-cluster-service || echo "CLUSTER RESTARTING"; ipfs-cluster-service daemon --debug &
sleep 10
done

View File

@@ -60,6 +60,10 @@ test_ipfs_running() {
test_cluster_init() {
custom_config_files="$1"
+consensus="$2"
+if [ -z "$consensus" ]; then
+consensus="crdt"
+fi
which ipfs-cluster-service >/dev/null 2>&1
if [ $? -ne 0 ]; then

@@ -71,7 +75,7 @@ test_cluster_init() {
echo "cluster init FAIL: ipfs-cluster-ctl not found"
exit 1
fi
-ipfs-cluster-service -f --config "test-config" init >"$IPFS_OUTPUT" 2>&1
+ipfs-cluster-service --config "test-config" init --force --consensus "$consensus" >"$IPFS_OUTPUT" 2>&1
if [ $? -ne 0 ]; then
echo "cluster init FAIL: error on ipfs cluster init"
exit 1

@@ -80,7 +84,7 @@ test_cluster_init() {
if [ -n "$custom_config_files" ]; then
cp -f ${custom_config_files}/* "test-config"
fi
-cluster_start $2
+cluster_start
}
test_cluster_config() {

@@ -121,12 +125,7 @@ cluster_kill(){
}
cluster_start(){
-consensus="$1"
-if [ -z "$consensus" ]; then
-consensus="crdt"
-fi
-ipfs-cluster-service --config "test-config" daemon --consensus "$consensus" >"$IPFS_OUTPUT" 2>&1 &
+ipfs-cluster-service --config "test-config" daemon >"$IPFS_OUTPUT" 2>&1 &
while ! curl -s 'localhost:9095/api/v0/version' >/dev/null; do
sleep 0.2
done
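
With these helper changes, the sharness suites choose the consensus at init
time; as the test diffs below show, the second argument of test_cluster_init
selects it and cluster_start no longer takes one:

test_cluster_init "" raft    # empty first arg: no custom config files; consensus defaults to "crdt" if omitted
cluster_start                # starts the daemon against whatever was initialized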

View File

@@ -6,7 +6,7 @@ statuses=0
for i in t0*.sh;
do
echo "*** $i ***"
-./$i --verbose
+./$i
status=$?
statuses=$((statuses + $status))
if [ $status -ne 0 ]; then

View File

@@ -18,6 +18,12 @@ test_expect_success "cluster-service init without --peers succeeds and creates e
[ ! -s "test-config/peerstore" ]
'
+test_expect_success "cluster-service init with raft generates only raft config" '
+ipfs-cluster-service --config "test-config" init -f --consensus raft &&
+[ "$(jq -M -r .consensus.raft test-config/service.json)" != "null" ] &&
+[ "$(jq -M -r .consensus.crdt test-config/service.json)" == "null" ]
+'
test_clean_cluster
test_done

View File

@@ -5,27 +5,28 @@ test_description="Test service state export"
. lib/test-lib.sh
test_ipfs_init
-test_cluster_init
+test_cluster_init "" crdt
test_expect_success IPFS,CLUSTER,JQ "state export saves the correct state to expected file (crdt)" '
cid=`docker exec ipfs sh -c "echo test_52-1 | ipfs add -q"` &&
ipfs-cluster-ctl pin add "$cid" &&
sleep 5 &&
cluster_kill && sleep 5 &&
-ipfs-cluster-service --debug --config "test-config" state export --consensus crdt -f export.json &&
+ipfs-cluster-service --debug --config "test-config" state export -f export.json &&
[ -f export.json ] &&
jq -r ".cid | .[\"/\"]" export.json | grep -q "$cid"
'
cluster_kill
-cluster_start raft
+sleep 5
+test_cluster_init "" raft
test_expect_success IPFS,CLUSTER,JQ "state export saves the correct state to expected file (raft)" '
cid=`docker exec ipfs sh -c "echo test_52-2 | ipfs add -q"` &&
ipfs-cluster-ctl pin add "$cid" &&
sleep 5 &&
cluster_kill && sleep 5 &&
-ipfs-cluster-service --debug --config "test-config" state export --consensus raft -f export.json &&
+ipfs-cluster-service --debug --config "test-config" state export -f export.json &&
[ -f export.json ] &&
jq -r ".cid | .[\"/\"]" export.json | grep -q "$cid"
'

View File

@@ -17,14 +17,14 @@ cluster_kill
test_expect_success IPFS,CLUSTER "state import fails on incorrect format (crdt)" '
sleep 5 &&
echo "not exactly json" > badImportFile &&
-test_expect_code 1 ipfs-cluster-service --config "test-config" state import --consensus crdt -f badImportFile
+test_expect_code 1 ipfs-cluster-service --config "test-config" state import -f badImportFile
'
test_expect_success IPFS,CLUSTER,IMPORTSTATE "state import succeeds on correct format (crdt)" '
sleep 5
cid=`docker exec ipfs sh -c "echo test_53 | ipfs add -q"` &&
-ipfs-cluster-service --config "test-config" state import --consensus crdt -f importState &&
-cluster_start crdt &&
+ipfs-cluster-service --config "test-config" state import -f importState &&
+cluster_start &&
sleep 5 &&
ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid" &&
ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED"

@@ -32,18 +32,19 @@ test_expect_success IPFS,CLUSTER,IMPORTSTATE "state import succeeds on correct f
# Kill cluster daemon but keep data folder
cluster_kill
+sleep 5
test_expect_success IPFS,CLUSTER "state import fails on incorrect format (raft)" '
-sleep 5 &&
+ipfs-cluster-service --config "test-config" init --force --consensus raft &&
echo "not exactly json" > badImportFile &&
-test_expect_code 1 ipfs-cluster-service --config "test-config" state import --consensus raft -f badImportFile
+test_expect_code 1 ipfs-cluster-service --config "test-config" state import -f badImportFile
'
test_expect_success IPFS,CLUSTER,IMPORTSTATE "state import succeeds on correct format (raft)" '
sleep 5
cid=`docker exec ipfs sh -c "echo test_53 | ipfs add -q"` &&
-ipfs-cluster-service --config "test-config" state import --consensus raft -f importState &&
-cluster_start raft &&
+ipfs-cluster-service --config "test-config" state import -f importState &&
+cluster_start &&
sleep 5 &&
ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid" &&
ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED"

View File

@@ -14,7 +14,7 @@ test_expect_success IPFS,CLUSTER "state cleanup refreshes state on restart (crdt
ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED" &&
[ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ] &&
cluster_kill && sleep 5 &&
-ipfs-cluster-service --config "test-config" state cleanup --consensus crdt -f &&
+ipfs-cluster-service --config "test-config" state cleanup -f &&
cluster_start && sleep 5 &&
[ 0 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ]
'

@@ -24,9 +24,9 @@ test_expect_success IPFS,CLUSTER "export + cleanup + import == noop (crdt)" '
ipfs-cluster-ctl pin add "$cid" && sleep 5 &&
[ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ] &&
cluster_kill && sleep 5 &&
-ipfs-cluster-service --config "test-config" state export --consensus crdt -f import.json &&
-ipfs-cluster-service --config "test-config" state cleanup --consensus crdt -f &&
-ipfs-cluster-service --config "test-config" state import --consensus crdt -f import.json &&
+ipfs-cluster-service --config "test-config" state export -f import.json &&
+ipfs-cluster-service --config "test-config" state cleanup -f &&
+ipfs-cluster-service --config "test-config" state import -f import.json &&
cluster_start && sleep 5 &&
ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid" &&
ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED" &&

@@ -35,7 +35,7 @@ test_expect_success IPFS,CLUSTER "export + cleanup + import == noop (crdt)" '
cluster_kill
sleep 5
-cluster_start "raft"
+test_cluster_init "" raft
test_expect_success IPFS,CLUSTER "state cleanup refreshes state on restart (raft)" '
cid=`docker exec ipfs sh -c "echo test_54 | ipfs add -q"` &&

@@ -44,8 +44,8 @@ test_expect_success IPFS,CLUSTER "state cleanup refreshes state on restart (raft
ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED" &&
[ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ] &&
cluster_kill && sleep 5 &&
-ipfs-cluster-service --config "test-config" state cleanup --consensus raft -f &&
-cluster_start raft && sleep 5 &&
+ipfs-cluster-service --config "test-config" state cleanup -f &&
+cluster_start && sleep 5 &&
[ 0 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ]
'

@@ -54,10 +54,10 @@ test_expect_success IPFS,CLUSTER "export + cleanup + import == noop (raft)" '
ipfs-cluster-ctl pin add "$cid" && sleep 5 &&
[ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ] &&
cluster_kill && sleep 5 &&
-ipfs-cluster-service --config "test-config" state export --consensus raft -f import.json &&
-ipfs-cluster-service --config "test-config" state cleanup --consensus raft -f &&
-ipfs-cluster-service --config "test-config" state import --consensus raft -f import.json &&
+ipfs-cluster-service --config "test-config" state export -f import.json &&
+ipfs-cluster-service --config "test-config" state cleanup -f &&
+ipfs-cluster-service --config "test-config" state import -f import.json &&
cluster_start && sleep 5 &&
ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid" &&
ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED" &&
[ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ]