From 063c5f1b783d05897e73a9ac1ecf9682ad9c605c Mon Sep 17 00:00:00 2001
From: Hector Sanjuan
Date: Fri, 9 Aug 2019 16:00:55 +0200
Subject: [PATCH] Service: Select consensus on "init" (not on "daemon")

Fixes #865.

This makes the necessary changes so that the consensus component is
selected on "init", via a --consensus flag that defaults to "crdt". Only
a "crdt" or a "raft" section is generated, not both.

If the configuration file has a "raft" section, "raft" will be used to
start the daemon. If it has a "crdt" section, "crdt" will be used. If it
has neither or both sections, an error will occur.

This also affects the "state *" commands, which will now autoselect the
consensus to work with from the existing configuration.
---
 Dockerfile                             |  2 +-
 Dockerfile-test                        |  2 +-
 cmd/ipfs-cluster-service/daemon.go     | 36 ++++++--------
 cmd/ipfs-cluster-service/lock.go       |  2 +-
 cmd/ipfs-cluster-service/main.go       | 66 +++++++++++++-------------
 cmdutils/configs.go                    | 39 +++++++++++++--
 cmdutils/state.go                      |  6 +--
 config/config.go                       |  2 +-
 docker-compose.yml                     |  6 +--
 docker/cluster-restart.sh              |  2 +-
 sharness/lib/test-lib.sh               | 15 +++---
 sharness/run-sharness-tests.sh         |  2 +-
 sharness/t0021-service-init.sh         |  6 +++
 sharness/t0052-service-state-export.sh |  9 ++--
 sharness/t0053-service-state-import.sh | 15 +++---
 sharness/t0054-service-state-clean.sh  | 22 ++++-----
 16 files changed, 132 insertions(+), 100 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index 1cdbafa4..894d09c0 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -57,4 +57,4 @@ VOLUME $IPFS_CLUSTER_PATH
 ENTRYPOINT ["/sbin/tini", "--", "/usr/local/bin/entrypoint.sh"]
 
 # Defaults for ipfs-cluster-service go here
-CMD ["daemon", "--consensus raft"]
+CMD ["daemon"]

diff --git a/Dockerfile-test b/Dockerfile-test
index cca53e14..f55878ca 100644
--- a/Dockerfile-test
+++ b/Dockerfile-test
@@ -58,4 +58,4 @@ VOLUME $IPFS_CLUSTER_PATH
 ENTRYPOINT ["/usr/local/bin/start-daemons.sh"]
 
 # Defaults would go here
-CMD ["daemon", "--upgrade"]
+CMD ["daemon"]

diff --git a/cmd/ipfs-cluster-service/daemon.go b/cmd/ipfs-cluster-service/daemon.go
index 4e9e9d24..9451b939 100644
--- a/cmd/ipfs-cluster-service/daemon.go
+++ b/cmd/ipfs-cluster-service/daemon.go
@@ -46,10 +46,6 @@ func parseBootstraps(flagVal []string) (bootstraps []ma.Multiaddr) {
 
 // Runs the cluster peer
 func daemon(c *cli.Context) error {
-	if c.String("consensus") == "" {
-		checkErr("starting daemon", errors.New("--consensus flag must be set to \"raft\" or \"crdt\""))
-	}
-
 	logger.Info("Initializing. For verbose output run with \"-l debug\". Please wait...")
 
 	ctx, cancel := context.WithCancel(context.Background())
@@ -78,7 +74,7 @@ func daemon(c *cli.Context) error {
 
 	// Cleanup state if bootstrapping
 	raftStaging := false
-	if len(bootstraps) > 0 && c.String("consensus") == "raft" {
+	if len(bootstraps) > 0 && cfgHelper.GetConsensus() == cfgs.Raft.ConfigKey() {
 		raft.CleanupRaft(cfgs.Raft)
 		raftStaging = true
 	}
@@ -159,14 +155,13 @@ func createCluster(
 	tracer, err := observations.SetupTracing(cfgs.Tracing)
 	checkErr("setting up Tracing", err)
 
-	store := setupDatastore(c.String("consensus"), cfgHelper.Identity(), cfgs)
+	store := setupDatastore(cfgHelper)
 
 	cons, err := setupConsensus(
-		c.String("consensus"),
+		cfgHelper,
 		host,
 		dht,
 		pubsub,
-		cfgs,
 		store,
 		raftStaging,
 	)
@@ -176,7 +171,7 @@ func createCluster(
 	}
 
 	var peersF func(context.Context) ([]peer.ID, error)
-	if c.String("consensus") == "raft" {
+	if cfgHelper.GetConsensus() == cfgs.Raft.ConfigKey() {
 		peersF = cons.Peers
 	}
 
@@ -293,12 +288,8 @@ func setupPinTracker(
 	}
 }
 
-func setupDatastore(
-	consensus string,
-	ident *config.Identity,
-	cfgs *cmdutils.Configs,
-) ds.Datastore {
-	stmgr, err := cmdutils.NewStateManager(consensus, ident, cfgs)
+func setupDatastore(cfgHelper *cmdutils.ConfigHelper) ds.Datastore {
+	stmgr, err := cmdutils.NewStateManager(cfgHelper.GetConsensus(), cfgHelper.Identity(), cfgHelper.Configs())
 	checkErr("creating state manager", err)
 	store, err := stmgr.GetStore()
 	checkErr("creating datastore", err)
@@ -306,19 +297,20 @@ func setupDatastore(
 }
 
 func setupConsensus(
-	name string,
+	cfgHelper *cmdutils.ConfigHelper,
 	h host.Host,
 	dht *dht.IpfsDHT,
 	pubsub *pubsub.PubSub,
-	cfgs *cmdutils.Configs,
 	store ds.Datastore,
 	raftStaging bool,
 ) (ipfscluster.Consensus, error) {
-	switch name {
-	case "raft":
+
+	cfgs := cfgHelper.Configs()
+	switch cfgHelper.GetConsensus() {
+	case cfgs.Raft.ConfigKey():
 		rft, err := raft.NewConsensus(
 			h,
-			cfgs.Raft,
+			cfgHelper.Configs().Raft,
 			store,
 			raftStaging,
 		)
@@ -326,12 +318,12 @@ func setupConsensus(
 			return nil, errors.Wrap(err, "creating Raft component")
 		}
 		return rft, nil
-	case "crdt":
+	case cfgs.Crdt.ConfigKey():
 		convrdt, err := crdt.New(
 			h,
 			dht,
 			pubsub,
-			cfgs.Crdt,
+			cfgHelper.Configs().Crdt,
 			store,
 		)
 		if err != nil {

diff --git a/cmd/ipfs-cluster-service/lock.go b/cmd/ipfs-cluster-service/lock.go
index 5e7c6898..f9ff3301 100644
--- a/cmd/ipfs-cluster-service/lock.go
+++ b/cmd/ipfs-cluster-service/lock.go
@@ -29,7 +29,7 @@ func (l *lock) lock() {
 	}
 
 	// we should have a config folder whenever we try to lock
-	cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath)
+	cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath, "")
 	cfgHelper.MakeConfigFolder()
 
 	// set the lock file within this function

diff --git a/cmd/ipfs-cluster-service/main.go b/cmd/ipfs-cluster-service/main.go
index f4fd03eb..41886fcc 100644
--- a/cmd/ipfs-cluster-service/main.go
+++ b/cmd/ipfs-cluster-service/main.go
@@ -4,6 +4,7 @@ package main
 import (
 	"bufio"
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"os"
@@ -30,6 +31,7 @@ const programName = `ipfs-cluster-service`
 const (
 	defaultPinTracker = "map"
 	defaultLogLevel   = "info"
+	defaultConsensus  = "crdt"
 )
 
 const (
@@ -221,26 +223,30 @@ func main() {
 This command will initialize a new %s configuration file and, if it does
 not already exist, generate a new %s for %s.
 
-If the optional [source-url] is given, the generated configuration file
+If the optional [source-url] is given, the generated configuration file
 will refer to it. The source configuration will be fetched from its source
 URL during the launch of the daemon. If not, a default standard configuration
 file will be created.
 
-In the latter case, a cluster secret will be generated as required by %s.
-Alternatively, this secret can be manually provided with --custom-secret (in
-which case it will be prompted), or by setting the CLUSTER_SECRET environment
-variable.
+In the latter case, a cluster secret will be generated as required
+by %s. Alternatively, this secret can be manually
+provided with --custom-secret (in which case you will be prompted
+for it), or by setting the CLUSTER_SECRET environment variable.
+
+The --consensus flag allows selecting an alternative consensus component
+for the newly-generated configuration.
 
 Note that the --force flag allows overwriting an existing configuration with
 default values. To generate a new identity, please remove the %s file first
 and clean any Raft state.
 
 By default, an empty peerstore file will be created too. Initial contents can
-be provided with the -peers flag. In this case, the "trusted_peers" list in
-the "crdt" configuration section and the "init_peerset" list in the "raft"
-configuration section will be prefilled to the peer IDs in the given
-multiaddresses.
+be provided with the --peers flag. Depending on the chosen consensus, the
+"trusted_peers" list in the "crdt" configuration section and the
+"init_peerset" list in the "raft" configuration section will be prefilled to
+the peer IDs in the given multiaddresses.
 `,
+
 			DefaultConfigFile,
 			DefaultIdentityFile,
 			programName,
 			DefaultIdentityFile,
 		),
 		ArgsUsage: "[http-source-url]",
 		Flags: []cli.Flag{
+			cli.StringFlag{
+				Name:  "consensus",
+				Usage: "select consensus component: 'crdt' or 'raft'",
+				Value: defaultConsensus,
+			},
 			cli.BoolFlag{
 				Name:  "custom-secret, s",
 				Usage: "prompt for the cluster secret (when no source specified)",
 			},
@@ -263,7 +274,12 @@ multiaddresses.
 			},
 		},
 		Action: func(c *cli.Context) error {
-			cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath)
+			consensus := c.String("consensus")
+			if consensus != "raft" && consensus != "crdt" {
+				checkErr("choosing consensus", errors.New("flag value must be set to 'raft' or 'crdt'"))
+			}
+
+			cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath, consensus)
 			defer cfgHelper.Manager().Shutdown() // wait for saves
 
 			configExists := false
@@ -375,10 +391,6 @@ multiaddresses.
 				Usage:  "remove peer from cluster on exit. Overrides \"leave_on_shutdown\"",
 				Hidden: true,
 			},
-			cli.StringFlag{
-				Name:  "consensus",
-				Usage: "shared state management provider [raft,crdt]",
-			},
 			cli.StringFlag{
 				Name:  "pintracker",
 				Value: defaultPinTracker,
@@ -418,16 +430,12 @@ By default, the state will be printed to stdout.
 					Value: "",
 					Usage: "writes to an output file",
 				},
-				cli.StringFlag{
-					Name:  "consensus",
-					Usage: "consensus component to export data from [raft, crdt]",
-				},
 			},
 			Action: func(c *cli.Context) error {
 				locker.lock()
 				defer locker.tryUnlock()
 
-				mgr := getStateManager(c.String("consensus"))
+				mgr := getStateManager()
 
 				var w io.WriteCloser
 				var err error
@@ -463,10 +471,6 @@ to import. If no argument is provided, stdin will be used.
 					Name:  "force, f",
 					Usage: "skips confirmation prompt",
 				},
-				cli.StringFlag{
-					Name:  "consensus",
-					Usage: "consensus component to export data from [raft, crdt]",
-				},
 			},
 			Action: func(c *cli.Context) error {
 				locker.lock()
 				defer locker.tryUnlock()
@@ -478,7 +482,7 @@ to import. If no argument is provided, stdin will be used.
 					return nil
 				}
 
-				mgr := getStateManager(c.String("consensus"))
+				mgr := getStateManager()
 
 				// Get the importing file path
 				importFile := c.Args().First()
@@ -511,10 +515,6 @@ to all effects. Peers may need to bootstrap and sync from scratch after this.
 					Name:  "force, f",
 					Usage: "skip confirmation prompt",
 				},
-				cli.StringFlag{
-					Name:  "consensus",
-					Usage: "consensus component to export data from [raft, crdt]",
-				},
 			},
 			Action: func(c *cli.Context) error {
 				locker.lock()
@@ -528,7 +528,7 @@ to all effects. Peers may need to bootstrap and sync from scratch after this.
 					return nil
 				}
 
-				mgr := getStateManager(c.String("consensus"))
+				mgr := getStateManager()
 				checkErr("cleaning state", mgr.Clean())
 				logger.Info("data correctly cleaned up")
 				return nil
@@ -608,21 +608,21 @@ func yesNoPrompt(prompt string) bool {
 
 func loadConfigHelper() *cmdutils.ConfigHelper {
 	// Load all the configurations and identity
-	cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath)
+	cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath, "")
 	err := cfgHelper.LoadFromDisk()
 	checkErr("loading identity or configurations", err)
 	return cfgHelper
 }
 
-func getStateManager(consensus string) cmdutils.StateManager {
+func getStateManager() cmdutils.StateManager {
 	cfgHelper := loadConfigHelper()
 	// since we won't save configs we can shutdown
 	cfgHelper.Manager().Shutdown()
 	mgr, err := cmdutils.NewStateManager(
-		consensus,
+		cfgHelper.GetConsensus(),
 		cfgHelper.Identity(),
 		cfgHelper.Configs(),
 	)
-	checkErr("creating state manager,", err)
+	checkErr("creating state manager", err)
 	return mgr
 }

diff --git a/cmdutils/configs.go b/cmdutils/configs.go
index 4a2d7eed..31e861f0 100644
--- a/cmdutils/configs.go
+++ b/cmdutils/configs.go
@@ -50,14 +50,16 @@ type ConfigHelper struct {
 
 	configPath   string
 	identityPath string
+	consensus    string
 }
 
 // NewConfigHelper creates a config helper given the paths to the
 // configuration and identity files.
-func NewConfigHelper(configPath, identityPath string) *ConfigHelper {
+func NewConfigHelper(configPath, identityPath, consensus string) *ConfigHelper {
 	ch := &ConfigHelper{
 		configPath:   configPath,
 		identityPath: identityPath,
+		consensus:    consensus,
 	}
 	ch.init()
 	return ch
@@ -138,6 +140,29 @@ func (ch *ConfigHelper) Configs() *Configs {
 	return ch.configs
 }
 
+// GetConsensus attempts to return the configured consensus.
+// If the ConfigHelper was initialized with a consensus string,
+// then it returns that.
+//
+// Otherwise, it checks whether one of the consensus configurations
+// has been loaded. If both or none have been loaded, it returns
+// an empty string.
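+//
+// Illustrative use (a sketch only, not part of this patch; it assumes
+// a service.json that contains just a "crdt" section):
+//
+//	ch := NewConfigHelper(configPath, identityPath, "")
+//	if err := ch.LoadFromDisk(); err != nil {
+//		// handle error
+//	}
+//	consensus := ch.GetConsensus() // "crdt": only that section was loaded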
+func (ch *ConfigHelper) GetConsensus() string {
+	if ch.consensus != "" {
+		return ch.consensus
+	}
+	crdtLoaded := ch.manager.IsLoadedFromJSON(config.Consensus, ch.configs.Crdt.ConfigKey())
+	raftLoaded := ch.manager.IsLoadedFromJSON(config.Consensus, ch.configs.Raft.ConfigKey())
+	if crdtLoaded == raftLoaded { // both loaded or none
+		return ""
+	}
+
+	if crdtLoaded {
+		return ch.configs.Crdt.ConfigKey()
+	}
+	return ch.configs.Raft.ConfigKey()
+}
+
 // register all current cluster components
 func (ch *ConfigHelper) init() {
 	man := config.NewManager()
@@ -160,8 +185,6 @@ func (ch *ConfigHelper) init() {
 	man.RegisterComponent(config.API, cfgs.Restapi)
 	man.RegisterComponent(config.API, cfgs.Ipfsproxy)
 	man.RegisterComponent(config.IPFSConn, cfgs.Ipfshttp)
-	man.RegisterComponent(config.Consensus, cfgs.Raft)
-	man.RegisterComponent(config.Consensus, cfgs.Crdt)
 	man.RegisterComponent(config.PinTracker, cfgs.Maptracker)
 	man.RegisterComponent(config.PinTracker, cfgs.Statelesstracker)
 	man.RegisterComponent(config.Monitor, cfgs.Pubsubmon)
@@ -170,6 +193,16 @@ func (ch *ConfigHelper) init() {
 	man.RegisterComponent(config.Observations, cfgs.Tracing)
 	man.RegisterComponent(config.Datastore, cfgs.Badger)
 
+	switch ch.consensus {
+	case cfgs.Raft.ConfigKey():
+		man.RegisterComponent(config.Consensus, cfgs.Raft)
+	case cfgs.Crdt.ConfigKey():
+		man.RegisterComponent(config.Consensus, cfgs.Crdt)
+	default:
+		man.RegisterComponent(config.Consensus, cfgs.Raft)
+		man.RegisterComponent(config.Consensus, cfgs.Crdt)
+	}
+
 	ch.identity = &config.Identity{}
 	ch.manager = man
 	ch.configs = cfgs

diff --git a/cmdutils/state.go b/cmdutils/state.go
index 0f5b539f..978d0e5c 100644
--- a/cmdutils/state.go
+++ b/cmdutils/state.go
@@ -33,12 +33,12 @@ type StateManager interface {
 // consensus ("raft" or "crdt"). It will need initialized configs.
 func NewStateManager(consensus string, ident *config.Identity, cfgs *Configs) (StateManager, error) {
 	switch consensus {
-	case "raft":
+	case cfgs.Raft.ConfigKey():
 		return &raftStateManager{ident, cfgs}, nil
-	case "crdt":
+	case cfgs.Crdt.ConfigKey():
 		return &crdtStateManager{ident, cfgs}, nil
 	case "":
-		return nil, errors.New("unspecified consensus component")
+		return nil, errors.New("could not determine the consensus component")
 	default:
 		return nil, fmt.Errorf("unknown consensus component '%s'", consensus)
 	}

diff --git a/config/config.go b/config/config.go
index c7025a72..3b9db1a7 100644
--- a/config/config.go
+++ b/config/config.go
@@ -423,7 +423,7 @@ func (cfg *Manager) LoadJSON(bs []byte) error {
 			logger.Debugf("%s component configuration loaded", name)
 		} else {
 			cfg.undefinedComps[t][name] = true
-			logger.Warningf("%s component is empty, generating default", name)
+			logger.Debugf("%s component is empty, generating default", name)
 			component.Default()
 		}

diff --git a/docker-compose.yml b/docker-compose.yml
index f523103c..e8beca57 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -82,14 +82,14 @@ services:
     # to it.
     command: >-
       sh -c '
-        cmd="daemon --consensus raft"
-        if [ ! -d /data/ipfs-cluster/raft ]; then
+        cmd="daemon"
+        if [ ! -d /data/ipfs-cluster/badger ]; then
          while ! ipfs-cluster-ctl --host /dns4/cluster0/tcp/9094 id; do
            sleep 1
          done
          pid=`ipfs-cluster-ctl --host /dns4/cluster0/tcp/9094 id | grep -o -E "^(\w+)"`
          sleep 10
-         cmd="daemon --consensus raft --bootstrap /dns4/cluster0/tcp/9096/ipfs/$$pid"
+         cmd="daemon --bootstrap /dns4/cluster0/tcp/9096/ipfs/$$pid"
         fi
         exec /usr/local/bin/entrypoint.sh $$cmd
       '

diff --git a/docker/cluster-restart.sh b/docker/cluster-restart.sh
index 93cb6636..1965d5ec 100755
--- a/docker/cluster-restart.sh
+++ b/docker/cluster-restart.sh
@@ -4,6 +4,6 @@ sleep 4
 while true; do
     export CLUSTER_SECRET=""
-    pgrep ipfs-cluster-service || echo "CLUSTER RESTARTING"; ipfs-cluster-service daemon --consensus raft --debug &
+    pgrep ipfs-cluster-service || echo "CLUSTER RESTARTING"; ipfs-cluster-service daemon --debug &
     sleep 10
 done

diff --git a/sharness/lib/test-lib.sh b/sharness/lib/test-lib.sh
index 8040c6b1..43270f0d 100755
--- a/sharness/lib/test-lib.sh
+++ b/sharness/lib/test-lib.sh
@@ -60,6 +60,10 @@ test_ipfs_running() {
 
 test_cluster_init() {
     custom_config_files="$1"
+    consensus="$2"
+    if [ -z "$consensus" ]; then
+        consensus="crdt"
+    fi
 
     which ipfs-cluster-service >/dev/null 2>&1
     if [ $? -ne 0 ]; then
@@ -71,7 +75,7 @@ test_cluster_init() {
         echo "cluster init FAIL: ipfs-cluster-ctl not found"
         exit 1
     fi
-    ipfs-cluster-service -f --config "test-config" init >"$IPFS_OUTPUT" 2>&1
+    ipfs-cluster-service --config "test-config" init --force --consensus "$consensus" >"$IPFS_OUTPUT" 2>&1
    if [ $? -ne 0 ]; then
        echo "cluster init FAIL: error on ipfs cluster init"
        exit 1
@@ -80,7 +84,7 @@ test_cluster_init() {
    if [ -n "$custom_config_files" ]; then
        cp -f ${custom_config_files}/* "test-config"
    fi
-    cluster_start $2
+    cluster_start
 }
 
 test_cluster_config() {
@@ -121,12 +125,7 @@ cluster_kill(){
 }
 
 cluster_start(){
-    consensus="$1"
-    if [ -z "$consensus" ]; then
-        consensus="crdt"
-    fi
-
-    ipfs-cluster-service --config "test-config" daemon --consensus "$consensus" >"$IPFS_OUTPUT" 2>&1 &
+    ipfs-cluster-service --config "test-config" daemon >"$IPFS_OUTPUT" 2>&1 &
     while ! curl -s 'localhost:9095/api/v0/version' >/dev/null; do
         sleep 0.2
     done

diff --git a/sharness/run-sharness-tests.sh b/sharness/run-sharness-tests.sh
index 38cda1ba..df8405f0 100755
--- a/sharness/run-sharness-tests.sh
+++ b/sharness/run-sharness-tests.sh
@@ -6,7 +6,7 @@ statuses=0
 for i in t0*.sh;
 do
     echo "*** $i ***"
-    ./$i --verbose
+    ./$i
     status=$?
     statuses=$((statuses + $status))
     if [ $status -ne 0 ]; then

diff --git a/sharness/t0021-service-init.sh b/sharness/t0021-service-init.sh
index 3421e40c..f293f6e3 100755
--- a/sharness/t0021-service-init.sh
+++ b/sharness/t0021-service-init.sh
@@ -18,6 +18,12 @@ test_expect_success "cluster-service init without --peers succeeds and creates e
     [ ! -s "test-config/peerstore" ]
 '
 
+test_expect_success "cluster-service init with raft generates only raft config" '
+    ipfs-cluster-service --config "test-config" init -f --consensus raft &&
+    [ "$(jq -M -r .consensus.raft test-config/service.json)" != "null" ] &&
+    [ "$(jq -M -r .consensus.crdt test-config/service.json)" = "null" ]
+'
+
 test_clean_cluster
 
 test_done

diff --git a/sharness/t0052-service-state-export.sh b/sharness/t0052-service-state-export.sh
index d2b84de9..d0dcdccc 100755
--- a/sharness/t0052-service-state-export.sh
+++ b/sharness/t0052-service-state-export.sh
@@ -5,27 +5,28 @@
 test_description="Test service state export"
 
 . lib/test-lib.sh
 
 test_ipfs_init
-test_cluster_init
+test_cluster_init "" crdt
 
 test_expect_success IPFS,CLUSTER,JQ "state export saves the correct state to expected file (crdt)" '
     cid=`docker exec ipfs sh -c "echo test_52-1 | ipfs add -q"` &&
     ipfs-cluster-ctl pin add "$cid" && sleep 5 &&
     cluster_kill && sleep 5 &&
-    ipfs-cluster-service --debug --config "test-config" state export --consensus crdt -f export.json &&
+    ipfs-cluster-service --debug --config "test-config" state export -f export.json &&
     [ -f export.json ] &&
     jq -r ".cid | .[\"/\"]" export.json | grep -q "$cid"
 '
 
 cluster_kill
-cluster_start raft
+sleep 5
+test_cluster_init "" raft
 
 test_expect_success IPFS,CLUSTER,JQ "state export saves the correct state to expected file (raft)" '
     cid=`docker exec ipfs sh -c "echo test_52-2 | ipfs add -q"` &&
     ipfs-cluster-ctl pin add "$cid" && sleep 5 &&
     cluster_kill && sleep 5 &&
-    ipfs-cluster-service --debug --config "test-config" state export --consensus raft -f export.json &&
+    ipfs-cluster-service --debug --config "test-config" state export -f export.json &&
     [ -f export.json ] &&
     jq -r ".cid | .[\"/\"]" export.json | grep -q "$cid"
 '

diff --git a/sharness/t0053-service-state-import.sh b/sharness/t0053-service-state-import.sh
index 929056a6..68e4a726 100755
--- a/sharness/t0053-service-state-import.sh
+++ b/sharness/t0053-service-state-import.sh
@@ -17,14 +17,14 @@ cluster_kill
 
 test_expect_success IPFS,CLUSTER "state import fails on incorrect format (crdt)" '
     sleep 5 &&
     echo "not exactly json" > badImportFile &&
-    test_expect_code 1 ipfs-cluster-service --config "test-config" state import --consensus crdt -f badImportFile
+    test_expect_code 1 ipfs-cluster-service --config "test-config" state import -f badImportFile
 '
 
 test_expect_success IPFS,CLUSTER,IMPORTSTATE "state import succeeds on correct format (crdt)" '
     sleep 5
     cid=`docker exec ipfs sh -c "echo test_53 | ipfs add -q"` &&
-    ipfs-cluster-service --config "test-config" state import --consensus crdt -f importState &&
-    cluster_start crdt &&
+    ipfs-cluster-service --config "test-config" state import -f importState &&
+    cluster_start &&
     sleep 5 &&
     ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid" &&
     ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED"
@@ -32,18 +32,19 @@ test_expect_success IPFS,CLUSTER,IMPORTSTATE "state import succeeds on correct f
 
 # Kill cluster daemon but keep data folder
 cluster_kill
+sleep 5
 
 test_expect_success IPFS,CLUSTER "state import fails on incorrect format (raft)" '
-    sleep 5 &&
+    ipfs-cluster-service --config "test-config" init --force --consensus raft &&
     echo "not exactly json" > badImportFile &&
     test_expect_code 1 ipfs-cluster-service --config "test-config" state import -f badImportFile
 '
 
 test_expect_success IPFS,CLUSTER,IMPORTSTATE "state import succeeds on correct format (raft)" '
     sleep 5
     cid=`docker exec ipfs sh -c "echo test_53 | ipfs add -q"` &&
-    ipfs-cluster-service --config "test-config" state import --consensus raft -f importState &&
-    cluster_start raft &&
+    ipfs-cluster-service --config "test-config" state import -f importState &&
+    cluster_start &&
     sleep 5 &&
     ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid" &&
     ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED"

diff --git a/sharness/t0054-service-state-clean.sh b/sharness/t0054-service-state-clean.sh
index 5821a44c..556a5e50 100755
--- a/sharness/t0054-service-state-clean.sh
+++ b/sharness/t0054-service-state-clean.sh
@@ -14,7 +14,7 @@ test_expect_success IPFS,CLUSTER "state cleanup refreshes state on restart (crdt
     ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED" &&
     [ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ] &&
     cluster_kill && sleep 5 &&
-    ipfs-cluster-service --config "test-config" state cleanup --consensus crdt -f &&
+    ipfs-cluster-service --config "test-config" state cleanup -f &&
     cluster_start && sleep 5 &&
     [ 0 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ]
 '
 
@@ -24,9 +24,9 @@ test_expect_success IPFS,CLUSTER "export + cleanup + import == noop (crdt)" '
     ipfs-cluster-ctl pin add "$cid" && sleep 5 &&
     [ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ] &&
     cluster_kill && sleep 5 &&
-    ipfs-cluster-service --config "test-config" state export --consensus crdt -f import.json &&
-    ipfs-cluster-service --config "test-config" state cleanup --consensus crdt -f &&
-    ipfs-cluster-service --config "test-config" state import --consensus crdt -f import.json &&
+    ipfs-cluster-service --config "test-config" state export -f import.json &&
+    ipfs-cluster-service --config "test-config" state cleanup -f &&
+    ipfs-cluster-service --config "test-config" state import -f import.json &&
     cluster_start && sleep 5 &&
     ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid" &&
     ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED" &&
@@ -35,7 +35,7 @@ test_expect_success IPFS,CLUSTER "export + cleanup + import == noop (crdt)" '
 
 cluster_kill
 sleep 5
-cluster_start "raft"
+test_cluster_init "" raft
 
 test_expect_success IPFS,CLUSTER "state cleanup refreshes state on restart (raft)" '
     cid=`docker exec ipfs sh -c "echo test_54 | ipfs add -q"` &&
@@ -44,8 +44,8 @@ test_expect_success IPFS,CLUSTER "state cleanup refreshes state on restart (raft
     ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED" &&
     [ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ] &&
     cluster_kill && sleep 5 &&
-    ipfs-cluster-service --config "test-config" state cleanup --consensus raft -f &&
-    cluster_start raft && sleep 5 &&
+    ipfs-cluster-service --config "test-config" state cleanup -f &&
+    cluster_start && sleep 5 &&
     [ 0 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ]
 '
 
@@ -54,10 +54,10 @@ test_expect_success IPFS,CLUSTER "export + cleanup + import == noop (raft)" '
     ipfs-cluster-ctl pin add "$cid" && sleep 5 &&
     [ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ] &&
     cluster_kill && sleep 5 &&
-    ipfs-cluster-service --config "test-config" state export --consensus raft -f import.json &&
-    ipfs-cluster-service --config "test-config" state cleanup --consensus raft -f &&
-    ipfs-cluster-service --config "test-config" state import --consensus raft -f import.json &&
-    cluster_start raft && sleep 5 &&
+    ipfs-cluster-service --config "test-config" state export -f import.json &&
+    ipfs-cluster-service --config "test-config" state cleanup -f &&
+    ipfs-cluster-service --config "test-config" state import -f import.json &&
+    cluster_start && sleep 5 &&
     ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid" &&
     ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED" &&
     [ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ]
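
Usage sketch of the resulting flow (illustrative only, not part of the
patch itself; it assumes the default configuration path ~/.ipfs-cluster
and mirrors the checks in t0021-service-init.sh):

    # Consensus is now chosen once, at init time ("crdt" is the default):
    ipfs-cluster-service init --consensus raft

    # Only the chosen section ends up in service.json:
    jq .consensus.raft ~/.ipfs-cluster/service.json   # non-null
    jq .consensus.crdt ~/.ipfs-cluster/service.json   # null

    # "daemon" and the "state *" commands autodetect the consensus from
    # the configuration and no longer take a --consensus flag:
    ipfs-cluster-service daemon
    ipfs-cluster-service state export -f export.json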