Service: Select consensus on "init" (not on "daemon")

Fixes #865.

This makes the necessary changes so that the consensus is selected on "init"
via a flag that defaults to "crdt". As a result, only a "crdt" or a "raft"
section is generated, never both.

If the configuration file has a "raft" section, "raft" will be used to start
the daemon. If it has a "crdt" section, "crdt" will be used. If it has neither
or both sections, an error is returned.

This also affects the "state *" commands, which now autodetect which consensus
to use from the existing configuration.
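
For reference, a minimal sketch of the resulting workflow (default configuration
locations assumed; the flags are the ones shown in the diffs below):

  ipfs-cluster-service init                           # generates only a "crdt" consensus section (the default)
  ipfs-cluster-service init --force --consensus raft  # generates only a "raft" section instead
  ipfs-cluster-service daemon                         # no --consensus flag; the consensus is read from service.json
  ipfs-cluster-service state export -f backup.json    # "state *" commands autodetect it the same way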
Hector Sanjuan 2019-08-09 16:00:55 +02:00
parent 00e78a6b6d
commit 063c5f1b78
16 changed files with 132 additions and 100 deletions

View File

@ -57,4 +57,4 @@ VOLUME $IPFS_CLUSTER_PATH
ENTRYPOINT ["/sbin/tini", "--", "/usr/local/bin/entrypoint.sh"]
# Defaults for ipfs-cluster-service go here
CMD ["daemon", "--consensus raft"]
CMD ["daemon"]

View File

@ -58,4 +58,4 @@ VOLUME $IPFS_CLUSTER_PATH
ENTRYPOINT ["/usr/local/bin/start-daemons.sh"]
# Defaults would go here
CMD ["daemon", "--upgrade"]
CMD ["daemon"]

View File

@ -46,10 +46,6 @@ func parseBootstraps(flagVal []string) (bootstraps []ma.Multiaddr) {
// Runs the cluster peer
func daemon(c *cli.Context) error {
if c.String("consensus") == "" {
checkErr("starting daemon", errors.New("--consensus flag must be set to \"raft\" or \"crdt\""))
}
logger.Info("Initializing. For verbose output run with \"-l debug\". Please wait...")
ctx, cancel := context.WithCancel(context.Background())
@ -78,7 +74,7 @@ func daemon(c *cli.Context) error {
// Cleanup state if bootstrapping
raftStaging := false
if len(bootstraps) > 0 && c.String("consensus") == "raft" {
if len(bootstraps) > 0 && cfgHelper.GetConsensus() == cfgs.Raft.ConfigKey() {
raft.CleanupRaft(cfgs.Raft)
raftStaging = true
}
@ -159,14 +155,13 @@ func createCluster(
tracer, err := observations.SetupTracing(cfgs.Tracing)
checkErr("setting up Tracing", err)
store := setupDatastore(c.String("consensus"), cfgHelper.Identity(), cfgs)
store := setupDatastore(cfgHelper)
cons, err := setupConsensus(
c.String("consensus"),
cfgHelper,
host,
dht,
pubsub,
cfgs,
store,
raftStaging,
)
@ -176,7 +171,7 @@ func createCluster(
}
var peersF func(context.Context) ([]peer.ID, error)
if c.String("consensus") == "raft" {
if cfgHelper.GetConsensus() == cfgs.Raft.ConfigKey() {
peersF = cons.Peers
}
@ -293,12 +288,8 @@ func setupPinTracker(
}
}
func setupDatastore(
consensus string,
ident *config.Identity,
cfgs *cmdutils.Configs,
) ds.Datastore {
stmgr, err := cmdutils.NewStateManager(consensus, ident, cfgs)
func setupDatastore(cfgHelper *cmdutils.ConfigHelper) ds.Datastore {
stmgr, err := cmdutils.NewStateManager(cfgHelper.GetConsensus(), cfgHelper.Identity(), cfgHelper.Configs())
checkErr("creating state manager", err)
store, err := stmgr.GetStore()
checkErr("creating datastore", err)
@ -306,19 +297,20 @@ func setupDatastore(
}
func setupConsensus(
name string,
cfgHelper *cmdutils.ConfigHelper,
h host.Host,
dht *dht.IpfsDHT,
pubsub *pubsub.PubSub,
cfgs *cmdutils.Configs,
store ds.Datastore,
raftStaging bool,
) (ipfscluster.Consensus, error) {
switch name {
case "raft":
cfgs := cfgHelper.Configs()
switch cfgHelper.GetConsensus() {
case cfgs.Raft.ConfigKey():
rft, err := raft.NewConsensus(
h,
cfgs.Raft,
cfgHelper.Configs().Raft,
store,
raftStaging,
)
@ -326,12 +318,12 @@ func setupConsensus(
return nil, errors.Wrap(err, "creating Raft component")
}
return rft, nil
case "crdt":
case cfgs.Crdt.ConfigKey():
convrdt, err := crdt.New(
h,
dht,
pubsub,
cfgs.Crdt,
cfgHelper.Configs().Crdt,
store,
)
if err != nil {

View File

@ -29,7 +29,7 @@ func (l *lock) lock() {
}
// we should have a config folder whenever we try to lock
cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath)
cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath, "")
cfgHelper.MakeConfigFolder()
// set the lock file within this function

View File

@ -4,6 +4,7 @@ package main
import (
"bufio"
"context"
"errors"
"fmt"
"io"
"os"
@ -30,6 +31,7 @@ const programName = `ipfs-cluster-service`
const (
defaultPinTracker = "map"
defaultLogLevel = "info"
defaultConsensus = "crdt"
)
const (
@ -226,21 +228,25 @@ will refer to it. The source configuration will be fetched from its source
URL during the launch of the daemon. If not, a default standard configuration
file will be created.
In the latter case, a cluster secret will be generated as required by %s.
Alternatively, this secret can be manually provided with --custom-secret (in
which case it will be prompted), or by setting the CLUSTER_SECRET environment
variable.
In the latter case, a cluster secret will be generated as required
by %s. Alternatively, this secret can be manually
provided with --custom-secret (in which case it will be prompted), or
by setting the CLUSTER_SECRET environment variable.
The --consensus flag allows selecting an alternative consensus component for
the newly-generated configuration.
Note that the --force flag allows to overwrite an existing
configuration with default values. To generate a new identity, please
remove the %s file first and clean any Raft state.
By default, an empty peerstore file will be created too. Initial contents can
be provided with the -peers flag. In this case, the "trusted_peers" list in
the "crdt" configuration section and the "init_peerset" list in the "raft"
configuration section will be prefilled to the peer IDs in the given
multiaddresses.
be provided with the --peers flag. Depending on the chosen consensus, the
"trusted_peers" list in the "crdt" configuration section and the
"init_peerset" list in the "raft" configuration section will be prefilled to
the peer IDs in the given multiaddresses.
`,
DefaultConfigFile,
DefaultIdentityFile,
programName,
@ -249,6 +255,11 @@ multiaddresses.
),
ArgsUsage: "[http-source-url]",
Flags: []cli.Flag{
cli.StringFlag{
Name: "consensus",
Usage: "select consensus component: 'crdt' or 'raft'",
Value: defaultConsensus,
},
cli.BoolFlag{
Name: "custom-secret, s",
Usage: "prompt for the cluster secret (when no source specified)",
@ -263,7 +274,12 @@ multiaddresses.
},
},
Action: func(c *cli.Context) error {
cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath)
consensus := c.String("consensus")
if consensus != "raft" && consensus != "crdt" {
checkErr("choosing consensus", errors.New("flag value must be set to 'raft' or 'crdt'"))
}
cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath, consensus)
defer cfgHelper.Manager().Shutdown() // wait for saves
configExists := false
@ -375,10 +391,6 @@ multiaddresses.
Usage: "remove peer from cluster on exit. Overrides \"leave_on_shutdown\"",
Hidden: true,
},
cli.StringFlag{
Name: "consensus",
Usage: "shared state management provider [raft,crdt]",
},
cli.StringFlag{
Name: "pintracker",
Value: defaultPinTracker,
@ -418,16 +430,12 @@ By default, the state will be printed to stdout.
Value: "",
Usage: "writes to an output file",
},
cli.StringFlag{
Name: "consensus",
Usage: "consensus component to export data from [raft, crdt]",
},
},
Action: func(c *cli.Context) error {
locker.lock()
defer locker.tryUnlock()
mgr := getStateManager(c.String("consensus"))
mgr := getStateManager()
var w io.WriteCloser
var err error
@ -463,10 +471,6 @@ to import. If no argument is provided, stdin will be used.
Name: "force, f",
Usage: "skips confirmation prompt",
},
cli.StringFlag{
Name: "consensus",
Usage: "consensus component to export data from [raft, crdt]",
},
},
Action: func(c *cli.Context) error {
locker.lock()
@ -478,7 +482,7 @@ to import. If no argument is provided, stdin will be used.
return nil
}
mgr := getStateManager(c.String("consensus"))
mgr := getStateManager()
// Get the importing file path
importFile := c.Args().First()
@ -511,10 +515,6 @@ to all effects. Peers may need to bootstrap and sync from scratch after this.
Name: "force, f",
Usage: "skip confirmation prompt",
},
cli.StringFlag{
Name: "consensus",
Usage: "consensus component to export data from [raft, crdt]",
},
},
Action: func(c *cli.Context) error {
locker.lock()
@ -528,7 +528,7 @@ to all effects. Peers may need to bootstrap and sync from scratch after this.
return nil
}
mgr := getStateManager(c.String("consensus"))
mgr := getStateManager()
checkErr("cleaning state", mgr.Clean())
logger.Info("data correctly cleaned up")
return nil
@ -608,21 +608,21 @@ func yesNoPrompt(prompt string) bool {
func loadConfigHelper() *cmdutils.ConfigHelper {
// Load all the configurations and identity
cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath)
cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath, "")
err := cfgHelper.LoadFromDisk()
checkErr("loading identity or configurations", err)
return cfgHelper
}
func getStateManager(consensus string) cmdutils.StateManager {
func getStateManager() cmdutils.StateManager {
cfgHelper := loadConfigHelper()
// since we won't save configs we can shutdown
cfgHelper.Manager().Shutdown()
mgr, err := cmdutils.NewStateManager(
consensus,
cfgHelper.GetConsensus(),
cfgHelper.Identity(),
cfgHelper.Configs(),
)
checkErr("creating state manager,", err)
checkErr("creating state manager", err)
return mgr
}
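
As a usage note for the init help text above (the peer multiaddress below is a
made-up placeholder and the --peers syntax is assumed from that help text):
passing --peers at init time prefills the list matching the chosen consensus,
"trusted_peers" for crdt or "init_peerset" for raft.

  ipfs-cluster-service --config test-config init --force --consensus crdt \
    --peers /ip4/192.0.2.1/tcp/9096/ipfs/QmPeerIDPlaceholder
  jq -r '.consensus.crdt.trusted_peers' test-config/service.json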

View File

@ -50,14 +50,16 @@ type ConfigHelper struct {
configPath string
identityPath string
consensus string
}
// NewConfigHelper creates a config helper given the paths to the
// configuration and identity files.
func NewConfigHelper(configPath, identityPath string) *ConfigHelper {
func NewConfigHelper(configPath, identityPath, consensus string) *ConfigHelper {
ch := &ConfigHelper{
configPath: configPath,
identityPath: identityPath,
consensus: consensus,
}
ch.init()
return ch
@ -138,6 +140,29 @@ func (ch *ConfigHelper) Configs() *Configs {
return ch.configs
}
// GetConsensus attempts to return the configured consensus.
// If the ConfigHelper was initialized with a consensus string
// then it returns that.
//
// Otherwise it checks whether one of the consensus configurations
// has been loaded. If both or none have been loaded, it returns
// an empty string.
func (ch *ConfigHelper) GetConsensus() string {
if ch.consensus != "" {
return ch.consensus
}
crdtLoaded := ch.manager.IsLoadedFromJSON(config.Consensus, ch.configs.Crdt.ConfigKey())
raftLoaded := ch.manager.IsLoadedFromJSON(config.Consensus, ch.configs.Raft.ConfigKey())
if crdtLoaded == raftLoaded { //both loaded or none
return ""
}
if crdtLoaded {
return ch.configs.Crdt.ConfigKey()
}
return ch.configs.Raft.ConfigKey()
}
// register all current cluster components
func (ch *ConfigHelper) init() {
man := config.NewManager()
@ -160,8 +185,6 @@ func (ch *ConfigHelper) init() {
man.RegisterComponent(config.API, cfgs.Restapi)
man.RegisterComponent(config.API, cfgs.Ipfsproxy)
man.RegisterComponent(config.IPFSConn, cfgs.Ipfshttp)
man.RegisterComponent(config.Consensus, cfgs.Raft)
man.RegisterComponent(config.Consensus, cfgs.Crdt)
man.RegisterComponent(config.PinTracker, cfgs.Maptracker)
man.RegisterComponent(config.PinTracker, cfgs.Statelesstracker)
man.RegisterComponent(config.Monitor, cfgs.Pubsubmon)
@ -170,6 +193,16 @@ func (ch *ConfigHelper) init() {
man.RegisterComponent(config.Observations, cfgs.Tracing)
man.RegisterComponent(config.Datastore, cfgs.Badger)
switch ch.consensus {
case cfgs.Raft.ConfigKey():
man.RegisterComponent(config.Consensus, cfgs.Raft)
case cfgs.Crdt.ConfigKey():
man.RegisterComponent(config.Consensus, cfgs.Crdt)
default:
man.RegisterComponent(config.Consensus, cfgs.Raft)
man.RegisterComponent(config.Consensus, cfgs.Crdt)
}
ch.identity = &config.Identity{}
ch.manager = man
ch.configs = cfgs
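
To illustrate GetConsensus above: the same detection can be reproduced by hand
against a written service.json with jq (a sketch against the sharness tests'
"test-config" layout, not part of this commit):

  raft=$(jq '.consensus.raft != null' test-config/service.json)
  crdt=$(jq '.consensus.crdt != null' test-config/service.json)
  if [ "$raft" = "$crdt" ]; then   # both sections present or both missing: undetermined
    echo "could not determine the consensus component" >&2
  elif [ "$crdt" = "true" ]; then
    echo "crdt"
  else
    echo "raft"
  fi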

View File

@ -33,12 +33,12 @@ type StateManager interface {
// consensus ("raft" or "crdt"). It will need initialized configs.
func NewStateManager(consensus string, ident *config.Identity, cfgs *Configs) (StateManager, error) {
switch consensus {
case "raft":
case cfgs.Raft.ConfigKey():
return &raftStateManager{ident, cfgs}, nil
case "crdt":
case cfgs.Crdt.ConfigKey():
return &crdtStateManager{ident, cfgs}, nil
case "":
return nil, errors.New("unspecified consensus component")
return nil, errors.New("could not determine the consensus component")
default:
return nil, fmt.Errorf("unknown consensus component '%s'", consensus)
}

View File

@ -423,7 +423,7 @@ func (cfg *Manager) LoadJSON(bs []byte) error {
logger.Debugf("%s component configuration loaded", name)
} else {
cfg.undefinedComps[t][name] = true
logger.Warningf("%s component is empty, generating default", name)
logger.Debugf("%s component is empty, generating default", name)
component.Default()
}

View File

@ -82,14 +82,14 @@ services:
# to it.
command: >-
sh -c '
cmd="daemon --consensus raft"
if [ ! -d /data/ipfs-cluster/raft ]; then
cmd="daemon"
if [ ! -d /data/ipfs-cluster/badger ]; then
while ! ipfs-cluster-ctl --host /dns4/cluster0/tcp/9094 id; do
sleep 1
done
pid=`ipfs-cluster-ctl --host /dns4/cluster0/tcp/9094 id | grep -o -E "^(\w+)"`
sleep 10
cmd="daemon --consensus raft --bootstrap /dns4/cluster0/tcp/9096/ipfs/$$pid"
cmd="daemon --bootstrap /dns4/cluster0/tcp/9096/ipfs/$$pid"
fi
exec /usr/local/bin/entrypoint.sh $$cmd
'

View File

@ -4,6 +4,6 @@
sleep 4
while true; do
export CLUSTER_SECRET=""
pgrep ipfs-cluster-service || echo "CLUSTER RESTARTING"; ipfs-cluster-service daemon --consensus raft --debug &
pgrep ipfs-cluster-service || echo "CLUSTER RESTARTING"; ipfs-cluster-service daemon --debug &
sleep 10
done

View File

@ -60,6 +60,10 @@ test_ipfs_running() {
test_cluster_init() {
custom_config_files="$1"
consensus="$2"
if [ -z "$consensus" ]; then
consensus="crdt"
fi
which ipfs-cluster-service >/dev/null 2>&1
if [ $? -ne 0 ]; then
@ -71,7 +75,7 @@ test_cluster_init() {
echo "cluster init FAIL: ipfs-cluster-ctl not found"
exit 1
fi
ipfs-cluster-service -f --config "test-config" init >"$IPFS_OUTPUT" 2>&1
ipfs-cluster-service --config "test-config" init --force --consensus "$consensus" >"$IPFS_OUTPUT" 2>&1
if [ $? -ne 0 ]; then
echo "cluster init FAIL: error on ipfs cluster init"
exit 1
@ -80,7 +84,7 @@ test_cluster_init() {
if [ -n "$custom_config_files" ]; then
cp -f ${custom_config_files}/* "test-config"
fi
cluster_start $2
cluster_start
}
test_cluster_config() {
@ -121,12 +125,7 @@ cluster_kill(){
}
cluster_start(){
consensus="$1"
if [ -z "$consensus" ]; then
consensus="crdt"
fi
ipfs-cluster-service --config "test-config" daemon --consensus "$consensus" >"$IPFS_OUTPUT" 2>&1 &
ipfs-cluster-service --config "test-config" daemon >"$IPFS_OUTPUT" 2>&1 &
while ! curl -s 'localhost:9095/api/v0/version' >/dev/null; do
sleep 0.2
done

View File

@ -6,7 +6,7 @@ statuses=0
for i in t0*.sh;
do
echo "*** $i ***"
./$i --verbose
./$i
status=$?
statuses=$((statuses + $status))
if [ $status -ne 0 ]; then

View File

@ -18,6 +18,12 @@ test_expect_success "cluster-service init without --peers succeeds and creates e
[ ! -s "test-config/peerstore" ]
'
test_expect_success "cluster-service init with raft generates only raft config" '
ipfs-cluster-service --config "test-config" init -f --consensus raft &&
[ "$(jq -M -r .consensus.raft test-config/service.json)" != "null" ] &&
[ "$(jq -M -r .consensus.crdt test-config/service.json)" == "null" ]
'
test_clean_cluster
test_done

View File

@ -5,27 +5,28 @@ test_description="Test service state export"
. lib/test-lib.sh
test_ipfs_init
test_cluster_init
test_cluster_init "" crdt
test_expect_success IPFS,CLUSTER,JQ "state export saves the correct state to expected file (crdt)" '
cid=`docker exec ipfs sh -c "echo test_52-1 | ipfs add -q"` &&
ipfs-cluster-ctl pin add "$cid" &&
sleep 5 &&
cluster_kill && sleep 5 &&
ipfs-cluster-service --debug --config "test-config" state export --consensus crdt -f export.json &&
ipfs-cluster-service --debug --config "test-config" state export -f export.json &&
[ -f export.json ] &&
jq -r ".cid | .[\"/\"]" export.json | grep -q "$cid"
'
cluster_kill
cluster_start raft
sleep 5
test_cluster_init "" raft
test_expect_success IPFS,CLUSTER,JQ "state export saves the correct state to expected file (raft)" '
cid=`docker exec ipfs sh -c "echo test_52-2 | ipfs add -q"` &&
ipfs-cluster-ctl pin add "$cid" &&
sleep 5 &&
cluster_kill && sleep 5 &&
ipfs-cluster-service --debug --config "test-config" state export --consensus raft -f export.json &&
ipfs-cluster-service --debug --config "test-config" state export -f export.json &&
[ -f export.json ] &&
jq -r ".cid | .[\"/\"]" export.json | grep -q "$cid"
'

View File

@ -17,14 +17,14 @@ cluster_kill
test_expect_success IPFS,CLUSTER "state import fails on incorrect format (crdt)" '
sleep 5 &&
echo "not exactly json" > badImportFile &&
test_expect_code 1 ipfs-cluster-service --config "test-config" state import --consensus crdt -f badImportFile
test_expect_code 1 ipfs-cluster-service --config "test-config" state import -f badImportFile
'
test_expect_success IPFS,CLUSTER,IMPORTSTATE "state import succeeds on correct format (crdt)" '
sleep 5
cid=`docker exec ipfs sh -c "echo test_53 | ipfs add -q"` &&
ipfs-cluster-service --config "test-config" state import --consensus crdt -f importState &&
cluster_start crdt &&
ipfs-cluster-service --config "test-config" state import -f importState &&
cluster_start &&
sleep 5 &&
ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid" &&
ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED"
@ -32,18 +32,19 @@ test_expect_success IPFS,CLUSTER,IMPORTSTATE "state import succeeds on correct f
# Kill cluster daemon but keep data folder
cluster_kill
sleep 5
test_expect_success IPFS,CLUSTER "state import fails on incorrect format (raft)" '
sleep 5 &&
ipfs-cluster-service --config "test-config" init --force --consensus raft &&
echo "not exactly json" > badImportFile &&
test_expect_code 1 ipfs-cluster-service --config "test-config" state import --consensus raft -f badImportFile
test_expect_code 1 ipfs-cluster-service --config "test-config" state import -f badImportFile
'
test_expect_success IPFS,CLUSTER,IMPORTSTATE "state import succeeds on correct format (raft)" '
sleep 5
cid=`docker exec ipfs sh -c "echo test_53 | ipfs add -q"` &&
ipfs-cluster-service --config "test-config" state import --consensus raft -f importState &&
cluster_start raft &&
ipfs-cluster-service --config "test-config" state import -f importState &&
cluster_start &&
sleep 5 &&
ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid" &&
ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED"

View File

@ -14,7 +14,7 @@ test_expect_success IPFS,CLUSTER "state cleanup refreshes state on restart (crdt
ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED" &&
[ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ] &&
cluster_kill && sleep 5 &&
ipfs-cluster-service --config "test-config" state cleanup --consensus crdt -f &&
ipfs-cluster-service --config "test-config" state cleanup -f &&
cluster_start && sleep 5 &&
[ 0 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ]
'
@ -24,9 +24,9 @@ test_expect_success IPFS,CLUSTER "export + cleanup + import == noop (crdt)" '
ipfs-cluster-ctl pin add "$cid" && sleep 5 &&
[ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ] &&
cluster_kill && sleep 5 &&
ipfs-cluster-service --config "test-config" state export --consensus crdt -f import.json &&
ipfs-cluster-service --config "test-config" state cleanup --consensus crdt -f &&
ipfs-cluster-service --config "test-config" state import --consensus crdt -f import.json &&
ipfs-cluster-service --config "test-config" state export -f import.json &&
ipfs-cluster-service --config "test-config" state cleanup -f &&
ipfs-cluster-service --config "test-config" state import -f import.json &&
cluster_start && sleep 5 &&
ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid" &&
ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED" &&
@ -35,7 +35,7 @@ test_expect_success IPFS,CLUSTER "export + cleanup + import == noop (crdt)" '
cluster_kill
sleep 5
cluster_start "raft"
test_cluster_init "" raft
test_expect_success IPFS,CLUSTER "state cleanup refreshes state on restart (raft)" '
cid=`docker exec ipfs sh -c "echo test_54 | ipfs add -q"` &&
@ -44,8 +44,8 @@ test_expect_success IPFS,CLUSTER "state cleanup refreshes state on restart (raft
ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED" &&
[ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ] &&
cluster_kill && sleep 5 &&
ipfs-cluster-service --config "test-config" state cleanup --consensus raft -f &&
cluster_start raft && sleep 5 &&
ipfs-cluster-service --config "test-config" state cleanup -f &&
cluster_start && sleep 5 &&
[ 0 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ]
'
@ -54,10 +54,10 @@ test_expect_success IPFS,CLUSTER "export + cleanup + import == noop (raft)" '
ipfs-cluster-ctl pin add "$cid" && sleep 5 &&
[ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ] &&
cluster_kill && sleep 5 &&
ipfs-cluster-service --config "test-config" state export --consensus raft -f import.json &&
ipfs-cluster-service --config "test-config" state cleanup --consensus raft -f &&
ipfs-cluster-service --config "test-config" state import --consensus raft -f import.json &&
cluster_start raft && sleep 5 &&
ipfs-cluster-service --config "test-config" state export -f import.json &&
ipfs-cluster-service --config "test-config" state cleanup -f &&
ipfs-cluster-service --config "test-config" state import -f import.json &&
cluster_start && sleep 5 &&
ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid" &&
ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED" &&
[ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ]