ipfs-cluster/sharness/lib/test-lib.sh
Hector Sanjuan 8f06baa1bf Issue #162: Rework configuration format
The following commit reimplements ipfs-cluster configuration under
the following premises:

  * Each component is initialized with a configuration object
  defined by its module
  * Each component decides what the JSON representation of its
  configuration looks like
  * Each component parses and validates its own configuration
  * Each component exposes its own defaults
  * Component configurations make up the sections of a
  central JSON configuration file (which replaces the current
  JSON format)
  * Component configurations implement a common interface
  (config.ComponentConfig) with a set of common operations
  * The central configuration file is managed by a
  config.ConfigManager which:
    * Registers ComponentConfigs
    * Assigns the correspondent sections from the JSON file to each
    component and delegates the parsing
    * Delegates the JSON generation for each section
    * Can be notified when the configuration is updated and must be
    saved to disk

The new service.json would then look as follows:

```json
{
  "cluster": {
    "id": "QmTVW8NoRxC5wBhV7WtAYtRn7itipEESfozWN5KmXUQnk2",
    "private_key": "<...>",
    "secret": "00224102ae6aaf94f2606abf69a0e278251ecc1d64815b617ff19d6d2841f786",
    "peers": [],
    "bootstrap": [],
    "leave_on_shutdown": false,
    "listen_multiaddress": "/ip4/0.0.0.0/tcp/9096",
    "state_sync_interval": "1m0s",
    "ipfs_sync_interval": "2m10s",
    "replication_factor": -1,
    "monitor_ping_interval": "15s"
  },
  "consensus": {
    "raft": {
      "heartbeat_timeout": "1s",
      "election_timeout": "1s",
      "commit_timeout": "50ms",
      "max_append_entries": 64,
      "trailing_logs": 10240,
      "snapshot_interval": "2m0s",
      "snapshot_threshold": 8192,
      "leader_lease_timeout": "500ms"
    }
  },
  "api": {
    "restapi": {
      "listen_multiaddress": "/ip4/127.0.0.1/tcp/9094",
      "read_timeout": "30s",
      "read_header_timeout": "5s",
      "write_timeout": "1m0s",
      "idle_timeout": "2m0s"
    }
  },
  "ipfs_connector": {
    "ipfshttp": {
      "proxy_listen_multiaddress": "/ip4/127.0.0.1/tcp/9095",
      "node_multiaddress": "/ip4/127.0.0.1/tcp/5001",
      "connect_swarms_delay": "7s",
      "proxy_read_timeout": "10m0s",
      "proxy_read_header_timeout": "5s",
      "proxy_write_timeout": "10m0s",
      "proxy_idle_timeout": "1m0s"
    }
  },
  "monitor": {
    "monbasic": {
      "check_interval": "15s"
    }
  },
  "informer": {
    "disk": {
      "metric_ttl": "30s",
      "metric_type": "freespace"
    },
    "numpin": {
      "metric_ttl": "10s"
    }
  }
}
```

This new format aims to be easily extensible per component. As such,
it already surfaces quite a few new options which were hardcoded
before.

Additionally, since the Go APIs have changed, some redundant methods have
been removed and some small refactoring has been done to take advantage
of the new approach.

License: MIT
Signed-off-by: Hector Sanjuan <hector@protocol.ai>
2017-10-18 00:00:12 +02:00

103 lines
2.6 KiB
Bash
Executable File

# Sharness test framework for ipfs-cluster
#
# We are using sharness (https://github.com/mlafeldt/sharness)
# which was extracted from the Git test framework.
# Location of the sharness framework, relative to the test scripts.
SHARNESS_LIB="lib/sharness/sharness.sh"
# Daemons output will be redirected to...
IPFS_OUTPUT="/dev/null" # change for debugging
# IPFS_OUTPUT="/dev/stderr" # change for debugging

. "$SHARNESS_LIB" || {
    echo >&2 "Cannot source: $SHARNESS_LIB"
    echo >&2 "Please check Sharness installation."
    exit 1
}

# Set the JQ prereq when jq is available.
# 'command -v' is the portable replacement for 'which', and testing the
# command directly avoids the 'cmd; if [ $? -eq 0 ]' anti-pattern.
if command -v jq >/dev/null 2>&1; then
    test_set_prereq JQ
fi
# Set prereqs
# Start a go-ipfs daemon in docker (unless an 'ipfs' container is already
# running) and set the IPFS prereq. Exits the whole test on failure.
# Globals: none read; sets sharness prereq IPFS.
test_ipfs_init() {
    # 'command -v' replaces the non-portable 'which'.
    if ! command -v docker >/dev/null 2>&1; then
        echo "Docker not found"
        exit 1
    fi
    # 'egrep' is deprecated; 'grep -E' is the standard spelling.
    if docker ps --format '{{.Names}}' | grep -E -q '^ipfs$'; then
        echo "ipfs container already running"
    else
        if ! docker run --name ipfs -d -p 127.0.0.1:5001:5001 ipfs/go-ipfs > /dev/null 2>&1; then
            echo "Error running go-ipfs in docker."
            exit 1
        fi
        # Give the daemon time to come up before tests hit its API.
        sleep 10
    fi
    test_set_prereq IPFS
}
# Set the IPFS prereq if a local ipfs daemon answers on the API port;
# abort the whole test run otherwise.
test_ipfs_running() {
    if ! curl -s "localhost:5001/api/v0/version" > /dev/null; then
        echo "IPFS is not running"
        exit 1
    fi
    test_set_prereq IPFS
}
# Initialize an ipfs-cluster configuration in ./test-config and launch
# the cluster daemon in the background.
# Arguments: $1 - optional directory whose files are copied over the
#                 freshly generated configuration.
# Globals:   IPFS_OUTPUT (read), CLUSTER_D_PID (exported).
test_cluster_init() {
    custom_config_files="$1"

    # 'command -v' replaces the non-portable 'which'.
    if ! command -v ipfs-cluster-service >/dev/null 2>&1; then
        echo "ipfs-cluster-service not found"
        exit 1
    fi
    if ! command -v ipfs-cluster-ctl >/dev/null 2>&1; then
        echo "ipfs-cluster-ctl not found"
        exit 1
    fi
    if ! ipfs-cluster-service -f --config "test-config" init >"$IPFS_OUTPUT" 2>&1; then
        echo "error initializing ipfs cluster"
        exit 1
    fi
    # Start from a clean consensus state.
    rm -rf "test-config/ipfs-cluster-data"
    if [ -n "$custom_config_files" ]; then
        # Quote the directory part; the glob must stay unquoted to expand.
        cp -f "${custom_config_files}"/* "test-config"
    fi
    ipfs-cluster-service --config "test-config" >"$IPFS_OUTPUT" 2>&1 &
    export CLUSTER_D_PID=$!
    # Give the daemon time to boot before tests talk to it.
    sleep 5
    test_set_prereq CLUSTER
}
# Load the cluster id and private key from the generated service.json.
# Exports CLUSTER_CONFIG_PATH, CLUSTER_CONFIG_ID and CLUSTER_CONFIG_PK.
# Returns non-zero when either field is missing (jq prints "null").
test_cluster_config() {
    export CLUSTER_CONFIG_PATH="test-config/service.json"
    # $( ) replaces the legacy backticks; quote the path expansion.
    export CLUSTER_CONFIG_ID=$(jq --raw-output ".cluster.id" "$CLUSTER_CONFIG_PATH")
    export CLUSTER_CONFIG_PK=$(jq --raw-output ".cluster.private_key" "$CLUSTER_CONFIG_PATH")
    [ "$CLUSTER_CONFIG_ID" != "null" ] && [ "$CLUSTER_CONFIG_PK" != "null" ]
}
# Print the cluster peer id stored in the test service configuration.
cluster_id() {
    jq --raw-output '.cluster.id' "test-config/service.json"
}
# Cleanup functions
# Stop and remove the dockerized ipfs daemon used by the tests.
test_clean_ipfs(){
    docker kill ipfs
    docker rm ipfs
    sleep 1
}
# Signal the cluster daemon to shut down (SIGHUP, spelled mnemonically)
# and wipe its test configuration directory.
test_clean_cluster(){
    kill -HUP "$CLUSTER_D_PID"
    rm -rf 'test-config'
    sleep 2
}