Daemon: support remote configuration (#868)

* Daemon: support remote configuration

This:

* Adds support for fetching the configuration from a remote HTTP location:

`ipfs-cluster-service init http://localhost:8080/ipfs/Qm...` will instruct
cluster to read the configuration file from ipfs on start (potentially making
use of ipns and dnslink).

This is done by creating a `service.json` like `{ "source": <url> }`.

The source is then read when loading that configuration every time the daemon starts.

This allows users to always rely on a mutating remote configuration, potentially
adding/removing trusted peers from the list or adjusting other settings.

* Configuration and state helpers from ipfs-cluster-service have been extracted
into their own cmdutils package. This will help support something like an
`ipfs-cluster-follow` command in the next releases.

* Allows disabling the REST API by not defining it in the configuration (I thought
this was already the case, but apparently it only affected the ipfsproxy).

* Removes informer/allocator configurations from the daemon (--alloc). No one used
a non-default pair. In fact, using the reposize one was potentially buggy.
This commit is contained in:
Hector Sanjuan 2019-08-09 12:56:27 +02:00 committed by GitHub
parent 04b281e86f
commit 00e78a6b6d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
11 changed files with 520 additions and 347 deletions

View File

@ -1,162 +0,0 @@
package main
import (
"errors"
"os"
"path/filepath"
ipfscluster "github.com/ipfs/ipfs-cluster"
"github.com/ipfs/ipfs-cluster/api/ipfsproxy"
"github.com/ipfs/ipfs-cluster/api/rest"
"github.com/ipfs/ipfs-cluster/config"
"github.com/ipfs/ipfs-cluster/consensus/crdt"
"github.com/ipfs/ipfs-cluster/consensus/raft"
"github.com/ipfs/ipfs-cluster/datastore/badger"
"github.com/ipfs/ipfs-cluster/informer/disk"
"github.com/ipfs/ipfs-cluster/informer/numpin"
"github.com/ipfs/ipfs-cluster/ipfsconn/ipfshttp"
"github.com/ipfs/ipfs-cluster/monitor/pubsubmon"
"github.com/ipfs/ipfs-cluster/observations"
"github.com/ipfs/ipfs-cluster/pintracker/maptracker"
"github.com/ipfs/ipfs-cluster/pintracker/stateless"
)
// cfgs groups the configuration objects of every cluster component
// registered with the config.Manager created by makeConfigs.
type cfgs struct {
clusterCfg *ipfscluster.Config
apiCfg *rest.Config
ipfsproxyCfg *ipfsproxy.Config
ipfshttpCfg *ipfshttp.Config
raftCfg *raft.Config
crdtCfg *crdt.Config
maptrackerCfg *maptracker.Config
statelessTrackerCfg *stateless.Config
pubsubmonCfg *pubsubmon.Config
diskInfCfg *disk.Config
numpinInfCfg *numpin.Config
metricsCfg *observations.MetricsConfig
tracingCfg *observations.TracingConfig
badgerCfg *badger.Config
}
// makeConfigs creates a config.Manager along with an empty configuration
// object for every cluster component, registering each one with the manager
// under its configuration section so it can be read from / written to the
// right part of service.json. It returns both the manager and the grouped
// configuration objects.
func makeConfigs() (*config.Manager, *cfgs) {
cfg := config.NewManager()
clusterCfg := &ipfscluster.Config{}
apiCfg := &rest.Config{}
ipfsproxyCfg := &ipfsproxy.Config{}
ipfshttpCfg := &ipfshttp.Config{}
raftCfg := &raft.Config{}
crdtCfg := &crdt.Config{}
maptrackerCfg := &maptracker.Config{}
statelessCfg := &stateless.Config{}
pubsubmonCfg := &pubsubmon.Config{}
diskInfCfg := &disk.Config{}
numpinInfCfg := &numpin.Config{}
metricsCfg := &observations.MetricsConfig{}
tracingCfg := &observations.TracingConfig{}
badgerCfg := &badger.Config{}
// Register every component config under its manager section.
cfg.RegisterComponent(config.Cluster, clusterCfg)
cfg.RegisterComponent(config.API, apiCfg)
cfg.RegisterComponent(config.API, ipfsproxyCfg)
cfg.RegisterComponent(config.IPFSConn, ipfshttpCfg)
cfg.RegisterComponent(config.Consensus, raftCfg)
cfg.RegisterComponent(config.Consensus, crdtCfg)
cfg.RegisterComponent(config.PinTracker, maptrackerCfg)
cfg.RegisterComponent(config.PinTracker, statelessCfg)
cfg.RegisterComponent(config.Monitor, pubsubmonCfg)
cfg.RegisterComponent(config.Informer, diskInfCfg)
cfg.RegisterComponent(config.Informer, numpinInfCfg)
cfg.RegisterComponent(config.Observations, metricsCfg)
cfg.RegisterComponent(config.Observations, tracingCfg)
cfg.RegisterComponent(config.Datastore, badgerCfg)
return cfg, &cfgs{
clusterCfg,
apiCfg,
ipfsproxyCfg,
ipfshttpCfg,
raftCfg,
crdtCfg,
maptrackerCfg,
statelessCfg,
pubsubmonCfg,
diskInfCfg,
numpinInfCfg,
metricsCfg,
tracingCfg,
badgerCfg,
}
}
// makeAndLoadConfigs loads the peer identity and parses the configuration
// file (with environment-variable overrides) into freshly registered
// component configurations. Errors abort the program via checkErr.
func makeAndLoadConfigs() (*config.Manager, *config.Identity, *cfgs) {
ident := loadIdentity()
cfgMgr, cfgs := makeConfigs()
checkErr("reading configuration", cfgMgr.LoadJSONFileAndEnv(configPath))
return cfgMgr, ident, cfgs
}
// loadIdentity reads the peer identity from identityPath. As a migration
// aid, when the identity file does not exist yet, the identity is extracted
// from the cluster section of the configuration file and saved as a
// separate identity file. Environment variables are applied to the
// identity in both paths. Errors abort the program via checkErr.
func loadIdentity() *config.Identity {
_, err := os.Stat(identityPath)
ident := &config.Identity{}
// temporary hack to convert identity
if os.IsNotExist(err) {
// Identity file missing: pull the identity out of the main
// configuration file and persist it separately.
clusterConfig, err := config.GetClusterConfig(configPath)
checkErr("loading configuration", err)
err = ident.LoadJSON(clusterConfig)
if err != nil {
checkErr("", errors.New("error loading identity"))
}
err = ident.SaveJSON(identityPath)
checkErr("saving identity.json ", err)
err = ident.ApplyEnvVars()
checkErr("applying environment variables to the identity", err)
out("\nNOTICE: identity information extracted from %s and saved as %s.\n\n", DefaultConfigFile, DefaultIdentityFile)
return ident
}
// Normal path: identity file exists, load it directly.
err = ident.LoadJSONFromFile(identityPath)
checkErr("loading identity from %s", err, DefaultIdentityFile)
err = ident.ApplyEnvVars()
checkErr("applying environment variables to the identity", err)
return ident
}
// makeConfigFolder creates the folder containing the configuration file
// (mode 0700) if it does not exist yet.
func makeConfigFolder() {
f := filepath.Dir(configPath)
if _, err := os.Stat(f); os.IsNotExist(err) {
err := os.MkdirAll(f, 0700)
checkErr("creating configuration folder (%s)", err, f)
}
}
// saveConfig writes the managed configuration to configPath, creating the
// configuration folder first when needed.
func saveConfig(cfg *config.Manager) {
makeConfigFolder()
err := cfg.SaveJSON(configPath)
checkErr("saving new configuration", err)
out("configuration written to %s\n", configPath)
}
// propagateTracingConfig computes the effective tracing setting (the CLI
// flag takes priority; otherwise the config file value is used) and copies
// it — together with the cluster ID and peer name — into every
// configuration section that consumes it.
func propagateTracingConfig(ident *config.Identity, cfgs *cfgs, tracingFlag bool) *cfgs {
// tracingFlag represents the cli flag passed to ipfs-cluster-service daemon.
// It takes priority. If false, fallback to config file value.
tracingValue := tracingFlag
if !tracingFlag {
tracingValue = cfgs.tracingCfg.EnableTracing
}
// propagate to any other interested configuration
cfgs.tracingCfg.ClusterID = ident.ID.Pretty()
cfgs.tracingCfg.ClusterPeername = cfgs.clusterCfg.Peername
cfgs.tracingCfg.EnableTracing = tracingValue
cfgs.clusterCfg.Tracing = tracingValue
cfgs.raftCfg.Tracing = tracingValue
cfgs.crdtCfg.Tracing = tracingValue
cfgs.apiCfg.Tracing = tracingValue
cfgs.ipfshttpCfg.Tracing = tracingValue
cfgs.ipfsproxyCfg.Tracing = tracingValue
return cfgs
}

View File

@ -8,15 +8,14 @@ import (
"time"
ipfscluster "github.com/ipfs/ipfs-cluster"
"github.com/ipfs/ipfs-cluster/allocator/ascendalloc"
"github.com/ipfs/ipfs-cluster/allocator/descendalloc"
"github.com/ipfs/ipfs-cluster/api/ipfsproxy"
"github.com/ipfs/ipfs-cluster/api/rest"
"github.com/ipfs/ipfs-cluster/cmdutils"
"github.com/ipfs/ipfs-cluster/config"
"github.com/ipfs/ipfs-cluster/consensus/crdt"
"github.com/ipfs/ipfs-cluster/consensus/raft"
"github.com/ipfs/ipfs-cluster/informer/disk"
"github.com/ipfs/ipfs-cluster/informer/numpin"
"github.com/ipfs/ipfs-cluster/ipfsconn/ipfshttp"
"github.com/ipfs/ipfs-cluster/monitor/pubsubmon"
"github.com/ipfs/ipfs-cluster/observations"
@ -62,35 +61,36 @@ func daemon(c *cli.Context) error {
defer locker.tryUnlock()
// Load all the configurations and identity
cfgMgr, ident, cfgs := makeAndLoadConfigs()
cfgHelper := loadConfigHelper()
defer cfgHelper.Manager().Shutdown()
defer cfgMgr.Shutdown()
cfgs := cfgHelper.Configs()
if len(bootstraps) > 0 && !c.Bool("no-trust") {
cfgs.crdtCfg.TrustedPeers = append(cfgs.crdtCfg.TrustedPeers, ipfscluster.PeersFromMultiaddrs(bootstraps)...)
if !c.Bool("no-trust") {
crdtCfg := cfgs.Crdt
crdtCfg.TrustedPeers = append(crdtCfg.TrustedPeers, ipfscluster.PeersFromMultiaddrs(bootstraps)...)
}
if c.Bool("stats") {
cfgs.metricsCfg.EnableStats = true
cfgs.Metrics.EnableStats = true
}
cfgs = propagateTracingConfig(ident, cfgs, c.Bool("tracing"))
cfgHelper.SetupTracing(c.Bool("tracing"))
// Cleanup state if bootstrapping
raftStaging := false
if len(bootstraps) > 0 && c.String("consensus") == "raft" {
raft.CleanupRaft(cfgs.raftCfg)
raft.CleanupRaft(cfgs.Raft)
raftStaging = true
}
if c.Bool("leave") {
cfgs.clusterCfg.LeaveOnShutdown = true
cfgs.Cluster.LeaveOnShutdown = true
}
host, pubsub, dht, err := ipfscluster.NewClusterHost(ctx, ident, cfgs.clusterCfg)
host, pubsub, dht, err := ipfscluster.NewClusterHost(ctx, cfgHelper.Identity(), cfgs.Cluster)
checkErr("creating libp2p host", err)
cluster, err := createCluster(ctx, c, cfgMgr, host, pubsub, dht, ident, cfgs, raftStaging)
cluster, err := createCluster(ctx, c, cfgHelper, host, pubsub, dht, raftStaging)
checkErr("starting cluster", err)
// noop if no bootstraps
@ -109,55 +109,57 @@ func daemon(c *cli.Context) error {
func createCluster(
ctx context.Context,
c *cli.Context,
cfgMgr *config.Manager,
cfgHelper *cmdutils.ConfigHelper,
host host.Host,
pubsub *pubsub.PubSub,
dht *dht.IpfsDHT,
ident *config.Identity,
cfgs *cfgs,
raftStaging bool,
) (*ipfscluster.Cluster, error) {
cfgs := cfgHelper.Configs()
cfgMgr := cfgHelper.Manager()
ctx, err := tag.New(ctx, tag.Upsert(observations.HostKey, host.ID().Pretty()))
checkErr("tag context with host id", err)
api, err := rest.NewAPIWithHost(ctx, cfgs.apiCfg, host)
checkErr("creating REST API component", err)
var apis []ipfscluster.API
if cfgMgr.IsLoadedFromJSON(config.API, cfgs.Restapi.ConfigKey()) {
rest, err := rest.NewAPIWithHost(ctx, cfgs.Restapi, host)
checkErr("creating REST API component", err)
apis = append(apis, rest)
}
apis := []ipfscluster.API{api}
if cfgMgr.IsLoadedFromJSON(config.API, cfgs.ipfsproxyCfg.ConfigKey()) {
proxy, err := ipfsproxy.New(cfgs.ipfsproxyCfg)
if cfgMgr.IsLoadedFromJSON(config.API, cfgs.Ipfsproxy.ConfigKey()) {
proxy, err := ipfsproxy.New(cfgs.Ipfsproxy)
checkErr("creating IPFS Proxy component", err)
apis = append(apis, proxy)
}
connector, err := ipfshttp.NewConnector(cfgs.ipfshttpCfg)
connector, err := ipfshttp.NewConnector(cfgs.Ipfshttp)
checkErr("creating IPFS Connector component", err)
tracker := setupPinTracker(
c.String("pintracker"),
host,
cfgs.maptrackerCfg,
cfgs.statelessTrackerCfg,
cfgs.clusterCfg.Peername,
cfgs.Maptracker,
cfgs.Statelesstracker,
cfgs.Cluster.Peername,
)
informer, alloc := setupAllocation(
c.String("alloc"),
cfgs.diskInfCfg,
cfgs.numpinInfCfg,
)
informer, err := disk.NewInformer(cfgs.Diskinf)
checkErr("creating disk informer", err)
alloc := descendalloc.NewAllocator()
ipfscluster.ReadyTimeout = cfgs.raftCfg.WaitForLeaderTimeout + 5*time.Second
ipfscluster.ReadyTimeout = cfgs.Raft.WaitForLeaderTimeout + 5*time.Second
err = observations.SetupMetrics(cfgs.metricsCfg)
err = observations.SetupMetrics(cfgs.Metrics)
checkErr("setting up Metrics", err)
tracer, err := observations.SetupTracing(cfgs.tracingCfg)
tracer, err := observations.SetupTracing(cfgs.Tracing)
checkErr("setting up Tracing", err)
store := setupDatastore(c.String("consensus"), ident, cfgs)
store := setupDatastore(c.String("consensus"), cfgHelper.Identity(), cfgs)
cons, err := setupConsensus(
c.String("consensus"),
@ -178,7 +180,7 @@ func createCluster(
peersF = cons.Peers
}
mon, err := pubsubmon.New(ctx, cfgs.pubsubmonCfg, pubsub, peersF)
mon, err := pubsubmon.New(ctx, cfgs.Pubsubmon, pubsub, peersF)
if err != nil {
store.Close()
checkErr("setting up PeerMonitor", err)
@ -188,7 +190,7 @@ func createCluster(
ctx,
host,
dht,
cfgs.clusterCfg,
cfgs.Cluster,
store,
cons,
apis,
@ -268,31 +270,6 @@ Note that this may corrupt the local cluster state.
}
}
func setupAllocation(
name string,
diskInfCfg *disk.Config,
numpinInfCfg *numpin.Config,
) (ipfscluster.Informer, ipfscluster.PinAllocator) {
switch name {
case "disk", "disk-freespace":
informer, err := disk.NewInformer(diskInfCfg)
checkErr("creating informer", err)
return informer, descendalloc.NewAllocator()
case "disk-reposize":
informer, err := disk.NewInformer(diskInfCfg)
checkErr("creating informer", err)
return informer, ascendalloc.NewAllocator()
case "numpin", "pincount":
informer, err := numpin.NewInformer(numpinInfCfg)
checkErr("creating informer", err)
return informer, ascendalloc.NewAllocator()
default:
err := errors.New("unknown allocation strategy")
checkErr("", err)
return nil, nil
}
}
func setupPinTracker(
name string,
h host.Host,
@ -319,9 +296,10 @@ func setupPinTracker(
func setupDatastore(
consensus string,
ident *config.Identity,
cfgs *cfgs,
cfgs *cmdutils.Configs,
) ds.Datastore {
stmgr := newStateManager(consensus, ident, cfgs)
stmgr, err := cmdutils.NewStateManager(consensus, ident, cfgs)
checkErr("creating state manager", err)
store, err := stmgr.GetStore()
checkErr("creating datastore", err)
return store
@ -332,7 +310,7 @@ func setupConsensus(
h host.Host,
dht *dht.IpfsDHT,
pubsub *pubsub.PubSub,
cfgs *cfgs,
cfgs *cmdutils.Configs,
store ds.Datastore,
raftStaging bool,
) (ipfscluster.Consensus, error) {
@ -340,7 +318,7 @@ func setupConsensus(
case "raft":
rft, err := raft.NewConsensus(
h,
cfgs.raftCfg,
cfgs.Raft,
store,
raftStaging,
)
@ -353,7 +331,7 @@ func setupConsensus(
h,
dht,
pubsub,
cfgs.crdtCfg,
cfgs.Crdt,
store,
)
if err != nil {

View File

@ -7,6 +7,7 @@ import (
"path"
fslock "github.com/ipfs/go-fs-lock"
"github.com/ipfs/ipfs-cluster/cmdutils"
)
// lock logic heavily inspired by go-ipfs/repo/fsrepo/lock/lock.go
@ -28,7 +29,8 @@ func (l *lock) lock() {
}
// we should have a config folder whenever we try to lock
makeConfigFolder()
cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath)
cfgHelper.MakeConfigFolder()
// set the lock file within this function
logger.Debug("checking lock")

View File

@ -12,7 +12,7 @@ import (
"strings"
ipfscluster "github.com/ipfs/ipfs-cluster"
"github.com/ipfs/ipfs-cluster/config"
"github.com/ipfs/ipfs-cluster/cmdutils"
"github.com/ipfs/ipfs-cluster/pstoremgr"
"github.com/ipfs/ipfs-cluster/version"
peer "github.com/libp2p/go-libp2p-core/peer"
@ -28,7 +28,6 @@ const programName = `ipfs-cluster-service`
// flag defaults
const (
defaultAllocation = "disk-freespace"
defaultPinTracker = "map"
defaultLogLevel = "info"
)
@ -222,12 +221,17 @@ func main() {
This command will initialize a new %s configuration file and, if it
does already exist, generate a new %s for %s.
By default, %s requires a cluster secret. This secret will be
automatically generated, but can be manually provided with --custom-secret
(in which case it will be prompted), or by setting the CLUSTER_SECRET
environment variable.
If the optional [source-url] is given, the generated configuration file
will refer to it. The source configuration will be fetched from its source
URL during the launch of the daemon. If not, a default standard configuration
file will be created.
Note that the --force first-level-flag allows to overwrite an existing
In the latter case, a cluster secret will be generated as required by %s.
Alternatively, this secret can be manually provided with --custom-secret (in
which case it will be prompted), or by setting the CLUSTER_SECRET environment
variable.
Note that the --force flag allows to overwrite an existing
configuration with default values. To generate a new identity, please
remove the %s file first and clean any Raft state.
@ -243,22 +247,24 @@ multiaddresses.
programName,
DefaultIdentityFile,
),
ArgsUsage: " ",
ArgsUsage: "[http-source-url]",
Flags: []cli.Flag{
cli.BoolFlag{
Name: "custom-secret, s",
Usage: "prompt for the cluster secret",
Usage: "prompt for the cluster secret (when no source specified)",
},
cli.StringFlag{
Name: "peers",
Usage: "comma-separated list of multiaddresses to init with",
Usage: "comma-separated list of multiaddresses to init with (see help)",
},
cli.BoolFlag{
Name: "force, f",
Usage: "overwrite configuration without prompting",
},
},
Action: func(c *cli.Context) error {
userSecret, userSecretDefined := userProvidedSecret(c.Bool("custom-secret"))
cfgMgr, cfgs := makeConfigs()
defer cfgMgr.Shutdown() // wait for saves
cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath)
defer cfgHelper.Manager().Shutdown() // wait for saves
configExists := false
if _, err := os.Stat(configPath); !os.IsNotExist(err) {
@ -284,23 +290,26 @@ multiaddresses.
)
// --force allows override of the prompt
if !c.GlobalBool("force") {
if !c.Bool("force") {
if !yesNoPrompt(confirm) {
return nil
}
}
}
// Generate defaults for all registered components
err := cfgMgr.Default()
checkErr("generating default configuration", err)
// Set url. If exists, it will be the only thing saved.
cfgHelper.Manager().Source = c.Args().First()
err = cfgMgr.ApplyEnvVars()
// Generate defaults for all registered components
err := cfgHelper.Manager().Default()
checkErr("generating default configuration", err)
err = cfgHelper.Manager().ApplyEnvVars()
checkErr("applying environment variables to configuration", err)
userSecret, userSecretDefined := userProvidedSecret(c.Bool("custom-secret") && !c.Args().Present())
// Set user secret
if userSecretDefined {
cfgs.clusterCfg.Secret = userSecret
cfgHelper.Configs().Cluster.Secret = userSecret
}
peersOpt := c.String("peers")
@ -316,29 +325,29 @@ multiaddresses.
}
peers := ipfscluster.PeersFromMultiaddrs(multiAddrs)
cfgs.crdtCfg.TrustedPeers = peers
cfgs.raftCfg.InitPeerset = peers
cfgHelper.Configs().Crdt.TrustedPeers = peers
cfgHelper.Configs().Raft.InitPeerset = peers
}
// Save config. Creates the folder.
// Sets BaseDir in components.
saveConfig(cfgMgr)
cfgHelper.SaveConfigToDisk()
if !identityExists {
// Create a new identity and save it
ident, err := config.NewIdentity()
ident := cfgHelper.Identity()
err := ident.Default()
checkErr("generating an identity", err)
err = ident.ApplyEnvVars()
checkErr("applying environment variables to the identity", err)
err = ident.SaveJSON(identityPath)
err = cfgHelper.SaveIdentityToDisk()
checkErr("saving "+DefaultIdentityFile, err)
out("new identity written to %s\n", identityPath)
}
// Initialize peerstore file - even if empty
peerstorePath := cfgs.clusterCfg.GetPeerstorePath()
peerstorePath := cfgHelper.Configs().Cluster.GetPeerstorePath()
peerManager := pstoremgr.New(context.Background(), nil, peerstorePath)
addrInfos, err := peer.AddrInfosFromP2pAddrs(multiAddrs...)
checkErr("getting AddrInfos from peer multiaddresses", err)
@ -370,11 +379,6 @@ multiaddresses.
Name: "consensus",
Usage: "shared state management provider [raft,crdt]",
},
cli.StringFlag{
Name: "alloc, a",
Value: defaultAllocation,
Usage: "allocation strategy to use [disk-freespace,disk-reposize,numpin].",
},
cli.StringFlag{
Name: "pintracker",
Value: defaultPinTracker,
@ -423,9 +427,7 @@ By default, the state will be printed to stdout.
locker.lock()
defer locker.tryUnlock()
cfgMgr, ident, cfgs := makeAndLoadConfigs()
defer cfgMgr.Shutdown()
mgr := newStateManager(c.String("consensus"), ident, cfgs)
mgr := getStateManager(c.String("consensus"))
var w io.WriteCloser
var err error
@ -476,9 +478,7 @@ to import. If no argument is provided, stdin will be used.
return nil
}
cfgMgr, ident, cfgs := makeAndLoadConfigs()
defer cfgMgr.Shutdown()
mgr := newStateManager(c.String("consensus"), ident, cfgs)
mgr := getStateManager(c.String("consensus"))
// Get the importing file path
importFile := c.Args().First()
@ -528,9 +528,7 @@ to all effects. Peers may need to bootstrap and sync from scratch after this.
return nil
}
cfgMgr, ident, cfgs := makeAndLoadConfigs()
defer cfgMgr.Shutdown()
mgr := newStateManager(c.String("consensus"), ident, cfgs)
mgr := getStateManager(c.String("consensus"))
checkErr("cleaning state", mgr.Clean())
logger.Info("data correctly cleaned up")
return nil
@ -607,3 +605,24 @@ func yesNoPrompt(prompt string) bool {
}
return false
}
func loadConfigHelper() *cmdutils.ConfigHelper {
// Load all the configurations and identity
cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath)
err := cfgHelper.LoadFromDisk()
checkErr("loading identity or configurations", err)
return cfgHelper
}
func getStateManager(consensus string) cmdutils.StateManager {
cfgHelper := loadConfigHelper()
// since we won't save configs we can shutdown
cfgHelper.Manager().Shutdown()
mgr, err := cmdutils.NewStateManager(
consensus,
cfgHelper.Identity(),
cfgHelper.Configs(),
)
checkErr("creating state manager,", err)
return mgr
}

3
cmdutils/cmdutils.go Normal file
View File

@ -0,0 +1,3 @@
// Package cmdutils contains utilities to facilitate building of command line
// applications launching cluster peers.
package cmdutils

224
cmdutils/configs.go Normal file
View File

@ -0,0 +1,224 @@
package cmdutils
import (
"fmt"
"os"
"path/filepath"
"github.com/pkg/errors"
ipfscluster "github.com/ipfs/ipfs-cluster"
"github.com/ipfs/ipfs-cluster/api/ipfsproxy"
"github.com/ipfs/ipfs-cluster/api/rest"
"github.com/ipfs/ipfs-cluster/config"
"github.com/ipfs/ipfs-cluster/consensus/crdt"
"github.com/ipfs/ipfs-cluster/consensus/raft"
"github.com/ipfs/ipfs-cluster/datastore/badger"
"github.com/ipfs/ipfs-cluster/informer/disk"
"github.com/ipfs/ipfs-cluster/informer/numpin"
"github.com/ipfs/ipfs-cluster/ipfsconn/ipfshttp"
"github.com/ipfs/ipfs-cluster/monitor/pubsubmon"
"github.com/ipfs/ipfs-cluster/observations"
"github.com/ipfs/ipfs-cluster/pintracker/maptracker"
"github.com/ipfs/ipfs-cluster/pintracker/stateless"
)
// Configs carries config types used by a Cluster Peer.
type Configs struct {
Cluster *ipfscluster.Config
Restapi *rest.Config
Ipfsproxy *ipfsproxy.Config
Ipfshttp *ipfshttp.Config
Raft *raft.Config
Crdt *crdt.Config
Maptracker *maptracker.Config
Statelesstracker *stateless.Config
Pubsubmon *pubsubmon.Config
Diskinf *disk.Config
// NOTE(review): Numpininf is not initialized nor registered by
// ConfigHelper.init in this file, so it remains nil — confirm no
// caller dereferences it.
Numpininf *numpin.Config
Metrics *observations.MetricsConfig
Tracing *observations.TracingConfig
Badger *badger.Config
}
// ConfigHelper helps managing the configuration and identity files with the
// standard set of cluster components.
type ConfigHelper struct {
identity *config.Identity // empty until loaded from disk or generated
manager *config.Manager // manager with all component configs registered
configs *Configs // the component configs registered in manager
configPath string // path to the service.json configuration file
identityPath string // path to the identity.json file
}
// NewConfigHelper creates a config helper given the paths to the
// configuration and identity files. The returned helper has empty
// component configurations registered; use LoadFromDisk (or the manager's
// Default method) to populate them.
func NewConfigHelper(configPath, identityPath string) *ConfigHelper {
ch := &ConfigHelper{
configPath: configPath,
identityPath: identityPath,
}
ch.init()
return ch
}
// LoadConfigFromDisk parses the configuration from disk and applies any
// environment-variable overrides.
func (ch *ConfigHelper) LoadConfigFromDisk() error {
return ch.manager.LoadJSONFileAndEnv(ch.configPath)
}
// LoadIdentityFromDisk parses the identity from disk. When the identity
// file does not exist yet, the identity is extracted from the cluster
// section of the configuration file and saved separately (migration hack
// for the 0.11.0 config/identity split). Environment variables are applied
// to the identity in both paths.
func (ch *ConfigHelper) LoadIdentityFromDisk() error {
// load identity with hack for 0.11.0 - identity separation.
_, err := os.Stat(ch.identityPath)
ident := &config.Identity{}
// temporary hack to convert identity
if os.IsNotExist(err) {
clusterConfig, err := config.GetClusterConfig(ch.configPath)
if err != nil {
return err
}
err = ident.LoadJSON(clusterConfig)
if err != nil {
return errors.Wrap(err, "error loading identity")
}
// Persist the extracted identity so the hack only runs once.
err = ident.SaveJSON(ch.identityPath)
if err != nil {
return errors.Wrap(err, "error saving identity")
}
fmt.Fprintf(
os.Stderr,
"\nNOTICE: identity information extracted from %s and saved as %s.\n\n",
ch.configPath,
ch.identityPath,
)
} else { // leave this part when the hack is removed.
err = ident.LoadJSONFromFile(ch.identityPath)
if err != nil {
return fmt.Errorf("error loading identity from %s: %s", ch.identityPath, err)
}
}
err = ident.ApplyEnvVars()
if err != nil {
return errors.Wrap(err, "error applying environment variables to the identity")
}
ch.identity = ident
return nil
}
// LoadFromDisk loads both configuration and identity from disk. The
// configuration is loaded first; LoadIdentityFromDisk may need the
// configuration file to extract a not-yet-separated identity.
func (ch *ConfigHelper) LoadFromDisk() error {
err := ch.LoadConfigFromDisk()
if err != nil {
return err
}
return ch.LoadIdentityFromDisk()
}
// Identity returns the Identity object. It returns an empty identity
// if not loaded yet (see LoadIdentityFromDisk).
func (ch *ConfigHelper) Identity() *config.Identity {
return ch.identity
}
// Manager returns the config manager with all the
// cluster configurations registered (see init).
func (ch *ConfigHelper) Manager() *config.Manager {
return ch.manager
}
// Configs returns the Configs object which holds all the cluster
// configurations. Configurations are empty if they have not been loaded from
// disk.
func (ch *ConfigHelper) Configs() *Configs {
return ch.configs
}
// register all current cluster components
// init creates empty configurations for each component and registers them
// with a new config.Manager under their respective sections.
// NOTE(review): cfgs.Numpininf is neither initialized nor registered here,
// so it stays nil — confirm this is intended (the numpin informer was
// dropped from the daemon in this change).
func (ch *ConfigHelper) init() {
man := config.NewManager()
cfgs := &Configs{
Cluster: &ipfscluster.Config{},
Restapi: &rest.Config{},
Ipfsproxy: &ipfsproxy.Config{},
Ipfshttp: &ipfshttp.Config{},
Raft: &raft.Config{},
Crdt: &crdt.Config{},
Maptracker: &maptracker.Config{},
Statelesstracker: &stateless.Config{},
Pubsubmon: &pubsubmon.Config{},
Diskinf: &disk.Config{},
Metrics: &observations.MetricsConfig{},
Tracing: &observations.TracingConfig{},
Badger: &badger.Config{},
}
man.RegisterComponent(config.Cluster, cfgs.Cluster)
man.RegisterComponent(config.API, cfgs.Restapi)
man.RegisterComponent(config.API, cfgs.Ipfsproxy)
man.RegisterComponent(config.IPFSConn, cfgs.Ipfshttp)
man.RegisterComponent(config.Consensus, cfgs.Raft)
man.RegisterComponent(config.Consensus, cfgs.Crdt)
man.RegisterComponent(config.PinTracker, cfgs.Maptracker)
man.RegisterComponent(config.PinTracker, cfgs.Statelesstracker)
man.RegisterComponent(config.Monitor, cfgs.Pubsubmon)
man.RegisterComponent(config.Informer, cfgs.Diskinf)
man.RegisterComponent(config.Observations, cfgs.Metrics)
man.RegisterComponent(config.Observations, cfgs.Tracing)
man.RegisterComponent(config.Datastore, cfgs.Badger)
ch.identity = &config.Identity{}
ch.manager = man
ch.configs = cfgs
}
// MakeConfigFolder creates the folder to hold
// configuration and identity files (mode 0700) if it does not exist yet.
func (ch *ConfigHelper) MakeConfigFolder() error {
f := filepath.Dir(ch.configPath)
if _, err := os.Stat(f); os.IsNotExist(err) {
err := os.MkdirAll(f, 0700)
if err != nil {
return err
}
}
return nil
}
// SaveConfigToDisk saves the configuration file to disk, creating the
// configuration folder first when needed.
func (ch *ConfigHelper) SaveConfigToDisk() error {
err := ch.MakeConfigFolder()
if err != nil {
return err
}
return ch.manager.SaveJSON(ch.configPath)
}
// SaveIdentityToDisk saves the identity file to disk, creating the
// configuration folder first when needed.
func (ch *ConfigHelper) SaveIdentityToDisk() error {
err := ch.MakeConfigFolder()
if err != nil {
return err
}
return ch.Identity().SaveJSON(ch.identityPath)
}
// SetupTracing propagates tracingCfg.EnableTracing to all other
// configurations. Use only when identity has been loaded or generated. The
// forceEnabled parameter allows to override the EnableTracing value.
func (ch *ConfigHelper) SetupTracing(forceEnabled bool) {
// The forced value (CLI flag) wins; otherwise keep the config value.
enabled := forceEnabled || ch.configs.Tracing.EnableTracing
ch.configs.Tracing.ClusterID = ch.Identity().ID.Pretty()
ch.configs.Tracing.ClusterPeername = ch.configs.Cluster.Peername
ch.configs.Tracing.EnableTracing = enabled
ch.configs.Cluster.Tracing = enabled
ch.configs.Raft.Tracing = enabled
ch.configs.Crdt.Tracing = enabled
ch.configs.Restapi.Tracing = enabled
ch.configs.Ipfshttp.Tracing = enabled
ch.configs.Ipfsproxy.Tracing = enabled
}

View File

@ -1,4 +1,4 @@
package main
package cmdutils
import (
"context"
@ -20,30 +20,33 @@ import (
ds "github.com/ipfs/go-datastore"
)
type stateManager interface {
// StateManager is the interface that allows to import, export and clean
// different cluster states depending on the consensus component used.
type StateManager interface {
ImportState(io.Reader) error
ExportState(io.Writer) error
GetStore() (ds.Datastore, error)
Clean() error
}
func newStateManager(consensus string, ident *config.Identity, cfgs *cfgs) stateManager {
// NewStateManager returns an state manager implementation for the given
// consensus ("raft" or "crdt"). It will need initialized configs.
func NewStateManager(consensus string, ident *config.Identity, cfgs *Configs) (StateManager, error) {
switch consensus {
case "raft":
return &raftStateManager{ident, cfgs}
return &raftStateManager{ident, cfgs}, nil
case "crdt":
return &crdtStateManager{ident, cfgs}
return &crdtStateManager{ident, cfgs}, nil
case "":
checkErr("", errors.New("unspecified consensus component"))
return nil, errors.New("unspecified consensus component")
default:
checkErr("", fmt.Errorf("unknown consensus component '%s'", consensus))
return nil, fmt.Errorf("unknown consensus component '%s'", consensus)
}
return nil
}
type raftStateManager struct {
ident *config.Identity
cfgs *cfgs
cfgs *Configs
}
func (raftsm *raftStateManager) GetStore() (ds.Datastore, error) {
@ -51,7 +54,7 @@ func (raftsm *raftStateManager) GetStore() (ds.Datastore, error) {
}
func (raftsm *raftStateManager) getOfflineState(store ds.Datastore) (state.State, error) {
return raft.OfflineState(raftsm.cfgs.raftCfg, store)
return raft.OfflineState(raftsm.cfgs.Raft, store)
}
func (raftsm *raftStateManager) ImportState(r io.Reader) error {
@ -73,12 +76,12 @@ func (raftsm *raftStateManager) ImportState(r io.Reader) error {
if err != nil {
return err
}
pm := pstoremgr.New(context.Background(), nil, raftsm.cfgs.clusterCfg.GetPeerstorePath())
pm := pstoremgr.New(context.Background(), nil, raftsm.cfgs.Cluster.GetPeerstorePath())
raftPeers := append(
ipfscluster.PeersFromMultiaddrs(pm.LoadPeerstore()),
raftsm.ident.ID,
)
return raft.SnapshotSave(raftsm.cfgs.raftCfg, st, raftPeers)
return raft.SnapshotSave(raftsm.cfgs.Raft, st, raftPeers)
}
func (raftsm *raftStateManager) ExportState(w io.Writer) error {
@ -95,16 +98,16 @@ func (raftsm *raftStateManager) ExportState(w io.Writer) error {
}
func (raftsm *raftStateManager) Clean() error {
return raft.CleanupRaft(raftsm.cfgs.raftCfg)
return raft.CleanupRaft(raftsm.cfgs.Raft)
}
type crdtStateManager struct {
ident *config.Identity
cfgs *cfgs
cfgs *Configs
}
func (crdtsm *crdtStateManager) GetStore() (ds.Datastore, error) {
bds, err := badger.New(crdtsm.cfgs.badgerCfg)
bds, err := badger.New(crdtsm.cfgs.Badger)
if err != nil {
return nil, err
}
@ -112,7 +115,7 @@ func (crdtsm *crdtStateManager) GetStore() (ds.Datastore, error) {
}
func (crdtsm *crdtStateManager) getOfflineState(store ds.Datastore) (state.BatchingState, error) {
return crdt.OfflineState(crdtsm.cfgs.crdtCfg, store)
return crdt.OfflineState(crdtsm.cfgs.Crdt, store)
}
func (crdtsm *crdtStateManager) ImportState(r io.Reader) error {
@ -158,7 +161,7 @@ func (crdtsm *crdtStateManager) Clean() error {
return err
}
defer store.Close()
return crdt.Clean(context.Background(), crdtsm.cfgs.crdtCfg, store)
return crdt.Clean(context.Background(), crdtsm.cfgs.Crdt, store)
}
func importState(r io.Reader, st state.State) error {

View File

@ -9,6 +9,7 @@ import (
"errors"
"fmt"
"io/ioutil"
"net/http"
"path/filepath"
"sync"
"time"
@ -22,6 +23,8 @@ var logger = logging.Logger("config")
// it needs saving.
var ConfigSaveInterval = time.Second
var errSourceRedirect = errors.New("a sourced configuration cannot point to another source")
// The ComponentConfig interface allows components to define configurations
// which can be managed as part of the ipfs-cluster configuration file by the
// Manager.
@ -103,6 +106,10 @@ type Manager struct {
// store originally parsed jsonConfig
jsonCfg *jsonConfig
// stores original source if any
Source string
sourceRedirs int // used avoid recursive source load
// map of components which has empty configuration
// in JSON file
@ -174,7 +181,8 @@ func (cfg *Manager) watchSave(save <-chan struct{}) {
// saved using json. Most configuration keys are converted into simple types
// like strings, and key names aim to be self-explanatory for the user.
type jsonConfig struct {
Cluster *json.RawMessage `json:"cluster"`
Source string `json:"source,omitempty"`
Cluster *json.RawMessage `json:"cluster,omitempty"`
Consensus jsonSection `json:"consensus,omitempty"`
API jsonSection `json:"api,omitempty"`
IPFSConn jsonSection `json:"ipfs_connector,omitempty"`
@ -336,6 +344,35 @@ func (cfg *Manager) LoadJSONFromFile(path string) error {
return err
}
// LoadJSONFromHTTPSource reads a Configuration file from a URL and parses
// it. A configuration fetched this way may not itself point to another
// source: errSourceRedirect is returned in that case. On success,
// cfg.Source records the URL so ToJSON serializes only the source pointer.
func (cfg *Manager) LoadJSONFromHTTPSource(url string) error {
	logger.Infof("loading configuration from %s", url)

	// Refuse recursive source loading before doing any network request
	// (the original checked only after fetching the body).
	if cfg.sourceRedirs > 0 {
		return errSourceRedirect
	}

	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// A non-2xx response (404 page, gateway error...) would otherwise be
	// handed to the JSON parser, producing a confusing parse error.
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return fmt.Errorf("unsuccessful request to %s: status %d", url, resp.StatusCode)
	}

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}

	cfg.sourceRedirs++
	// make sure the counter is always reset when function done
	defer func() { cfg.sourceRedirs = 0 }()

	if err := cfg.LoadJSON(body); err != nil {
		return err
	}
	cfg.Source = url
	return nil
}
// LoadJSONFileAndEnv calls LoadJSONFromFile followed by ApplyEnvVars,
// reading and parsing a Configuration file and then overriding fields
// with any values found in environment variables.
@ -361,6 +398,10 @@ func (cfg *Manager) LoadJSON(bs []byte) error {
}
cfg.jsonCfg = jcfg
// Handle remote source
if jcfg.Source != "" {
return cfg.LoadJSONFromHTTPSource(jcfg.Source)
}
// Load Cluster section. Needs to have been registered
if cfg.clusterConfig != nil && jcfg.Cluster != nil {
@ -446,6 +487,10 @@ func (cfg *Manager) ToJSON() ([]byte, error) {
return nil, err
}
if cfg.Source != "" {
return DefaultJSONMarshal(&jsonConfig{Source: cfg.Source})
}
jcfg := cfg.jsonCfg
if jcfg == nil {
jcfg = &jsonConfig{}

View File

@ -1,55 +1,14 @@
// Package config provides interfaces and utilities for different Cluster
// components to register, read, write and validate configuration sections
// stored in a central configuration file.
package config_test
package config
import (
"bytes"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"github.com/ipfs/ipfs-cluster/config"
)
type mockCfg struct {
config.Saver
}
func (m *mockCfg) ConfigKey() string {
return "mock"
}
func (m *mockCfg) LoadJSON([]byte) error {
return nil
}
func (m *mockCfg) ToJSON() ([]byte, error) {
return []byte(`{"a":"b"}`), nil
}
func (m *mockCfg) Default() error {
return nil
}
func (m *mockCfg) ApplyEnvVars() error {
return nil
}
func (m *mockCfg) Validate() error {
return nil
}
func setupConfigManager() *config.Manager {
cfg := config.NewManager()
mockCfg := &mockCfg{}
cfg.RegisterComponent(config.Cluster, mockCfg)
for _, sect := range config.SectionTypes() {
cfg.RegisterComponent(sect, mockCfg)
}
return cfg
}
func TestManager_ToJSON(t *testing.T) {
want := []byte(`{
var mockJSON = []byte(`{
"cluster": {
"a": "b"
},
@ -104,6 +63,46 @@ func TestManager_ToJSON(t *testing.T) {
}
}
}`)
type mockCfg struct {
Saver
}
func (m *mockCfg) ConfigKey() string {
return "mock"
}
func (m *mockCfg) LoadJSON([]byte) error {
return nil
}
func (m *mockCfg) ToJSON() ([]byte, error) {
return []byte(`{"a":"b"}`), nil
}
func (m *mockCfg) Default() error {
return nil
}
func (m *mockCfg) ApplyEnvVars() error {
return nil
}
func (m *mockCfg) Validate() error {
return nil
}
func setupConfigManager() *Manager {
cfg := NewManager()
mockCfg := &mockCfg{}
cfg.RegisterComponent(Cluster, mockCfg)
for _, sect := range SectionTypes() {
cfg.RegisterComponent(sect, mockCfg)
}
return cfg
}
func TestManager_ToJSON(t *testing.T) {
cfgMgr := setupConfigManager()
err := cfgMgr.Default()
if err != nil {
@ -114,7 +113,66 @@ func TestManager_ToJSON(t *testing.T) {
t.Error(err)
}
if !bytes.Equal(got, want) {
t.Errorf("mismatch between got: %s and want: %s", got, want)
if !bytes.Equal(got, mockJSON) {
t.Errorf("mismatch between got: %s and want: %s", got, mockJSON)
}
}
func TestLoadFromHTTPSourceRedirect(t *testing.T) {
mux := http.NewServeMux()
mux.HandleFunc("/config", func(w http.ResponseWriter, r *http.Request) {
json := fmt.Sprintf(`{ "source" : "http://%s/config" }`, r.Host)
w.Write([]byte(json))
})
s := httptest.NewServer(mux)
defer s.Close()
cfgMgr := NewManager()
err := cfgMgr.LoadJSONFromHTTPSource(s.URL + "/config")
if err != errSourceRedirect {
t.Fatal("expected errSourceRedirect")
}
}
func TestLoadFromHTTPSource(t *testing.T) {
mux := http.NewServeMux()
mux.HandleFunc("/config", func(w http.ResponseWriter, r *http.Request) {
w.Write(mockJSON)
})
s := httptest.NewServer(mux)
defer s.Close()
cfgMgr := setupConfigManager()
err := cfgMgr.LoadJSONFromHTTPSource(s.URL + "/config")
if err != nil {
t.Fatal("unexpected error")
}
cfgMgr.Source = ""
newJSON, err := cfgMgr.ToJSON()
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(newJSON, mockJSON) {
t.Error("generated json different than loaded")
}
}
func TestSaveWithSource(t *testing.T) {
cfgMgr := setupConfigManager()
cfgMgr.Default()
cfgMgr.Source = "http://a.b.c"
newJSON, err := cfgMgr.ToJSON()
if err != nil {
t.Fatal(err)
}
expected := []byte(`{
"source": "http://a.b.c"
}`)
if !bytes.Equal(newJSON, expected) {
t.Error("should have generated a source-only json")
}
}

View File

@ -35,25 +35,30 @@ type identityJSON struct {
PrivateKey string `json:"private_key"`
}
// NewIdentity generate a public-private keypair and returns a new Identity.
// NewIdentity returns a new random identity.
func NewIdentity() (*Identity, error) {
ident := &Identity{}
err := ident.Default()
return ident, err
}
// Default generates a random keypair for this identity.
func (ident *Identity) Default() error {
// pid and private key generation
priv, pub, err := crypto.GenerateKeyPair(
DefaultConfigCrypto,
DefaultConfigKeyLength,
)
if err != nil {
return nil, err
return err
}
pid, err := peer.IDFromPublicKey(pub)
if err != nil {
return nil, err
return err
}
return &Identity{
ID: pid,
PrivateKey: priv,
}, nil
ident.ID = pid
ident.PrivateKey = priv
return nil
}
// ConfigKey returns a human-readable string to identify

View File

@ -7,15 +7,13 @@ test_description="Test init functionality"
test_expect_success "cluster-service init with --peers succeeds and fills peerstore" '
PEER1=/ip4/192.168.0.129/tcp/9196/ipfs/12D3KooWRN8KRjpyg9rsW2w7StbBRGper65psTZm68cjud9KAkaW
PEER2=/ip4/192.168.0.129/tcp/9196/ipfs/12D3KooWPwrYNj7VficHw5qYidepMGA85756kYgMdNmRM9A1ZHjN
echo $PEER1 >> testPeerstore
echo $PEER2 >> testPeerstore
ipfs-cluster-service --config "test-config" init --peers $PEER1,$PEER2 &&
grep -q $PEER1 test-config/peerstore &&
grep -q $PEER2 test-config/peerstore
'
test_expect_success "cluster-service init without --peers succeeds and creates empty peerstore" '
ipfs-cluster-service -f --config "test-config" init &&
ipfs-cluster-service --config "test-config" init -f &&
[ -f "test-config/peerstore" ] &&
[ ! -s "test-config/peerstore" ]
'