// The ipfs-cluster-service application.
package main

import (
	"bufio"
	"context"
	"errors"
	"fmt"
	"io"
	"os"
	"os/user"
	"path/filepath"
	"strings"

	ipfslite "github.com/hsanjuan/ipfs-lite"
	ipfscluster "github.com/ipfs-cluster/ipfs-cluster"
	"github.com/ipfs-cluster/ipfs-cluster/api"
	"github.com/ipfs-cluster/ipfs-cluster/cmdutils"
	"github.com/ipfs-cluster/ipfs-cluster/consensus/crdt"
	"github.com/ipfs-cluster/ipfs-cluster/pstoremgr"
	"github.com/ipfs-cluster/ipfs-cluster/version"
	peer "github.com/libp2p/go-libp2p/core/peer"
	ma "github.com/multiformats/go-multiaddr"

	semver "github.com/blang/semver"
	"github.com/ipfs/go-datastore"
	"github.com/ipfs/go-datastore/namespace"
	dscrdt "github.com/ipfs/go-ds-crdt"
	logging "github.com/ipfs/go-log/v2"
	cli "github.com/urfave/cli"
)

// ProgramName of this application
const programName = "ipfs-cluster-service"

// flag defaults
const (
	defaultLogLevel  = "info"
	defaultConsensus = "crdt"
	defaultDatastore = "pebble"
)

const (
	stateCleanupPrompt           = "The peer state will be removed. Existing pins may be lost."
	configurationOverwritePrompt = "The configuration file will be overwritten."
)

// We store a commit id here
var commit string

// Description provides a short summary of the functionality of this tool
var Description = fmt.Sprintf(`
%s runs an IPFS Cluster peer.

A peer participates in the cluster consensus, follows a distributed log
of pinning and unpinning requests and manages pinning operations to a
configured IPFS daemon.

This peer also provides an API for cluster management, an IPFS Proxy API which
forwards requests to IPFS and a number of components for internal communication
using LibP2P. This is a simplified view of the components:

             +------------------+
             | ipfs-cluster-ctl |
             +---------+--------+
                       |
                       | HTTP(s)
ipfs-cluster-service   |                           HTTP
+----------+--------+--v--+----------------------+      +-------------+
| RPC      | Peer 1 | API | IPFS Connector/Proxy +------> IPFS daemon |
+----^-----+--------+-----+----------------------+      +-------------+
     | libp2p
     |
+----v-----+--------+-----+----------------------+      +-------------+
| RPC      | Peer 2 | API | IPFS Connector/Proxy +------> IPFS daemon |
+----^-----+--------+-----+----------------------+      +-------------+
     |
     |
+----v-----+--------+-----+----------------------+      +-------------+
| RPC      | Peer 3 | API | IPFS Connector/Proxy +------> IPFS daemon |
+----------+--------+-----+----------------------+      +-------------+


%s needs valid configuration and identity files to run.
These are independent from IPFS. The identity includes its own
libp2p key-pair. They can be initialized with "init" and their
default locations are ~/%s/%s
and ~/%s/%s.

For feedback, bug reports or any additional information, visit
https://github.com/ipfs-cluster/ipfs-cluster.


EXAMPLES:

Initial configuration:

$ ipfs-cluster-service init

Launch a cluster:

$ ipfs-cluster-service daemon

Launch a peer and join an existing cluster:

$ ipfs-cluster-service daemon --bootstrap /ip4/192.168.1.2/tcp/9096/p2p/QmPSoSaPXpyunaBwHs1rZBKYSqRV4bLRk32VGYLuvdrypL

Customize logs using the --loglevel flag. To customize component-level
logging, pass a comma-separated list of component-identifier:log-level
pairs, or a plain level without an identifier to set the overall loglevel.
Valid loglevels are critical, error, warning, notice, info and debug.

$ ipfs-cluster-service --loglevel info,cluster:debug,pintracker:debug daemon
`,
	programName,
	programName,
	DefaultFolder,
	DefaultConfigFile,
	DefaultFolder,
	DefaultIdentityFile,
)

var logger = logging.Logger("service")

// Default location for the configurations and data
var (
	// DefaultFolder is the name of the cluster folder
	DefaultFolder = ".ipfs-cluster"
	// DefaultPath is set on init() to $HOME/DefaultFolder
	// and holds all the ipfs-cluster data
	DefaultPath string
	// The name of the configuration file inside DefaultPath
	DefaultConfigFile = "service.json"
	// The name of the identity file inside DefaultPath
	DefaultIdentityFile = "identity.json"
)

var (
	configPath   string
	identityPath string
)
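
// init sets the version build metadata from the compiled-in git commit and
// computes DefaultPath from the user's home directory.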
func init() {
	// Set build information.
	if build, err := semver.NewBuildVersion(commit); err == nil {
		version.Version.Build = []string{"git" + build}
	}

	// We try guessing the user's home from the HOME variable. This
	// allows HOME hacks for things like Snapcraft builds. HOME
	// should be set on all UNIX systems by the OS. Alternatively, we fall
	// back to usr.HomeDir (which should work on Windows etc.).
	home := os.Getenv("HOME")
	if home == "" {
		usr, err := user.Current()
		if err != nil {
			panic(fmt.Sprintf("cannot get current user: %s", err))
		}
		home = usr.HomeDir
	}

	DefaultPath = filepath.Join(home, DefaultFolder)
}
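
// out writes a formatted message to stderr.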
func out(m string, a ...interface{}) {
	fmt.Fprintf(os.Stderr, m, a...)
}
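
// checkErr is a no-op when err is nil. Otherwise it prints the error to
// stderr, tries to release the execution lock held by the package-level
// locker and exits with a non-zero status.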
func checkErr(doing string, err error, args ...interface{}) {
	if err != nil {
		if len(args) > 0 {
			doing = fmt.Sprintf(doing, args...)
		}
		out("error %s: %s\n", doing, err)
		err = locker.tryUnlock()
		if err != nil {
			out("error releasing execution lock: %s\n", err)
		}
		os.Exit(1)
	}
}

func main() {
	app := cli.NewApp()
	app.Name = programName
	app.Usage = "IPFS Cluster peer"
	app.Description = Description
	//app.Copyright = "© Protocol Labs, Inc."
	app.Version = version.Version.String()
	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:   "config, c",
			Value:  DefaultPath,
			Usage:  "path to the configuration and data `FOLDER`",
			EnvVar: "IPFS_CLUSTER_PATH",
		},
		cli.BoolFlag{
			Name:  "force, f",
			Usage: "forcefully proceed with some actions, e.g. overwriting configuration",
		},
		cli.BoolFlag{
			Name:  "debug, d",
			Usage: "enable full debug logging (very verbose)",
		},
		cli.StringFlag{
			Name:   "loglevel, l",
			EnvVar: "IPFS_CLUSTER_LOG_LEVEL",
			Usage:  "set overall and component-wise log levels",
		},
	}
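
	// Before running any command, compute the configuration and identity
	// paths, configure logging and prepare the execution lock. setupLogLevel
	// and the lock type are defined elsewhere in this package.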
	app.Before = func(c *cli.Context) error {
		absPath, err := filepath.Abs(c.String("config"))
		if err != nil {
			return err
		}

		configPath = filepath.Join(absPath, DefaultConfigFile)
		identityPath = filepath.Join(absPath, DefaultIdentityFile)

		err = setupLogLevel(c.Bool("debug"), c.String("loglevel"))
		if err != nil {
			return err
		}
		locker = &lock{path: absPath}

		return nil
	}

	app.Commands = []cli.Command{
		{
			Name:  "init",
			Usage: "Creates a configuration and generates an identity",
			Description: fmt.Sprintf(`
This command will initialize a new %s configuration file and, if it
does not already exist, generate a new %s for %s.

If the optional [source-url] is given, the generated configuration file
will refer to it. The source configuration will be fetched from its source
URL during the launch of the daemon. If not, a default standard configuration
file will be created.

In the latter case, a cluster secret will be generated as required
by %s. Alternatively, this secret can be manually
provided with --custom-secret (in which case it will be prompted), or
by setting the CLUSTER_SECRET environment variable.

The --consensus flag allows selecting an alternative consensus component
for the newly-generated configuration.

Note that the --force flag allows overwriting an existing
configuration with default values. To generate a new identity, please
remove the %s file first and clean any Raft state.

By default, an empty peerstore file will be created too. Initial contents can
be provided with the --peers flag. Depending on the chosen consensus, the
"trusted_peers" list in the "crdt" configuration section and the
"init_peerset" list in the "raft" configuration section will be prefilled with
the peer IDs in the given multiaddresses.
`,

				DefaultConfigFile,
				DefaultIdentityFile,
				programName,
				programName,
				DefaultIdentityFile,
			),
			ArgsUsage: "[http-source-url]",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:  "consensus",
					Usage: "select consensus: 'crdt' or 'raft'",
					Value: defaultConsensus,
				},
				cli.StringFlag{
					Name:  "datastore",
					Usage: "select datastore: 'badger', 'badger3', 'leveldb' or 'pebble'",
					Value: defaultDatastore,
				},
				cli.BoolFlag{
					Name:  "custom-secret, s",
					Usage: "prompt for the cluster secret (when no source specified)",
				},
				cli.StringFlag{
					Name:  "peers",
					Usage: "comma-separated list of multiaddresses to init with (see help)",
				},
				cli.BoolFlag{
					Name:  "force, f",
					Usage: "overwrite configuration without prompting",
				},
				cli.BoolFlag{
					Name:  "randomports",
					Usage: "configure random ports to listen on instead of defaults",
				},
			},
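			// The init action validates the flag values, writes (or
			// overwrites) the configuration, generates an identity when none
			// exists yet, and initializes the peerstore file.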
			Action: func(c *cli.Context) error {
				consensus := c.String("consensus")
				switch consensus {
				case "raft", "crdt":
				default:
					checkErr("choosing consensus", errors.New("flag value must be set to 'raft' or 'crdt'"))
				}

				datastore := c.String("datastore")
				switch datastore {
				case "leveldb", "badger", "badger3", "pebble":
				default:
					checkErr("choosing datastore", errors.New("flag value must be set to 'leveldb', 'badger', 'badger3' or 'pebble'"))
				}

				cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath, consensus, datastore)
				defer cfgHelper.Manager().Shutdown() // wait for saves

				configExists := false
				if _, err := os.Stat(configPath); !os.IsNotExist(err) {
					configExists = true
				}

				identityExists := false
				if _, err := os.Stat(identityPath); !os.IsNotExist(err) {
					identityExists = true
				}

				if configExists || identityExists {
					// cluster might be running
					// acquire lock for config folder
					locker.lock()
					defer locker.tryUnlock()
				}

				if configExists {
					confirm := fmt.Sprintf(
						"%s Continue? [y/n]:",
						configurationOverwritePrompt,
					)

					// --force allows override of the prompt
					if !c.Bool("force") {
						if !yesNoPrompt(confirm) {
							return nil
						}
					}
				}

				// Set url. If it exists, it will be the only thing saved.
				cfgHelper.Manager().Source = c.Args().First()

				// Generate defaults for all registered components
				err := cfgHelper.Manager().Default()
				checkErr("generating default configuration", err)
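
				// --randomports replaces the default listen addresses with
				// randomized ports for the cluster, REST API, IPFS proxy and
				// pinning-service API endpoints.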
				if c.Bool("randomports") {
					cfgs := cfgHelper.Configs()

					cfgs.Cluster.ListenAddr, err = cmdutils.RandomizePorts(cfgs.Cluster.ListenAddr)
					checkErr("randomizing ports", err)
					cfgs.Restapi.HTTPListenAddr, err = cmdutils.RandomizePorts(cfgs.Restapi.HTTPListenAddr)
					checkErr("randomizing ports", err)
					cfgs.Ipfsproxy.ListenAddr, err = cmdutils.RandomizePorts(cfgs.Ipfsproxy.ListenAddr)
					checkErr("randomizing ports", err)
					cfgs.Pinsvcapi.HTTPListenAddr, err = cmdutils.RandomizePorts(cfgs.Pinsvcapi.HTTPListenAddr)
					checkErr("randomizing ports", err)
				}
				err = cfgHelper.Manager().ApplyEnvVars()
				checkErr("applying environment variables to configuration", err)
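
				// userProvidedSecret is defined elsewhere in this package; it
				// is assumed to prompt for (or otherwise obtain) a cluster
				// secret and report whether one was supplied.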
				userSecret, userSecretDefined := userProvidedSecret(c.Bool("custom-secret") && !c.Args().Present())
				// Set user secret
				if userSecretDefined {
					cfgHelper.Configs().Cluster.Secret = userSecret
				}
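
				// Pre-fill trusted/init peersets from --peers. Each entry is
				// expected to be a full peer multiaddress, e.g. (hypothetical)
				// /ip4/192.168.1.2/tcp/9096/p2p/<peer-id>.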
				peersOpt := c.String("peers")
				var multiAddrs []ma.Multiaddr
				if peersOpt != "" {
					addrs := strings.Split(peersOpt, ",")

					for _, addr := range addrs {
						addr = strings.TrimSpace(addr)
						multiAddr, err := ma.NewMultiaddr(addr)
						checkErr("parsing peer multiaddress: "+addr, err)
						multiAddrs = append(multiAddrs, multiAddr)
					}

					peers := ipfscluster.PeersFromMultiaddrs(multiAddrs)
					cfgHelper.Configs().Crdt.TrustAll = false
					cfgHelper.Configs().Crdt.TrustedPeers = peers
					cfgHelper.Configs().Raft.InitPeerset = peers
				}

				// Save config. Creates the folder.
				// Sets BaseDir in components.
				checkErr("saving default configuration", cfgHelper.SaveConfigToDisk())
				out("configuration written to %s.\n", configPath)

				if !identityExists {
					ident := cfgHelper.Identity()
					err := ident.Default()
					checkErr("generating an identity", err)

					err = ident.ApplyEnvVars()
					checkErr("applying environment variables to the identity", err)

					err = cfgHelper.SaveIdentityToDisk()
					checkErr("saving "+DefaultIdentityFile, err)
					out("new identity written to %s\n", identityPath)
				}

				// Initialize peerstore file - even if empty
				peerstorePath := cfgHelper.Configs().Cluster.GetPeerstorePath()
				peerManager := pstoremgr.New(context.Background(), nil, peerstorePath)
				addrInfos, err := peer.AddrInfosFromP2pAddrs(multiAddrs...)
				checkErr("getting AddrInfos from peer multiaddresses", err)
				err = peerManager.SavePeerstore(addrInfos)
				checkErr("saving peers to peerstore", err)
				if l := len(multiAddrs); l > 0 {
					out("peerstore written to %s with %d entries.\n", peerstorePath, len(multiAddrs))
				} else {
					out("new empty peerstore written to %s.\n", peerstorePath)
				}

				return nil
			},
		},
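		// "daemon" runs the IPFS Cluster peer itself and is also the default
		// command when none is given.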
		{
			Name:  "daemon",
			Usage: "Runs the IPFS Cluster peer (default)",
			Flags: []cli.Flag{
				cli.BoolFlag{
					Name:  "upgrade, u",
					Usage: "run state migrations before starting (deprecated/unused)",
				},
				cli.StringFlag{
					Name:  "bootstrap, j",
					Usage: "join a cluster providing a comma-separated list of existing peers' multiaddresses",
|
2018-04-28 22:22:23 +00:00
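// Illustrative sketch (not part of the original file): the commit message
// above describes --bootstrap accepting a comma-separated list of peer
// multiaddresses. A helper like the hypothetical one below could turn that
// flag value into multiaddresses; only the go-multiaddr and standard-library
// calls are real, the function name is made up.
func parseBootstrapsExample(flagVal string) ([]ma.Multiaddr, error) {
	var addrs []ma.Multiaddr
	for _, s := range strings.Split(flagVal, ",") {
		s = strings.TrimSpace(s)
		if s == "" {
			continue
		}
		addr, err := ma.NewMultiaddr(s)
		if err != nil {
			return nil, fmt.Errorf("invalid bootstrap multiaddress %q: %w", s, err)
		}
		addrs = append(addrs, addr)
	}
	return addrs, nil
}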
|
|
|
},
|
|
|
|
cli.BoolFlag{
|
|
|
|
Name: "leave, x",
|
|
|
|
Usage: "remove peer from cluster on exit. Overrides \"leave_on_shutdown\"",
|
|
|
|
Hidden: true,
|
|
|
|
},
|
2018-06-27 04:03:15 +00:00
|
|
|
cli.BoolFlag{
|
|
|
|
Name: "stats",
|
|
|
|
Usage: "enable stats collection",
|
|
|
|
},
|
|
|
|
cli.BoolFlag{
|
|
|
|
Name: "tracing",
|
|
|
|
Usage: "enable tracing collection",
|
|
|
|
},
|
2019-07-29 08:17:43 +00:00
|
|
|
cli.BoolFlag{
|
|
|
|
Name: "no-trust",
|
|
|
|
Usage: "do not trust bootstrap peers (only for \"crdt\" consensus)",
|
|
|
|
},
|
2018-02-08 19:27:43 +00:00
|
|
|
},
|
2017-10-20 10:45:42 +00:00
|
|
|
Action: daemon,
|
2017-02-02 13:34:51 +00:00
|
|
|
},
|
2017-11-28 22:45:10 +00:00
|
|
|
{
|
|
|
|
Name: "state",
|
2022-01-04 17:13:16 +00:00
|
|
|
Usage: "Manages the peer's persistent state (pinset)",
|
2017-11-28 22:45:10 +00:00
|
|
|
Subcommands: []cli.Command{
|
2022-01-04 17:13:16 +00:00
|
|
|
{
|
|
|
|
Name: "crdt",
|
|
|
|
Usage: "CRDT-state commands",
|
|
|
|
Before: func(c *cli.Context) error {
|
|
|
|
// Load all the configurations and identity
|
|
|
|
cfgHelper, err := cmdutils.NewLoadedConfigHelper(configPath, identityPath)
|
|
|
|
checkErr("loading configurations", err)
|
|
|
|
cfgs := cfgHelper.Configs()
|
|
|
|
defer cfgHelper.Manager().Shutdown()
|
|
|
|
|
|
|
|
if cfgHelper.GetConsensus() != cfgs.Crdt.ConfigKey() {
|
|
|
|
checkErr("", errors.New("crdt subcommands can only be run on peers initialized with crdt consensus"))
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
},
|
|
|
|
|
|
|
|
Subcommands: []cli.Command{
|
2022-09-26 17:33:46 +00:00
|
|
|
{
|
|
|
|
Name: "info",
|
|
|
|
Usage: "Print information about the CRDT store",
|
|
|
|
Description: `
|
|
|
|
This command prints basic information: current heads, dirty flag, etc.
|
|
|
|
`,
|
|
|
|
Flags: []cli.Flag{},
|
|
|
|
Action: func(c *cli.Context) error {
|
|
|
|
locker.lock()
|
|
|
|
defer locker.tryUnlock()
|
|
|
|
|
|
|
|
crdt := getCrdt()
|
|
|
|
info := crdt.InternalStats()
|
|
|
|
fmt.Printf(
|
|
|
|
"Number of heads: %d. Current max-height: %d. Dirty: %t\nHeads: %s",
|
|
|
|
len(info.Heads),
|
|
|
|
info.MaxHeight,
|
|
|
|
crdt.IsDirty(),
|
|
|
|
info.Heads,
|
|
|
|
)
|
|
|
|
return nil
|
|
|
|
},
|
|
|
|
},
|
2022-01-04 17:13:16 +00:00
|
|
|
{
|
|
|
|
Name: "dot",
|
|
|
|
Usage: "Write the CRDT-DAG as DOT file",
|
|
|
|
Description: `
|
|
|
|
This command generates a DOT file representing the CRDT-DAG of this node.
|
|
|
|
The DOT file can then be visualized, converted to SVG etc.
|
|
|
|
|
|
|
|
This is a debugging command to visualize what the DAG looks like and whether there
|
|
|
|
is a lot of branching. Large DAGs will generate large DOT files.
|
|
|
|
Use with caution!
|
|
|
|
`,
|
|
|
|
Flags: []cli.Flag{
|
|
|
|
cli.StringFlag{
|
|
|
|
Name: "file, f",
|
|
|
|
Value: "",
|
|
|
|
Usage: "writes to file instead of stdout",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
Action: func(c *cli.Context) error {
|
|
|
|
locker.lock()
|
|
|
|
defer locker.tryUnlock()
|
|
|
|
|
2022-09-26 17:33:46 +00:00
|
|
|
crdt := getCrdt()
|
2022-01-04 17:13:16 +00:00
|
|
|
|
2022-09-26 17:33:46 +00:00
|
|
|
var err error
|
2022-01-04 17:13:16 +00:00
|
|
|
var w io.WriteCloser
|
|
|
|
outputPath := c.String("file")
|
|
|
|
if outputPath == "" {
|
|
|
|
// Output to stdout
|
|
|
|
w = os.Stdout
|
|
|
|
} else {
|
|
|
|
// Create the export file
|
|
|
|
w, err = os.Create(outputPath)
|
|
|
|
checkErr("creating output file", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// 256KiB of buffer size.
|
|
|
|
buf := bufio.NewWriterSize(w, 1<<18)
|
|
|
|
defer buf.Flush()
|
|
|
|
|
|
|
|
logger.Info("initiating CDRT-DAG DOT file export. Export might take a long time on large graphs")
|
|
|
|
checkErr("generating graph", crdt.DotDAG(buf))
|
|
|
|
logger.Info("dot file ")
|
|
|
|
return nil
|
|
|
|
|
|
|
|
},
|
|
|
|
},
|
2022-09-26 17:33:46 +00:00
|
|
|
{
|
|
|
|
Name: "mark-dirty",
|
|
|
|
Usage: "Marks the CRDT-store as dirty",
|
|
|
|
Description: `
|
|
|
|
Marking the CRDT store as dirty will force-run a Repair operation on the next
|
|
|
|
run (i.e. next time the cluster peer is started).
|
|
|
|
`,
|
|
|
|
Flags: []cli.Flag{},
|
|
|
|
Action: func(c *cli.Context) error {
|
|
|
|
locker.lock()
|
|
|
|
defer locker.tryUnlock()
|
|
|
|
|
|
|
|
crdt := getCrdt()
|
|
|
|
crdt.MarkDirty()
|
|
|
|
fmt.Println("Datastore marked 'dirty'")
|
|
|
|
return nil
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Name: "mark-clean",
|
|
|
|
Usage: "Marks the CRDT-store as clean",
|
|
|
|
Description: `
|
|
|
|
This command removes the dirty-mark on the CRDT-store, which means no
|
|
|
|
DAG operations will be run.
|
|
|
|
`,
|
|
|
|
Flags: []cli.Flag{},
|
|
|
|
Action: func(c *cli.Context) error {
|
|
|
|
locker.lock()
|
|
|
|
defer locker.tryUnlock()
|
|
|
|
|
|
|
|
crdt := getCrdt()
|
|
|
|
crdt.MarkClean()
|
|
|
|
fmt.Println("Datastore marked 'clean'")
|
|
|
|
return nil
|
|
|
|
|
|
|
|
},
|
|
|
|
},
|
2022-01-04 17:13:16 +00:00
|
|
|
},
|
|
|
|
},
|
2017-12-19 17:05:32 +00:00
|
|
|
{
|
|
|
|
Name: "export",
|
2019-02-20 14:24:25 +00:00
|
|
|
Usage: "save the state to a JSON file",
|
2017-12-19 17:05:32 +00:00
|
|
|
Description: `
|
2019-02-20 14:24:25 +00:00
|
|
|
This command dumps the current cluster pinset (state) as a JSON file. The
|
|
|
|
resulting file can be used to migrate, restore or back up a Cluster peer.
|
|
|
|
By default, the state will be printed to stdout.
|
2017-12-19 17:05:32 +00:00
|
|
|
`,
|
|
|
|
Flags: []cli.Flag{
|
|
|
|
cli.StringFlag{
|
|
|
|
Name: "file, f",
|
|
|
|
Value: "",
|
2019-02-20 14:24:25 +00:00
|
|
|
Usage: "writes to an output file",
|
|
|
|
},
|
2017-12-19 17:05:32 +00:00
|
|
|
},
|
|
|
|
Action: func(c *cli.Context) error {
|
2019-02-20 14:24:25 +00:00
|
|
|
locker.lock()
|
2017-12-19 17:05:32 +00:00
|
|
|
defer locker.tryUnlock()
|
|
|
|
|
2019-08-09 14:00:55 +00:00
|
|
|
mgr := getStateManager()
|
2019-07-26 15:43:31 +00:00
|
|
|
|
2017-12-19 17:05:32 +00:00
|
|
|
var w io.WriteCloser
|
2019-02-20 14:24:25 +00:00
|
|
|
var err error
|
2017-12-19 17:05:32 +00:00
|
|
|
outputPath := c.String("file")
|
|
|
|
if outputPath == "" {
|
|
|
|
// Output to stdout
|
|
|
|
w = os.Stdout
|
|
|
|
} else {
|
|
|
|
// Create the export file
|
|
|
|
w, err = os.Create(outputPath)
|
|
|
|
checkErr("creating output file", err)
|
|
|
|
}
|
|
|
|
|
2022-01-04 17:15:06 +00:00
|
|
|
buf := bufio.NewWriter(w)
|
|
|
|
defer func() {
|
|
|
|
buf.Flush()
|
|
|
|
w.Close()
|
|
|
|
}()
|
|
|
|
checkErr("exporting state", mgr.ExportState(buf))
|
2019-02-20 14:24:25 +00:00
|
|
|
logger.Info("state successfully exported")
|
2017-12-19 17:05:32 +00:00
|
|
|
return nil
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Name: "import",
|
2019-02-20 14:24:25 +00:00
|
|
|
Usage: "load the state from a file produced by 'export'",
|
2017-12-19 17:05:32 +00:00
|
|
|
Description: `
|
2019-02-20 14:24:25 +00:00
|
|
|
This command reads in an exported pinset (state) file and replaces the
|
|
|
|
existing one. This can be used, for example, to restore a Cluster peer from a
|
2019-05-16 13:31:36 +00:00
|
|
|
backup.
|
2019-02-20 14:24:25 +00:00
|
|
|
|
|
|
|
If an argument is provided, it will be treated as the path of the file
|
|
|
|
to import. If no argument is provided, stdin will be used.
|
2017-12-19 17:05:32 +00:00
|
|
|
`,
|
2018-10-04 15:55:46 +00:00
|
|
|
Flags: []cli.Flag{
|
|
|
|
cli.BoolFlag{
|
|
|
|
Name: "force, f",
|
2019-02-20 14:24:25 +00:00
|
|
|
Usage: "skips confirmation prompt",
|
|
|
|
},
|
2021-12-14 10:21:19 +00:00
|
|
|
cli.IntFlag{
|
|
|
|
Name: "replication-min, rmin",
|
|
|
|
Value: 0,
|
|
|
|
Usage: "Overwrite replication-factor-min for all pins on import",
|
|
|
|
},
|
|
|
|
cli.IntFlag{
|
|
|
|
Name: "replication-max, rmax",
|
|
|
|
Value: 0,
|
|
|
|
Usage: "Overwrite replication-factor-max for all pins on import",
|
|
|
|
},
|
|
|
|
cli.StringFlag{
|
|
|
|
Name: "allocations, allocs",
|
|
|
|
Usage: "Overwrite allocations for all pins on import. Comma-separated list of peer IDs",
|
|
|
|
},
|
2018-10-04 15:55:46 +00:00
|
|
|
},
|
2017-12-19 17:05:32 +00:00
|
|
|
Action: func(c *cli.Context) error {
|
2019-02-20 14:24:25 +00:00
|
|
|
locker.lock()
|
2017-12-19 17:05:32 +00:00
|
|
|
defer locker.tryUnlock()
|
|
|
|
|
2019-02-20 14:24:25 +00:00
|
|
|
confirm := "The pinset (state) of this peer "
|
|
|
|
confirm += "will be replaced. Continue? [y/n]:"
|
|
|
|
if !c.Bool("force") && !yesNoPrompt(confirm) {
|
|
|
|
return nil
|
2017-12-19 17:05:32 +00:00
|
|
|
}
|
|
|
|
|
2021-12-14 10:21:19 +00:00
|
|
|
// importState allows overwriting of some options on import
|
|
|
|
opts := api.PinOptions{
|
|
|
|
ReplicationFactorMin: c.Int("replication-min"),
|
|
|
|
ReplicationFactorMax: c.Int("replication-max"),
|
|
|
|
UserAllocations: api.StringsToPeers(strings.Split(c.String("allocations"), ",")),
|
|
|
|
}
|
|
|
|
|
2019-08-09 14:00:55 +00:00
|
|
|
mgr := getStateManager()
|
2019-07-26 15:43:31 +00:00
|
|
|
|
2017-12-19 17:05:32 +00:00
|
|
|
// Get the importing file path
|
|
|
|
importFile := c.Args().First()
|
|
|
|
var r io.ReadCloser
|
2019-02-20 14:24:25 +00:00
|
|
|
var err error
|
2017-12-19 17:05:32 +00:00
|
|
|
if importFile == "" {
|
|
|
|
r = os.Stdin
|
2019-02-20 14:24:25 +00:00
|
|
|
fmt.Println("reading from stdin, Ctrl-D to finish")
|
2017-12-19 17:05:32 +00:00
|
|
|
} else {
|
|
|
|
r, err = os.Open(importFile)
|
|
|
|
checkErr("reading import file", err)
|
|
|
|
}
|
|
|
|
defer r.Close()
|
2019-02-20 14:24:25 +00:00
|
|
|
|
2022-01-04 17:15:06 +00:00
|
|
|
buf := bufio.NewReader(r)
|
|
|
|
|
|
|
|
checkErr("importing state", mgr.ImportState(buf, opts))
|
2019-02-20 14:24:25 +00:00
|
|
|
logger.Info("state successfully imported. Make sure all peers have consistent states")
|
2017-12-19 17:05:32 +00:00
|
|
|
return nil
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Name: "cleanup",
|
2019-02-20 14:24:25 +00:00
|
|
|
Usage: "remove persistent data",
|
2017-12-19 17:05:32 +00:00
|
|
|
Description: `
|
2019-02-20 14:24:25 +00:00
|
|
|
This command removes any persisted consensus data in this peer, including the
|
2022-01-04 17:13:16 +00:00
|
|
|
current pinset (state). The next start of the peer will effectively be like a
|
2019-02-20 14:24:25 +00:00
|
|
|
first start. Peers may need to bootstrap and sync from scratch after this.
|
2017-12-19 17:05:32 +00:00
|
|
|
`,
|
2018-10-04 15:55:46 +00:00
|
|
|
Flags: []cli.Flag{
|
|
|
|
cli.BoolFlag{
|
|
|
|
Name: "force, f",
|
2019-02-20 14:24:25 +00:00
|
|
|
Usage: "skip confirmation prompt",
|
|
|
|
},
|
2018-10-04 15:55:46 +00:00
|
|
|
},
|
2017-12-19 17:05:32 +00:00
|
|
|
Action: func(c *cli.Context) error {
|
2019-02-20 14:24:25 +00:00
|
|
|
locker.lock()
|
2017-12-19 17:05:32 +00:00
|
|
|
defer locker.tryUnlock()
|
|
|
|
|
2019-02-20 14:24:25 +00:00
|
|
|
confirm := fmt.Sprintf(
|
|
|
|
"%s Continue? [y/n]:",
|
|
|
|
stateCleanupPrompt,
|
|
|
|
)
|
|
|
|
if !c.Bool("force") && !yesNoPrompt(confirm) {
|
|
|
|
return nil
|
2017-12-19 17:05:32 +00:00
|
|
|
}
|
|
|
|
|
2019-08-09 14:00:55 +00:00
|
|
|
mgr := getStateManager()
|
2019-02-20 14:24:25 +00:00
|
|
|
checkErr("cleaning state", mgr.Clean())
|
|
|
|
logger.Info("data correctly cleaned up")
|
2017-12-19 17:05:32 +00:00
|
|
|
return nil
|
2017-11-28 22:45:10 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
2017-12-12 16:47:21 +00:00
|
|
|
{
|
|
|
|
Name: "version",
|
2019-05-16 13:31:36 +00:00
|
|
|
Usage: "Prints the ipfs-cluster version",
|
2017-12-12 16:47:21 +00:00
|
|
|
Action: func(c *cli.Context) error {
|
2018-12-18 14:44:11 +00:00
|
|
|
fmt.Printf("%s\n", version.Version)
|
2017-12-12 16:47:21 +00:00
|
|
|
return nil
|
|
|
|
},
|
|
|
|
},
|
2017-01-24 18:55:06 +00:00
|
|
|
}
|
|
|
|
|
2017-02-02 13:34:51 +00:00
|
|
|
app.Action = run
|
2017-01-24 18:55:06 +00:00
|
|
|
|
2017-02-02 13:34:51 +00:00
|
|
|
app.Run(os.Args)
|
|
|
|
}
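// Example invocations (illustrative only; the command, subcommand and flag
// names are taken from the definitions above, while peer addresses and file
// names are placeholders):
//
//   ipfs-cluster-service daemon --bootstrap /dns4/cluster.example.com/tcp/9096/p2p/<peer-id>
//   ipfs-cluster-service state export -f pinset.json
//   ipfs-cluster-service state import --replication-min 2 --replication-max 3 pinset.json
//   ipfs-cluster-service state cleanup --force
//   ipfs-cluster-service state crdt info
//   ipfs-cluster-service state crdt dot -f dag.dot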
|
|
|
|
|
2017-10-20 10:45:42 +00:00
|
|
|
// run, the default action, prints the application help and exits with an error: starting the daemon now requires the explicit "daemon" subcommand.
|
2017-02-02 13:34:51 +00:00
|
|
|
func run(c *cli.Context) error {
|
2018-04-28 22:22:23 +00:00
|
|
|
cli.ShowAppHelp(c)
|
|
|
|
os.Exit(1)
|
|
|
|
return nil
|
2016-12-21 18:37:25 +00:00
|
|
|
}
|
|
|
|
|
2019-12-04 17:40:35 +00:00
|
|
|
func setupLogLevel(debug bool, l string) error {
|
2019-11-16 08:54:50 +00:00
|
|
|
// if debug is set to true, log everything in debug level
|
|
|
|
if debug {
|
|
|
|
ipfscluster.SetFacilityLogLevel("*", "DEBUG")
|
|
|
|
return nil
|
2017-03-14 16:32:00 +00:00
|
|
|
}
|
2016-12-21 18:37:25 +00:00
|
|
|
|
2019-12-04 17:40:35 +00:00
|
|
|
compLogLevel := strings.Split(l, ",")
|
2019-11-25 07:27:59 +00:00
|
|
|
var logLevel string
|
|
|
|
compLogFacs := make(map[string]string)
|
|
|
|
// get overall log level and component-wise log levels from arguments
|
|
|
|
for _, cll := range compLogLevel {
|
|
|
|
if cll == "" {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
identifierToLevel := strings.Split(cll, ":")
|
2019-12-04 17:40:35 +00:00
|
|
|
var lvl string
|
|
|
|
var comp string
|
2019-11-25 07:27:59 +00:00
|
|
|
switch len(identifierToLevel) {
|
|
|
|
case 1:
|
2019-12-04 17:40:35 +00:00
|
|
|
lvl = identifierToLevel[0]
|
|
|
|
comp = "all"
|
2019-11-25 07:27:59 +00:00
|
|
|
case 2:
|
2019-12-04 17:40:35 +00:00
|
|
|
lvl = identifierToLevel[1]
|
|
|
|
comp = identifierToLevel[0]
|
2019-11-25 07:27:59 +00:00
|
|
|
default:
|
2019-11-25 11:11:49 +00:00
|
|
|
return errors.New("log level not in expected format \"identifier:loglevel\" or \"loglevel\"")
|
2019-11-25 07:27:59 +00:00
|
|
|
}
|
2019-12-04 17:40:35 +00:00
|
|
|
|
|
|
|
_, ok := compLogFacs[comp]
|
|
|
|
if ok {
|
|
|
|
fmt.Printf("overwriting existing %s log level\n", comp)
|
|
|
|
}
|
|
|
|
compLogFacs[comp] = lvl
|
2019-11-25 07:27:59 +00:00
|
|
|
}
|
|
|
|
|
2019-12-04 17:40:35 +00:00
|
|
|
logLevel, ok := compLogFacs["all"]
|
|
|
|
if !ok {
|
2019-11-25 07:27:59 +00:00
|
|
|
logLevel = defaultLogLevel
|
2019-12-04 17:40:35 +00:00
|
|
|
} else {
|
|
|
|
delete(compLogFacs, "all")
|
2019-11-25 07:27:59 +00:00
|
|
|
}
|
|
|
|
|
2019-11-16 08:54:50 +00:00
|
|
|
// log service with logLevel
|
|
|
|
ipfscluster.SetFacilityLogLevel("service", logLevel)
|
|
|
|
|
2019-11-25 11:48:50 +00:00
|
|
|
logfacs := make(map[string]string)
|
2019-11-25 11:11:49 +00:00
|
|
|
|
|
|
|
// fill component-wise log levels
|
|
|
|
for identifier, level := range compLogFacs {
|
|
|
|
logfacs[identifier] = level
|
|
|
|
}
|
|
|
|
|
2021-08-06 09:28:22 +00:00
|
|
|
// Set the values for things not set by the user or for
|
|
|
|
// things set by "all".
|
|
|
|
for key := range ipfscluster.LoggingFacilities {
|
|
|
|
if _, ok := logfacs[key]; !ok {
|
|
|
|
logfacs[key] = logLevel
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// For Extra facilities, set the defaults per logging.go unless
|
|
|
|
// manually set
|
|
|
|
for key, defaultLvl := range ipfscluster.LoggingFacilitiesExtra {
|
|
|
|
if _, ok := logfacs[key]; !ok {
|
|
|
|
logfacs[key] = defaultLvl
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-16 08:54:50 +00:00
|
|
|
for identifier, level := range logfacs {
|
|
|
|
ipfscluster.SetFacilityLogLevel(identifier, level)
|
2017-03-14 16:32:00 +00:00
|
|
|
}
|
2019-11-14 15:16:22 +00:00
|
|
|
|
|
|
|
return nil
|
2018-01-15 09:49:07 +00:00
|
|
|
}
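// Illustrative usage sketch (not part of the original file): setupLogLevel
// above accepts a global level and/or per-component "identifier:level" pairs,
// comma-separated. The facility name "crdt" used here is only an example.
func exampleSetupLogLevel() {
	// Everything at "info", except the "crdt" facility at "debug".
	if err := setupLogLevel(false, "info,crdt:debug"); err != nil {
		fmt.Println("bad log level specification:", err)
	}
}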
|
|
|
|
|
2017-07-28 19:10:52 +00:00
|
|
|
func userProvidedSecret(enterSecret bool) ([]byte, bool) {
|
|
|
|
if enterSecret {
|
2019-02-07 18:46:42 +00:00
|
|
|
secret := promptUser("Enter cluster secret (32-byte hex string): ")
|
|
|
|
decodedSecret, err := ipfscluster.DecodeClusterSecret(secret)
|
|
|
|
checkErr("parsing user-provided secret", err)
|
|
|
|
return decodedSecret, true
|
2017-07-28 19:10:52 +00:00
|
|
|
}
|
|
|
|
|
2019-02-07 18:46:42 +00:00
|
|
|
return nil, false
|
2017-07-28 19:10:52 +00:00
|
|
|
}
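// Illustrative sketch (not part of the original file): the secret prompted
// for above is a 32-byte value written as 64 hexadecimal characters, which
// ipfscluster.DecodeClusterSecret (used above) parses back into raw bytes.
// The literal below is a made-up example value.
func exampleDecodeSecret() {
	secret, err := ipfscluster.DecodeClusterSecret(
		"9a420ec947512b8836d8eb46e1c56fdb746ab8a78015b9821e6b46b38344038f",
	)
	if err != nil {
		fmt.Println("invalid secret:", err)
		return
	}
	fmt.Printf("decoded %d-byte cluster secret\n", len(secret))
}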
|
|
|
|
|
2017-07-04 20:39:10 +00:00
|
|
|
func promptUser(msg string) string {
|
|
|
|
scanner := bufio.NewScanner(os.Stdin)
|
|
|
|
fmt.Print(msg)
|
|
|
|
scanner.Scan()
|
|
|
|
return scanner.Text()
|
|
|
|
}
|
2017-07-19 16:54:57 +00:00
|
|
|
|
2017-12-19 17:05:32 +00:00
|
|
|
// Lifted from go-ipfs/cmd/ipfs/daemon.go
|
|
|
|
func yesNoPrompt(prompt string) bool {
|
|
|
|
var s string
|
|
|
|
for i := 0; i < 3; i++ {
|
|
|
|
fmt.Printf("%s ", prompt)
|
|
|
|
fmt.Scanf("%s", &s)
|
|
|
|
switch s {
|
|
|
|
case "y", "Y":
|
|
|
|
return true
|
|
|
|
case "n", "N":
|
|
|
|
return false
|
|
|
|
case "":
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
fmt.Println("Please press either 'y' or 'n'")
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
2019-08-09 10:56:27 +00:00
|
|
|
|
2019-08-09 14:00:55 +00:00
|
|
|
func getStateManager() cmdutils.StateManager {
|
2019-11-30 02:40:06 +00:00
|
|
|
cfgHelper, err := cmdutils.NewLoadedConfigHelper(
|
|
|
|
configPath,
|
|
|
|
identityPath,
|
2019-08-09 10:56:27 +00:00
|
|
|
)
|
2019-11-30 02:40:06 +00:00
|
|
|
checkErr("loading configurations", err)
|
|
|
|
cfgHelper.Manager().Shutdown()
|
|
|
|
mgr, err := cmdutils.NewStateManagerWithHelper(cfgHelper)
|
2019-12-02 15:08:47 +00:00
|
|
|
checkErr("creating state manager", err)
|
2019-08-09 10:56:27 +00:00
|
|
|
return mgr
|
|
|
|
}
|
2022-09-26 17:33:46 +00:00
|
|
|
|
|
|
|
func getCrdt() *dscrdt.Datastore {
|
|
|
|
// Load all the configurations and identity
|
|
|
|
cfgHelper, err := cmdutils.NewLoadedConfigHelper(configPath, identityPath)
|
|
|
|
checkErr("loading configurations", err)
|
|
|
|
defer cfgHelper.Manager().Shutdown()
|
|
|
|
|
|
|
|
// Get a state manager and the datastore
|
|
|
|
mgr, err := cmdutils.NewStateManagerWithHelper(cfgHelper)
|
|
|
|
checkErr("creating state manager", err)
|
|
|
|
store, err := mgr.GetStore()
|
|
|
|
checkErr("opening datastore", err)
|
|
|
|
batching, ok := store.(datastore.Batching)
|
|
|
|
if !ok {
|
|
|
|
checkErr("", errors.New("no batching store"))
|
|
|
|
}
|
|
|
|
|
|
|
|
crdtNs := cfgHelper.Configs().Crdt.DatastoreNamespace
|
|
|
|
|
|
|
|
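// The cluster peer stores the CRDT DAG blocks under the configured crdt
// namespace's blocks prefix (crdt.BlocksNs); wrapping the datastore the same
// way lets the offline ipfs-lite node created below read those blocks back.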
var blocksDatastore datastore.Batching = namespace.Wrap(
|
|
|
|
batching,
|
|
|
|
datastore.NewKey(crdtNs).ChildString(crdt.BlocksNs),
|
|
|
|
)
|
|
|
|
|
|
|
|
ipfs, err := ipfslite.New(
|
|
|
|
context.Background(),
|
|
|
|
blocksDatastore,
|
|
|
|
nil,
|
|
|
|
nil,
|
2023-01-27 13:34:53 +00:00
|
|
|
nil,
|
2022-09-26 17:33:46 +00:00
|
|
|
&ipfslite.Config{
|
|
|
|
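// Run ipfs-lite offline: the DAG is read from the local datastore only,
// without dialing the network.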
Offline: true,
|
|
|
|
},
|
|
|
|
)
|
|
|
|
checkErr("creating ipfs-lite offline node", err)
|
|
|
|
|
|
|
|
opts := dscrdt.DefaultOptions()
|
|
|
|
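// A RepairInterval of 0 disables the periodic DAG-repair job, which this
// short-lived offline instance does not need.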
opts.RepairInterval = 0
|
|
|
|
crdt, err := dscrdt.New(
|
|
|
|
batching,
|
|
|
|
datastore.NewKey(crdtNs),
|
|
|
|
ipfs,
|
|
|
|
nil,
|
|
|
|
opts,
|
|
|
|
)
|
|
|
|
checkErr("creating crdt node", err)
|
|
|
|
return crdt
|
|
|
|
}
|