// Package raft implements a Consensus component for IPFS Cluster which uses
// Raft (go-libp2p-raft).
package raft

import (
	"context"
	"errors"
	"fmt"
	"sort"
	"sync"
	"time"

	"github.com/ipfs-cluster/ipfs-cluster/api"
	"github.com/ipfs-cluster/ipfs-cluster/state"
	"github.com/ipfs-cluster/ipfs-cluster/state/dsstate"

	ds "github.com/ipfs/go-datastore"
	logging "github.com/ipfs/go-log/v2"
	consensus "github.com/libp2p/go-libp2p-consensus"
	rpc "github.com/libp2p/go-libp2p-gorpc"
	libp2praft "github.com/libp2p/go-libp2p-raft"
	host "github.com/libp2p/go-libp2p/core/host"
	peer "github.com/libp2p/go-libp2p/core/peer"

	"go.opencensus.io/tag"
	"go.opencensus.io/trace"
)

var logger = logging.Logger("raft")

// Consensus handles the work of keeping a shared-state between
// the peers of an IPFS Cluster, as well as modifying that state and
// applying any updates in a thread-safe manner.
type Consensus struct {
	ctx    context.Context
	cancel func()
	config *Config

	host host.Host

	consensus consensus.OpLogConsensus
	actor     consensus.Actor
	baseOp    *LogOp
	raft      *raftWrapper

	rpcClient *rpc.Client
	rpcReady  chan struct{}
	readyCh   chan struct{}

	shutdownLock sync.RWMutex
	shutdown     bool
}

// NewConsensus builds a new ClusterConsensus component using Raft.
//
// Raft saves state snapshots regularly and persists log data in a bolt
// datastore. Therefore, unless memory usage is a concern, it is recommended
// to use an in-memory go-datastore as store parameter.
//
// The staging parameter controls if the Raft peer should start in
// staging mode (used when joining a new Raft peerset with other peers).
//
// The store parameter should be a thread-safe datastore.
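//
// An illustrative usage sketch (added for clarity, not from the upstream
// docs): it assumes a libp2p host "h", a validated *Config "cfg" and a
// context "ctx" are already in scope, and wraps the in-memory go-datastore
// so it is thread-safe (dssync = github.com/ipfs/go-datastore/sync):
//
//	store := dssync.MutexWrap(ds.NewMapDatastore())
//	cons, err := NewConsensus(h, cfg, store, false) // staging=false: normal start
//	if err != nil {
//		// handle error
//	}
//	// Block until there is a leader and the local state has caught up.
//	if err := cons.WaitForSync(ctx); err != nil {
//		// handle error
//	}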
func NewConsensus(
	host host.Host,
	cfg *Config,
	store ds.Datastore,
	staging bool, // this peer must not be bootstrapped if no state exists
) (*Consensus, error) {
	err := cfg.Validate()
	if err != nil {
		return nil, err
	}

	ctx, cancel := context.WithCancel(context.Background())

	logger.Debug("starting Consensus and waiting for a leader...")
	baseOp := &LogOp{tracing: cfg.Tracing}
	state, err := dsstate.New(
		ctx,
		store,
		cfg.DatastoreNamespace,
		dsstate.DefaultHandle(),
	)
	if err != nil {
		cancel()
		return nil, err
	}
	consensus := libp2praft.NewOpLog(state, baseOp)
	raft, err := newRaftWrapper(host, cfg, consensus.FSM(), staging)
	if err != nil {
		logger.Error("error creating raft: ", err)
		cancel()
		return nil, err
	}
	actor := libp2praft.NewActor(raft.raft)
	consensus.SetActor(actor)

	cc := &Consensus{
		ctx:       ctx,
		cancel:    cancel,
		config:    cfg,
		host:      host,
		consensus: consensus,
		actor:     actor,
		baseOp:    baseOp,
		raft:      raft,
		rpcReady:  make(chan struct{}, 1),
		readyCh:   make(chan struct{}, 1),
	}

	baseOp.consensus = cc

	go cc.finishBootstrap()
	return cc, nil
}

// WaitForSync waits for a leader and for the state to be up to date, then returns.
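//
// An illustrative call pattern (added for clarity, not from the upstream
// docs): callers that want to bound the whole synchronization, not just the
// internal leader wait, can pass a context with its own deadline:
//
//	syncCtx, cancel := context.WithTimeout(context.Background(), time.Minute)
//	defer cancel()
//	if err := cc.WaitForSync(syncCtx); err != nil {
//		// the peer did not become a caught-up Voter in time
//	}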
func (cc *Consensus) WaitForSync(ctx context.Context) error {
	ctx, span := trace.StartSpan(ctx, "consensus/WaitForSync")
	defer span.End()

	leaderCtx, cancel := context.WithTimeout(
		ctx,
		cc.config.WaitForLeaderTimeout)
	defer cancel()

	// 1 - wait for leader
	// 2 - wait until we are a Voter
	// 3 - wait until last index is applied

	// From raft docs:

	// once a staging server receives enough log entries to be sufficiently
	// caught up to the leader's log, the leader will invoke a membership
	// change to change the Staging server to a Voter

	// Thus, waiting to be a Voter is a guarantee that we have a reasonable
	// up to date state. Otherwise, we might return too early (see
	// https://github.com/ipfs-cluster/ipfs-cluster/issues/378)
	_, err := cc.raft.WaitForLeader(leaderCtx)
	if err != nil {
		return errors.New("error waiting for leader: " + err.Error())
	}

	err = cc.raft.WaitForVoter(ctx)
if err != nil {
|
|
|
|
return errors.New("error waiting to become a Voter: " + err.Error())
|
|
|
|
}
|
|
|
|
|
2018-06-27 04:03:15 +00:00
|
|
|
err = cc.raft.WaitForUpdates(ctx)
|
2017-02-02 22:52:06 +00:00
|
|
|
if err != nil {
|
|
|
|
return errors.New("error waiting for consensus updates: " + err.Error())
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-02-01 17:16:09 +00:00
|
|
|
// waits until there is a consensus leader and syncs the state
|
Feat: emancipate Consensus from the Cluster component
This commit promotes the Consensus component (and Raft) to become a fully
independent thing like other components, passed to NewCluster during
initialization. Cluster (main component) no longer creates the consensus
layer internally. This has triggered a number of breaking changes
that I will explain below.
Motivation: Future work will require the possibility of running Cluster
with a consensus layer that is not Raft. The "consensus" layer is in charge
of maintaining two things:
* The current cluster peerset, as required by the implementation
* The current cluster pinset (shared state)
While the pinset maintenance has always been in the consensus layer, the
peerset maintenance was handled by the main component (starting by the "peers"
key in the configuration) AND the Raft component (internally)
and this generated lots of confusion: if the user edited the peers in the
configuration they would be greeted with an error.
The bootstrap process (adding a peer to an existing cluster) and configuration
key also complicated many things, since the main component did it, but only
when the consensus was initialized and in single peer mode.
In all this we also mixed the peerstore (list of peer addresses in the libp2p
host) with the peerset, when they need not to be linked.
By initializing the consensus layer before calling NewCluster, all the
difficulties in maintaining the current implementation in the same way
have come to light. Thus, the following changes have been introduced:
* Remove "peers" and "bootstrap" keys from the configuration: we no longer
edit or save the configuration files. This was a very bad practice, requiring
write permissions by the process to the file containing the private key and
additionally made things like Puppet deployments of cluster difficult as
configuration would mutate from its initial version. Needless to say all the
maintenance associated to making sure peers and bootstrap had correct values
when peers are bootstrapped or removed. A loud and detailed error message has
been added when staring cluster with an old config, along with instructions on
how to move forward.
* Introduce a PeerstoreFile ("peerstore") which stores peer addresses: in
ipfs, the peerstore is not persisted because it can be re-built from the
network bootstrappers and the DHT. Cluster should probably also allow
discoverability of peers addresses (when not bootstrapping, as in that case
we have it), but in the meantime, we will read and persist the peerstore
addresses for cluster peers in this file, different from the configuration.
Note that dns multiaddresses are now fully supported and no IPs are saved
when we have DNS multiaddresses for a peer.
* The former "peer_manager" code is now a pstoremgr module, providing utilities
to parse, add, list and generally maintain the libp2p host peerstore, including
operations on the PeerstoreFile. This "pstoremgr" can now also be extended to
perform address autodiscovery and other things indepedently from Cluster.
* Create and initialize Raft outside of the main Cluster component: since we
can now launch Raft independently from Cluster, we have more degrees of
freedom. A new "staging" option when creating the object allows a raft peer to
be launched in Staging mode, waiting to be added to a running consensus, and
thus, not electing itself as leader or doing anything like we were doing
before. This additionally allows us to track when the peer has become a
Voter, which only happens when it's caught up with the state, something that
was wonky previously.
* The raft configuration now includes an InitPeerset key, which allows to
provide a peerset for new peers and which is ignored when staging==true. The
whole Raft initialization code is way cleaner and stronger now.
* Cluster peer bootsrapping is now an ipfs-cluster-service feature. The
--bootstrap flag works as before (additionally allowing comma-separated-list
of entries). What bootstrap does, is to initialize Raft with staging == true,
and then call Join in the main cluster component. Only when the Raft peer
transitions to Voter, consensus becomes ready, and cluster becomes Ready.
This is cleaner, works better and is less complex than before (supporting
both flags and config values). We also backup and clean the state whenever
we are boostrapping, automatically
* ipfs-cluster-service no longer runs the daemon. Starting cluster needs
now "ipfs-cluster-service daemon". The daemon specific flags (bootstrap,
alloc) are now flags for the daemon subcommand. Here we mimic ipfs ("ipfs"
does not start the daemon but print help) and pave the path for merging both
service and ctl in the future.
While this brings some breaking changes, it significantly reduces the
complexity of the configuration, the code and most importantly, the
documentation. It should be easier now to explain the user what is the
right way to launch a cluster peer, and more difficult to make mistakes.
As a side effect, the PR also:
* Fixes #381 - peers with dynamic addresses
* Fixes #371 - peers should be Raft configuration option
* Fixes #378 - waitForUpdates may return before state fully synced
* Fixes #235 - config option shadowing (no cfg saves, no need to shadow)
License: MIT
Signed-off-by: Hector Sanjuan <code@hector.link>
2018-04-28 22:22:23 +00:00
|
|
|
// to the tracker. If errors happen, this will return and never
|
|
|
|
// signal the component as Ready.
|
2017-02-01 17:16:09 +00:00
|
|
|
func (cc *Consensus) finishBootstrap() {
|
2018-04-28 22:22:23 +00:00
|
|
|
// wait until we have RPC to perform any actions.
|
2018-08-15 10:27:01 +00:00
|
|
|
select {
|
|
|
|
case <-cc.ctx.Done():
|
|
|
|
return
|
|
|
|
case <-cc.rpcReady:
|
2017-01-30 12:12:25 +00:00
|
|
|
}
|
|
|
|
|
2018-04-28 22:22:23 +00:00
|
|
|
// Sometimes bootstrap is a no-op. It only applies when
|
|
|
|
// no state exists and staging=false.
|
|
|
|
_, err := cc.raft.Bootstrap()
|
2017-01-30 12:12:25 +00:00
|
|
|
if err != nil {
|
2018-04-28 22:22:23 +00:00
|
|
|
return
|
2017-01-30 12:12:25 +00:00
|
|
|
}
|
2018-04-28 22:22:23 +00:00
|
|
|
|
2018-06-27 04:03:15 +00:00
|
|
|
err = cc.WaitForSync(cc.ctx)
|
2018-04-28 22:22:23 +00:00
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
logger.Debug("Raft state is now up to date")
|
2017-02-02 22:52:06 +00:00
|
|
|
logger.Debug("consensus ready")
|
2018-04-28 22:22:23 +00:00
|
|
|
cc.readyCh <- struct{}{}
|
2017-01-30 12:12:25 +00:00
|
|
|
}
|
|
|
|
|
2016-12-02 18:33:39 +00:00
|
|
|
// Shutdown stops the component so it will not process any
|
|
|
|
// more updates. The underlying consensus is permanently
|
|
|
|
// shutdown, along with the libp2p transport.
|
2018-06-27 04:03:15 +00:00
|
|
|
func (cc *Consensus) Shutdown(ctx context.Context) error {
|
|
|
|
ctx, span := trace.StartSpan(ctx, "consensus/Shutdown")
|
|
|
|
defer span.End()
|
|
|
|
|
2016-12-15 13:07:19 +00:00
|
|
|
cc.shutdownLock.Lock()
|
|
|
|
defer cc.shutdownLock.Unlock()
|
|
|
|
|
|
|
|
if cc.shutdown {
|
|
|
|
logger.Debug("already shutdown")
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-12-15 13:19:41 +00:00
|
|
|
logger.Info("stopping Consensus component")
|
2016-12-15 13:07:19 +00:00
|
|
|
|
2017-10-23 11:46:37 +00:00
|
|
|
// Raft Shutdown
|
2018-06-27 04:03:15 +00:00
|
|
|
err := cc.raft.Shutdown(ctx)
|
2016-12-15 13:07:19 +00:00
|
|
|
if err != nil {
|
2017-10-23 11:46:37 +00:00
|
|
|
logger.Error(err)
|
2016-12-02 18:33:39 +00:00
|
|
|
}
|
2017-11-08 19:04:04 +00:00
|
|
|
|
|
|
|
if cc.config.hostShutdown {
|
|
|
|
cc.host.Close()
|
|
|
|
}
|
|
|
|
|
2016-12-15 13:07:19 +00:00
|
|
|
cc.shutdown = true
|
2017-10-23 11:46:37 +00:00
|
|
|
cc.cancel()
|
|
|
|
close(cc.rpcReady)
|
2016-12-02 18:33:39 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-12-23 18:35:37 +00:00
|
|
|
// SetClient makes the component ready to perform RPC requests
|
|
|
|
func (cc *Consensus) SetClient(c *rpc.Client) {
|
|
|
|
cc.rpcClient = c
|
|
|
|
cc.rpcReady <- struct{}{}
|
2016-12-02 18:33:39 +00:00
|
|
|
}
|
|
|
|
|
2017-01-30 12:12:25 +00:00
|
|
|
// Ready returns a channel which is signaled when the Consensus
|
|
|
|
// algorithm has finished bootstrapping and is ready to use
|
2018-06-27 04:03:15 +00:00
|
|
|
func (cc *Consensus) Ready(ctx context.Context) <-chan struct{} {
|
2020-04-14 17:58:00 +00:00
|
|
|
_, span := trace.StartSpan(ctx, "consensus/Ready")
|
2018-06-27 04:03:15 +00:00
|
|
|
defer span.End()
|
|
|
|
|
2017-01-30 12:12:25 +00:00
|
|
|
return cc.readyCh
|
|
|
|
}
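Not part of this file: a minimal sketch of how a caller (for example the main cluster component) might wait on this channel, assuming cc is an initialized *Consensus and ctx is a parent context.

select {
case <-cc.Ready(ctx):
	// consensus finished bootstrapping; the shared state is usable
case <-ctx.Done():
	// gave up waiting; the component may still be catching up or shutting down
}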
|
|
|
|
|
2019-05-09 13:14:26 +00:00
|
|
|
// IsTrustedPeer returns true. In Raft we trust all peers.
|
2019-05-09 20:36:03 +00:00
|
|
|
func (cc *Consensus) IsTrustedPeer(ctx context.Context, p peer.ID) bool {
|
2019-05-09 13:14:26 +00:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
2019-05-09 17:48:40 +00:00
|
|
|
// Trust is a no-op.
|
2019-05-09 20:36:03 +00:00
|
|
|
func (cc *Consensus) Trust(ctx context.Context, pid peer.ID) error { return nil }
|
2019-05-09 17:48:40 +00:00
|
|
|
|
|
|
|
// Distrust is a no-op.
|
2019-05-09 20:36:03 +00:00
|
|
|
func (cc *Consensus) Distrust(ctx context.Context, pid peer.ID) error { return nil }
|
2019-05-09 17:48:40 +00:00
|
|
|
|
2022-03-19 01:52:46 +00:00
|
|
|
func (cc *Consensus) op(ctx context.Context, pin api.Pin, t LogOpType) *LogOp {
|
2017-11-08 19:04:04 +00:00
|
|
|
return &LogOp{
|
2019-02-27 17:04:35 +00:00
|
|
|
Cid: pin,
|
2017-11-08 19:04:04 +00:00
|
|
|
Type: t,
|
2017-02-02 22:52:06 +00:00
|
|
|
}
|
2016-12-02 18:33:39 +00:00
|
|
|
}
|
|
|
|
|
2017-01-23 13:01:49 +00:00
|
|
|
// returns true if the operation was redirected to the leader
|
2017-10-23 11:46:37 +00:00
|
|
|
// note that if the leader just disappeared, the rpc call will
|
|
|
|
// fail because we haven't heard that it's gone.
|
2022-09-21 15:31:33 +00:00
|
|
|
func (cc *Consensus) redirectToLeader(ctx context.Context, method string, arg interface{}) (bool, error) {
|
|
|
|
ctx, span := trace.StartSpan(ctx, "consensus/redirectToLeader")
|
2018-06-27 04:03:15 +00:00
|
|
|
defer span.End()
|
|
|
|
|
2017-10-23 11:46:37 +00:00
|
|
|
var finalErr error
|
|
|
|
|
|
|
|
// Retry redirects
|
|
|
|
for i := 0; i <= cc.config.CommitRetries; i++ {
|
|
|
|
logger.Debugf("redirect try %d", i)
|
2018-06-27 04:03:15 +00:00
|
|
|
leader, err := cc.Leader(ctx)
|
2017-10-23 11:46:37 +00:00
|
|
|
|
|
|
|
// No leader, wait for one
|
2017-02-02 22:52:06 +00:00
|
|
|
if err != nil {
|
2020-03-13 20:40:02 +00:00
|
|
|
logger.Warn("there seems to be no leader. Waiting for one")
|
2017-10-23 11:46:37 +00:00
|
|
|
rctx, cancel := context.WithTimeout(
|
2018-06-27 04:03:15 +00:00
|
|
|
ctx,
|
|
|
|
cc.config.WaitForLeaderTimeout,
|
|
|
|
)
|
2017-10-23 11:46:37 +00:00
|
|
|
defer cancel()
|
|
|
|
pidstr, err := cc.raft.WaitForLeader(rctx)
|
|
|
|
|
|
|
|
// means we timed out waiting for a leader
|
|
|
|
// we don't retry in this case
|
|
|
|
if err != nil {
|
Fix: repinning does not re-allocate as needed
Long story: Since #1768 there has been a recurring repinning test failure with
Raft consensus.
Per the test, if a pin is allocated to a peer that has been shut down,
submitting the pin again should re-allocate it to a peer that is still
running.
Investigating why this test fails, and why it fails only with Raft, led to
realizing that this and other similar tests were passing by chance. The
needed re-allocations were made not by the new submission of the pin, but by
the automatic-repinning feature. The actual resubmitted pin was carrying the
same allocations (one of them being the peer that was down), but it was
silently failing because the RedirectToLeader() code path was using
cc.ctx and hitting the peer that had been shut down, which caused it to error.
Fixing the context propagation meant that we would re-overwrite the pin with
the old allocations, and thus the actual behaviour did not pass the test.
So, on one side, this fixes a number of tests that had not disabled automatic
repinning, which was probably getting in the way of things. On the other side,
it removes a condition that prevented re-allocation of pins when they exist
and their options have not changed.
I don't fully understand why that condition was there, since the Allocate() code
returns the old allocations anyway when they are enough, so it should not
re-allocate randomly. I suspect it was preventing some misbehaviour in the
Allocate() code from the time before it was improved with multiple allocators
etc.
2022-09-26 19:35:24 +00:00
|
|
|
err = fmt.Errorf("timed out waiting for leader: %w", err)
|
|
|
|
logger.Error(err)
|
|
|
|
return false, err
|
2017-10-23 11:46:37 +00:00
|
|
|
}
|
2020-04-14 17:58:00 +00:00
|
|
|
leader, err = peer.Decode(pidstr)
|
2017-10-23 11:46:37 +00:00
|
|
|
if err != nil {
|
2022-09-26 19:35:24 +00:00
|
|
|
logger.Error(err)
|
2017-10-23 11:46:37 +00:00
|
|
|
return false, err
|
|
|
|
}
|
2017-02-02 22:52:06 +00:00
|
|
|
}
|
2017-10-23 11:46:37 +00:00
|
|
|
|
|
|
|
// We are the leader. Do not redirect
|
|
|
|
if leader == cc.host.ID() {
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
|
2017-11-15 01:33:46 +00:00
|
|
|
logger.Debugf("redirecting %s to leader: %s", method, leader.Pretty())
|
2018-06-27 04:03:15 +00:00
|
|
|
finalErr = cc.rpcClient.CallContext(
|
|
|
|
ctx,
|
2017-10-23 11:46:37 +00:00
|
|
|
leader,
|
2019-05-04 20:36:10 +00:00
|
|
|
"Consensus",
|
2017-10-23 11:46:37 +00:00
|
|
|
method,
|
|
|
|
arg,
|
2018-06-27 04:03:15 +00:00
|
|
|
&struct{}{},
|
|
|
|
)
|
2017-10-23 11:46:37 +00:00
|
|
|
if finalErr != nil {
|
2019-02-27 17:04:35 +00:00
|
|
|
logger.Errorf("retrying to redirect request to leader: %s", finalErr)
|
2017-10-23 11:46:37 +00:00
|
|
|
time.Sleep(2 * cc.config.RaftConfig.HeartbeatTimeout)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
break
|
2017-01-23 13:01:49 +00:00
|
|
|
}
|
2017-01-30 12:12:25 +00:00
|
|
|
|
2017-10-23 11:46:37 +00:00
|
|
|
// We tried to redirect, but something happened
|
|
|
|
return true, finalErr
|
2017-01-23 13:01:49 +00:00
|
|
|
}
|
|
|
|
|
2017-10-23 11:46:37 +00:00
|
|
|
// commit submits a cc.consensus commit. It retries upon failures.
|
2018-06-27 04:03:15 +00:00
|
|
|
func (cc *Consensus) commit(ctx context.Context, op *LogOp, rpcOp string, redirectArg interface{}) error {
|
|
|
|
ctx, span := trace.StartSpan(ctx, "consensus/commit")
|
|
|
|
defer span.End()
|
|
|
|
|
|
|
|
if cc.config.Tracing {
|
|
|
|
// required to cross the serialized boundary
|
|
|
|
op.SpanCtx = span.SpanContext()
|
|
|
|
tagmap := tag.FromContext(ctx)
|
|
|
|
if tagmap != nil {
|
|
|
|
op.TagCtx = tag.Encode(tagmap)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-02-02 22:52:06 +00:00
|
|
|
var finalErr error
|
2017-10-23 11:46:37 +00:00
|
|
|
for i := 0; i <= cc.config.CommitRetries; i++ {
|
2017-10-30 09:59:03 +00:00
|
|
|
logger.Debugf("attempt #%d: committing %+v", i, op)
|
2017-10-23 11:46:37 +00:00
|
|
|
|
|
|
|
// this means we are retrying
|
|
|
|
if finalErr != nil {
|
2017-10-31 10:20:14 +00:00
|
|
|
logger.Errorf("retrying upon failed commit (retry %d): %s ",
|
2017-10-23 11:46:37 +00:00
|
|
|
i, finalErr)
|
2017-02-02 22:52:06 +00:00
|
|
|
}
|
2017-01-23 13:01:49 +00:00
|
|
|
|
2017-10-23 11:46:37 +00:00
|
|
|
// try to send it to the leader
|
|
|
|
// redirectToLeader has its own retry loop. If this fails
|
|
|
|
// we're done here.
|
2022-09-21 15:31:33 +00:00
|
|
|
ok, err := cc.redirectToLeader(ctx, rpcOp, redirectArg)
|
2017-10-23 11:46:37 +00:00
|
|
|
if err != nil || ok {
|
|
|
|
return err
|
2017-02-02 22:52:06 +00:00
|
|
|
}
|
2017-01-23 13:01:49 +00:00
|
|
|
|
2017-10-23 11:46:37 +00:00
|
|
|
// Being here means we are the LEADER. We can commit.
|
2017-02-02 22:52:06 +00:00
|
|
|
|
2017-10-23 11:46:37 +00:00
|
|
|
// now commit the changes to our state
|
2018-08-15 10:27:01 +00:00
|
|
|
cc.shutdownLock.RLock() // do not shut down while committing
|
2017-10-31 10:20:14 +00:00
|
|
|
_, finalErr = cc.consensus.CommitOp(op)
|
2018-08-15 10:27:01 +00:00
|
|
|
cc.shutdownLock.RUnlock()
|
2017-10-23 11:46:37 +00:00
|
|
|
if finalErr != nil {
|
|
|
|
goto RETRY
|
|
|
|
}
|
|
|
|
|
|
|
|
switch op.Type {
|
2017-10-30 10:27:39 +00:00
|
|
|
case LogOpPin:
|
|
|
|
logger.Infof("pin committed to global state: %s", op.Cid.Cid)
|
|
|
|
case LogOpUnpin:
|
|
|
|
logger.Infof("unpin committed to global state: %s", op.Cid.Cid)
|
2017-02-02 22:52:06 +00:00
|
|
|
}
|
|
|
|
break
|
|
|
|
|
2017-10-23 11:46:37 +00:00
|
|
|
RETRY:
|
|
|
|
time.Sleep(cc.config.CommitRetryDelay)
|
2016-12-02 18:33:39 +00:00
|
|
|
}
|
2017-10-23 11:46:37 +00:00
|
|
|
return finalErr
|
2016-12-02 18:33:39 +00:00
|
|
|
}
|
|
|
|
|
2017-02-02 22:52:06 +00:00
|
|
|
// LogPin submits a Cid to the shared state of the cluster. It will forward
|
|
|
|
// the operation to the leader if this is not it.
|
2022-03-19 01:52:46 +00:00
|
|
|
func (cc *Consensus) LogPin(ctx context.Context, pin api.Pin) error {
|
2018-06-27 04:03:15 +00:00
|
|
|
ctx, span := trace.StartSpan(ctx, "consensus/LogPin")
|
|
|
|
defer span.End()
|
|
|
|
|
|
|
|
op := cc.op(ctx, pin, LogOpPin)
|
2019-05-04 20:36:10 +00:00
|
|
|
err := cc.commit(ctx, op, "LogPin", pin)
|
2017-10-23 11:46:37 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
2017-02-02 22:52:06 +00:00
|
|
|
}
|
|
|
|
|
2016-12-19 17:35:24 +00:00
|
|
|
// LogUnpin removes a Cid from the shared state of the cluster.
|
2022-03-19 01:52:46 +00:00
|
|
|
func (cc *Consensus) LogUnpin(ctx context.Context, pin api.Pin) error {
|
2018-06-27 04:03:15 +00:00
|
|
|
ctx, span := trace.StartSpan(ctx, "consensus/LogUnpin")
|
|
|
|
defer span.End()
|
|
|
|
|
|
|
|
op := cc.op(ctx, pin, LogOpUnpin)
|
2019-05-04 20:36:10 +00:00
|
|
|
err := cc.commit(ctx, op, "LogUnpin", pin)
|
2017-10-23 11:46:37 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
2017-02-02 22:52:06 +00:00
|
|
|
}
|
2017-01-23 13:01:49 +00:00
|
|
|
|
2017-11-08 19:04:04 +00:00
|
|
|
// AddPeer adds a new peer to participate in this consensus. It will
|
2017-02-02 22:52:06 +00:00
|
|
|
// forward the operation to the leader if this is not it.
|
2018-06-27 04:03:15 +00:00
|
|
|
func (cc *Consensus) AddPeer(ctx context.Context, pid peer.ID) error {
|
|
|
|
ctx, span := trace.StartSpan(ctx, "consensus/AddPeer")
|
|
|
|
defer span.End()
|
|
|
|
|
2017-11-08 19:04:04 +00:00
|
|
|
var finalErr error
|
|
|
|
for i := 0; i <= cc.config.CommitRetries; i++ {
|
|
|
|
logger.Debugf("attempt #%d: AddPeer %s", i, pid.Pretty())
|
|
|
|
if finalErr != nil {
|
|
|
|
logger.Errorf("retrying to add peer. Attempt #%d failed: %s", i, finalErr)
|
|
|
|
}
|
2022-09-21 15:31:33 +00:00
|
|
|
ok, err := cc.redirectToLeader(ctx, "AddPeer", pid)
|
2017-11-08 19:04:04 +00:00
|
|
|
if err != nil || ok {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
// Being here means we are the leader and can commit
|
2018-08-15 10:27:01 +00:00
|
|
|
cc.shutdownLock.RLock() // do not shutdown while committing
|
2022-09-06 14:57:17 +00:00
|
|
|
finalErr = cc.raft.AddPeer(ctx, pid.String())
|
2019-02-20 14:24:25 +00:00
|
|
|
|
2018-08-15 10:27:01 +00:00
|
|
|
cc.shutdownLock.RUnlock()
|
2017-11-08 19:04:04 +00:00
|
|
|
if finalErr != nil {
|
|
|
|
time.Sleep(cc.config.CommitRetryDelay)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
logger.Infof("peer added to Raft: %s", pid.Pretty())
|
|
|
|
break
|
|
|
|
}
|
|
|
|
return finalErr
|
2016-12-02 18:33:39 +00:00
|
|
|
}
|
|
|
|
|
2017-11-08 19:04:04 +00:00
|
|
|
// RmPeer removes a peer from this consensus. It will
|
2017-02-02 22:52:06 +00:00
|
|
|
// forward the operation to the leader if this is not it.
|
2018-06-27 04:03:15 +00:00
|
|
|
func (cc *Consensus) RmPeer(ctx context.Context, pid peer.ID) error {
|
|
|
|
ctx, span := trace.StartSpan(ctx, "consensus/RmPeer")
|
|
|
|
defer span.End()
|
|
|
|
|
2017-11-08 19:04:04 +00:00
|
|
|
var finalErr error
|
|
|
|
for i := 0; i <= cc.config.CommitRetries; i++ {
|
|
|
|
logger.Debugf("attempt #%d: RmPeer %s", i, pid.Pretty())
|
|
|
|
if finalErr != nil {
|
2017-11-14 22:29:56 +00:00
|
|
|
logger.Errorf("retrying to remove peer. Attempt #%d failed: %s", i, finalErr)
|
2017-11-08 19:04:04 +00:00
|
|
|
}
|
2022-09-21 15:31:33 +00:00
|
|
|
ok, err := cc.redirectToLeader(ctx, "RmPeer", pid)
|
2017-11-08 19:04:04 +00:00
|
|
|
if err != nil || ok {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
// Being here means we are the leader and can commit
|
2018-08-15 10:27:01 +00:00
|
|
|
cc.shutdownLock.RLock() // do not shutdown while committing
|
2022-09-06 14:57:17 +00:00
|
|
|
finalErr = cc.raft.RemovePeer(ctx, pid.String())
|
2018-08-15 10:27:01 +00:00
|
|
|
cc.shutdownLock.RUnlock()
|
2017-11-08 19:04:04 +00:00
|
|
|
if finalErr != nil {
|
|
|
|
time.Sleep(cc.config.CommitRetryDelay)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
logger.Infof("peer removed from Raft: %s", pid.Pretty())
|
|
|
|
break
|
2017-02-02 22:52:06 +00:00
|
|
|
}
|
2017-11-08 19:04:04 +00:00
|
|
|
return finalErr
|
2017-01-30 12:12:25 +00:00
|
|
|
}
|
|
|
|
|
2019-02-20 14:24:25 +00:00
|
|
|
// State retrieves the current consensus State. It may error if no State has
|
|
|
|
// been agreed upon or the state is not consistent. The returned State is the
|
|
|
|
// last agreed-upon State known by this node. No writes are allowed, as all
|
|
|
|
// writes to the shared state should happen through the Consensus component
|
|
|
|
// methods.
|
|
|
|
func (cc *Consensus) State(ctx context.Context) (state.ReadOnly, error) {
|
2020-04-14 17:58:00 +00:00
|
|
|
_, span := trace.StartSpan(ctx, "consensus/State")
|
2018-06-27 04:03:15 +00:00
|
|
|
defer span.End()
|
|
|
|
|
2016-12-09 19:54:46 +00:00
|
|
|
st, err := cc.consensus.GetLogHead()
|
2019-02-20 14:24:25 +00:00
|
|
|
if err == libp2praft.ErrNoState {
|
|
|
|
return state.Empty(), nil
|
|
|
|
}
|
|
|
|
|
2016-12-09 19:54:46 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-03-10 16:24:25 +00:00
|
|
|
state, ok := st.(state.State)
|
2016-12-09 19:54:46 +00:00
|
|
|
if !ok {
|
2016-12-15 13:19:41 +00:00
|
|
|
return nil, errors.New("wrong state type")
|
2016-12-09 19:54:46 +00:00
|
|
|
}
|
|
|
|
return state, nil
|
|
|
|
}
|
|
|
|
|
2016-12-28 15:25:24 +00:00
|
|
|
// Leader returns the peerID of the Leader of the
|
|
|
|
// cluster. It returns an error when there is no leader.
|
2018-06-27 04:03:15 +00:00
|
|
|
func (cc *Consensus) Leader(ctx context.Context) (peer.ID, error) {
|
2020-04-14 17:58:00 +00:00
|
|
|
_, span := trace.StartSpan(ctx, "consensus/Leader")
|
2018-06-27 04:03:15 +00:00
|
|
|
defer span.End()
|
|
|
|
|
2017-10-23 11:46:37 +00:00
|
|
|
// Note the hard-dependency on raft here...
|
2016-12-02 18:33:39 +00:00
|
|
|
raftactor := cc.actor.(*libp2praft.Actor)
|
|
|
|
return raftactor.Leader()
|
|
|
|
}
|
|
|
|
|
2019-02-20 14:24:25 +00:00
|
|
|
// Clean removes the Raft persisted state.
|
2018-06-27 04:03:15 +00:00
|
|
|
func (cc *Consensus) Clean(ctx context.Context) error {
|
2020-04-14 17:58:00 +00:00
|
|
|
_, span := trace.StartSpan(ctx, "consensus/Clean")
|
2018-06-27 04:03:15 +00:00
|
|
|
defer span.End()
|
|
|
|
|
2018-08-15 10:27:01 +00:00
|
|
|
cc.shutdownLock.RLock()
|
|
|
|
defer cc.shutdownLock.RUnlock()
|
2017-11-01 12:25:28 +00:00
|
|
|
if !cc.shutdown {
|
|
|
|
return errors.New("consensus component is not shutdown")
|
|
|
|
}
|
|
|
|
|
2019-02-20 14:24:25 +00:00
|
|
|
return CleanupRaft(cc.config)
|
2017-11-01 12:25:28 +00:00
|
|
|
}
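A hedged usage sketch (not from the source): Clean refuses to act on a live component, so a caller shuts the consensus down first; assumes cc and ctx are in scope.

if err := cc.Shutdown(ctx); err != nil {
	logger.Error(err)
}
// Only after Shutdown has run will Clean proceed; otherwise it returns
// the "consensus component is not shutdown" error seen above.
if err := cc.Clean(ctx); err != nil {
	logger.Error(err)
}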
|
|
|
|
|
2016-12-28 15:25:24 +00:00
|
|
|
// Rollback replaces the current agreed-upon
|
|
|
|
// state with the state provided. Only the consensus leader
|
|
|
|
// can perform this operation.
|
2017-03-10 16:24:25 +00:00
|
|
|
func (cc *Consensus) Rollback(state state.State) error {
|
2017-10-23 11:46:37 +00:00
|
|
|
// This is unused. It *might* be used for upgrades.
|
|
|
|
// There is rather untested magic in libp2p-raft's FSM()
|
|
|
|
// to make this possible.
|
2016-12-02 18:33:39 +00:00
|
|
|
return cc.consensus.Rollback(state)
|
|
|
|
}
|
2017-10-23 11:46:37 +00:00
|
|
|
|
2017-11-08 19:04:04 +00:00
|
|
|
// Peers returns the current list of peers in the consensus.
|
|
|
|
// The list will be sorted alphabetically.
|
2018-06-27 04:03:15 +00:00
|
|
|
func (cc *Consensus) Peers(ctx context.Context) ([]peer.ID, error) {
|
|
|
|
ctx, span := trace.StartSpan(ctx, "consensus/Peers")
|
|
|
|
defer span.End()
|
|
|
|
|
2018-08-15 10:27:01 +00:00
|
|
|
cc.shutdownLock.RLock() // prevent shutdown while here
|
|
|
|
defer cc.shutdownLock.RUnlock()
|
|
|
|
|
2017-11-08 19:04:04 +00:00
|
|
|
if cc.shutdown { // things hang a lot in this case
|
|
|
|
return nil, errors.New("consensus is shutdown")
|
|
|
|
}
|
|
|
|
peers := []peer.ID{}
|
2018-06-27 04:03:15 +00:00
|
|
|
raftPeers, err := cc.raft.Peers(ctx)
|
2017-11-08 19:04:04 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("cannot retrieve list of peers: %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
sort.Strings(raftPeers)
|
|
|
|
|
|
|
|
for _, p := range raftPeers {
|
2020-04-14 17:58:00 +00:00
|
|
|
id, err := peer.Decode(p)
|
2017-11-08 19:04:04 +00:00
|
|
|
if err != nil {
|
|
|
|
panic("could not decode peer")
|
|
|
|
}
|
|
|
|
peers = append(peers, id)
|
|
|
|
}
|
|
|
|
return peers, nil
|
|
|
|
}
|
|
|
|
|
2019-02-20 14:24:25 +00:00
|
|
|
// OfflineState returns a cluster state by reading the Raft data and
|
|
|
|
// writing it to the given datastore which is then wrapped as a state.State.
|
|
|
|
// Usually an in-memory datastore suffices. The given datastore should be
|
|
|
|
// thread-safe.
|
|
|
|
func OfflineState(cfg *Config, store ds.Datastore) (state.State, error) {
|
|
|
|
r, snapExists, err := LastStateRaw(cfg)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2022-04-22 13:26:40 +00:00
|
|
|
st, err := dsstate.New(context.Background(), store, cfg.DatastoreNamespace, dsstate.DefaultHandle())
|
2019-02-20 14:24:25 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if !snapExists {
|
|
|
|
return st, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
err = st.Unmarshal(r)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return st, nil
|
|
|
|
}
|
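A hedged example (not part of this file) of how OfflineState might be called: the in-memory map datastore is wrapped with a mutex so it satisfies the thread-safety requirement above; cfg is assumed to be a loaded *Config for this package.

import (
	ds "github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
)

// offlinePinset is a hypothetical helper that loads the last Raft snapshot
// into a fresh in-memory state without starting the consensus component.
func offlinePinset(cfg *Config) (state.State, error) {
	store := dssync.MutexWrap(ds.NewMapDatastore())
	return OfflineState(cfg, store)
}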