// ipfs-cluster/consensus/raft/raft.go

package raft
import (
"context"
"errors"
"os"
"path/filepath"
"time"
hraft "github.com/hashicorp/raft"
raftboltdb "github.com/hashicorp/raft-boltdb"
host "github.com/libp2p/go-libp2p-host"
peer "github.com/libp2p/go-libp2p-peer"
p2praft "github.com/libp2p/go-libp2p-raft"
)
// RaftMaxSnapshots indicates how many snapshots to keep in the consensus data
// folder.
// TODO: Maybe include this in Config. Not sure how useful it is to touch
// this anyways.
var RaftMaxSnapshots = 5
// Are we compiled on a 64-bit architecture?
// https://groups.google.com/forum/#!topic/golang-nuts/vAckmhUMAdQ
// This is used below because raft Observers panic on 32-bit.
const sixtyfour = uint64(^uint(0)) == ^uint64(0)
// raftWrapper performs all Raft-specific operations which are needed by
// Cluster but are not fulfilled by the consensus interface. It should contain
// most of the Raft-related stuff so it can be easily replaced in the future,
// if need be.
type raftWrapper struct {
raft *hraft.Raft
srvConfig hraft.Configuration
transport *hraft.NetworkTransport
snapshotStore hraft.SnapshotStore
logStore hraft.LogStore
stableStore hraft.StableStore
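// boltdb backs both logStore and stableStore above; it is kept
// separately so that Shutdown() can close the underlying database.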
boltdb *raftboltdb.BoltStore
}
// newRaftWrapper creates and launches a go-libp2p-raft consensus peer.
func newRaftWrapper(peers []peer.ID, host host.Host, cfg *Config, fsm hraft.FSM) (*raftWrapper, error) {
// Set correct LocalID
cfg.RaftConfig.LocalID = hraft.ServerID(peer.IDB58Encode(host.ID()))
// Prepare data folder
dataFolder, err := makeDataFolder(cfg.BaseDir, cfg.DataFolder)
if err != nil {
return nil, err
}
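// The Raft server configuration uses the B58-encoded peer IDs both as
// server IDs and as server addresses (see makeServerConf below), which
// the libp2p-based transport dials by peer ID.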
srvCfg := makeServerConf(peers)
logger.Debug("creating libp2p Raft transport")
transport, err := p2praft.NewLibp2pTransport(host, cfg.NetworkTimeout)
if err != nil {
return nil, err
Issue #162: Rework configuration format The following commit reimplements ipfs-cluster configuration under the following premises: * Each component is initialized with a configuration object defined by its module * Each component decides how the JSON representation of its configuration looks like * Each component parses and validates its own configuration * Each component exposes its own defaults * Component configurations are make the sections of a central JSON configuration file (which replaces the current JSON format) * Component configurations implement a common interface (config.ComponentConfig) with a set of common operations * The central configuration file is managed by a config.ConfigManager which: * Registers ComponentConfigs * Assigns the correspondent sections from the JSON file to each component and delegates the parsing * Delegates the JSON generation for each section * Can be notified when the configuration is updated and must be saved to disk The new service.json would then look as follows: ```json { "cluster": { "id": "QmTVW8NoRxC5wBhV7WtAYtRn7itipEESfozWN5KmXUQnk2", "private_key": "<...>", "secret": "00224102ae6aaf94f2606abf69a0e278251ecc1d64815b617ff19d6d2841f786", "peers": [], "bootstrap": [], "leave_on_shutdown": false, "listen_multiaddress": "/ip4/0.0.0.0/tcp/9096", "state_sync_interval": "1m0s", "ipfs_sync_interval": "2m10s", "replication_factor": -1, "monitor_ping_interval": "15s" }, "consensus": { "raft": { "heartbeat_timeout": "1s", "election_timeout": "1s", "commit_timeout": "50ms", "max_append_entries": 64, "trailing_logs": 10240, "snapshot_interval": "2m0s", "snapshot_threshold": 8192, "leader_lease_timeout": "500ms" } }, "api": { "restapi": { "listen_multiaddress": "/ip4/127.0.0.1/tcp/9094", "read_timeout": "30s", "read_header_timeout": "5s", "write_timeout": "1m0s", "idle_timeout": "2m0s" } }, "ipfs_connector": { "ipfshttp": { "proxy_listen_multiaddress": "/ip4/127.0.0.1/tcp/9095", "node_multiaddress": "/ip4/127.0.0.1/tcp/5001", "connect_swarms_delay": "7s", "proxy_read_timeout": "10m0s", "proxy_read_header_timeout": "5s", "proxy_write_timeout": "10m0s", "proxy_idle_timeout": "1m0s" } }, "monitor": { "monbasic": { "check_interval": "15s" } }, "informer": { "disk": { "metric_ttl": "30s", "metric_type": "freespace" }, "numpin": { "metric_ttl": "10s" } } } ``` This new format aims to be easily extensible per component. As such, it already surfaces quite a few new options which were hardcoded before. Additionally, since Go API have changed, some redundant methods have been removed and small refactoring has happened to take advantage of the new way. License: MIT Signed-off-by: Hector Sanjuan <hector@protocol.ai>
2017-10-11 18:23:03 +00:00
}
logger.Debug("creating raft snapshot store")
snapshots, err := hraft.NewFileSnapshotStoreWithLogger(
dataFolder, RaftMaxSnapshots, raftStdLogger)
if err != nil {
return nil, err
}
logger.Debug("creating BoltDB log store")
logStore, err := raftboltdb.NewBoltStore(
filepath.Join(dataFolder, "raft.db"))
if err != nil {
return nil, err
}
logger.Debug("checking for existing raft states")
hasState, err := hraft.HasExistingState(logStore, logStore, snapshots)
if err != nil {
return nil, err
}
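// Bootstrapping writes the initial cluster configuration to the stores.
// It must only happen once for a given data folder, hence the check for
// existing state above.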
if !hasState {
logger.Info("bootstrapping raft cluster")
err := hraft.BootstrapCluster(cfg.RaftConfig,
logStore, logStore, snapshots, transport, srvCfg)
if err != nil {
logger.Error("bootstrapping cluster: ", err)
return nil, err
}
} else {
logger.Info("raft cluster is already bootstrapped")
}
logger.Debug("creating Raft")
r, err := hraft.NewRaft(cfg.RaftConfig,
fsm, logStore, logStore, snapshots, transport)
if err != nil {
logger.Error("initializing raft: ", err)
return nil, err
}
raftW := &raftWrapper{
raft: r,
srvConfig: srvCfg,
transport: transport,
snapshotStore: snapshots,
logStore: logStore,
stableStore: logStore,
boltdb: logStore,
}
// Handle existing, different configuration
if hasState {
cf := r.GetConfiguration()
if err := cf.Error(); err != nil {
return nil, err
}
currentCfg := cf.Configuration()
added, removed := diffConfigurations(srvCfg, currentCfg)
if len(added)+len(removed) > 0 {
raftW.Shutdown()
logger.Warning("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
logger.Warning("Raft peers do not match cluster peers from the configuration.")
logger.Warning("If problems arise, clean this peer and bootstrap it to a working cluster.")
logger.Warning("Raft peers peers:")
for _, s := range currentCfg.Servers {
logger.Warningf(" - %s", s.ID)
}
logger.Warning("Cluster configuration peers:")
for _, s := range srvCfg.Servers {
logger.Warningf(" - %s", s.ID)
}
logger.Warningf("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
//return nil, errors.New("Bad cluster peers")
}
}
return raftW, nil
}
// makeDataFolder returns the folder path after creating it.
// If folder is empty, it uses baseDir + DefaultDataSubFolder.
func makeDataFolder(baseDir, folder string) (string, error) {
if folder == "" {
folder = filepath.Join(baseDir, DefaultDataSubFolder)
}
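// 0700 keeps the data folder, which holds the raft state, readable and
// writable by the owner only.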
err := os.MkdirAll(folder, 0700)
if err != nil {
return "", err
}
return folder, nil
}
// makeServerConf creates a Raft server configuration from a list of peers.
func makeServerConf(peers []peer.ID) hraft.Configuration {
sm := make(map[string]struct{})
servers := make([]hraft.Server, 0)
for _, pid := range peers {
p := peer.IDB58Encode(pid)
_, ok := sm[p]
if !ok { // avoid dups
sm[p] = struct{}{}
servers = append(servers, hraft.Server{
Suffrage: hraft.Voter,
ID: hraft.ServerID(p),
Address: hraft.ServerAddress(p),
})
}
}
return hraft.Configuration{
Servers: servers,
}
}
// diffConfigurations returns the serverIDs added and removed when
// going from configuration c1 to configuration c2.
func diffConfigurations(
c1, c2 hraft.Configuration) (added, removed []hraft.ServerID) {
m1 := make(map[hraft.ServerID]struct{})
m2 := make(map[hraft.ServerID]struct{})
added = make([]hraft.ServerID, 0)
removed = make([]hraft.ServerID, 0)
for _, s := range c1.Servers {
m1[s.ID] = struct{}{}
}
for _, s := range c2.Servers {
m2[s.ID] = struct{}{}
}
for k := range m1 {
_, ok := m2[k]
if !ok {
removed = append(removed, k)
}
}
for k := range m2 {
_, ok := m1[k]
if !ok {
added = append(added, k)
}
}
return
}
// WaitForLeader holds until Raft says we have a leader.
// Returns an error if ctx is cancelled.
func (rw *raftWrapper) WaitForLeader(ctx context.Context) (string, error) {
obsCh := make(chan hraft.Observation, 1)
if sixtyfour { // 32-bit systems don't support observers
observer := hraft.NewObserver(obsCh, false, nil)
rw.raft.RegisterObserver(observer)
defer rw.raft.DeregisterObserver(observer)
}
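// Poll the current leader every half second as a fallback: leader
// observations cannot be fully relied upon (see the issue referenced
// below) and observers are skipped entirely on 32-bit systems.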
ticker := time.NewTicker(time.Second / 2)
for {
select {
case obs := <-obsCh:
_ = obs
// See https://github.com/hashicorp/raft/issues/254
// switch obs.Data.(type) {
// case hraft.LeaderObservation:
// lObs := obs.Data.(hraft.LeaderObservation)
// logger.Infof("Raft Leader elected: %s",
// lObs.Leader)
// return string(lObs.Leader), nil
// }
case <-ticker.C:
if l := rw.raft.Leader(); l != "" {
logger.Debug("waitForleaderTimer")
logger.Infof("Raft Leader elected: %s", l)
ticker.Stop()
return string(l), nil
}
case <-ctx.Done():
return "", ctx.Err()
}
}
}
// WaitForUpdates holds until Raft has synced to the last index in the log
func (rw *raftWrapper) WaitForUpdates(ctx context.Context) error {
logger.Info("Raft state is catching up")
for {
select {
case <-ctx.Done():
return ctx.Err()
default:
lai := rw.raft.AppliedIndex()
li := rw.raft.LastIndex()
logger.Debugf("current Raft index: %d/%d",
lai, li)
if lai == li {
return nil
}
time.Sleep(500 * time.Millisecond)
}
}
}
// Snapshot tells Raft to take a snapshot.
func (rw *raftWrapper) Snapshot() error {
future := rw.raft.Snapshot()
err := future.Error()
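// Having nothing new to snapshot is not an error condition.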
if err != nil && err.Error() != hraft.ErrNothingNewToSnapshot.Error() {
return err
}
return nil
}
// Shutdown shuts down Raft and closes the BoltDB.
func (rw *raftWrapper) Shutdown() error {
future := rw.raft.Shutdown()
err := future.Error()
errMsgs := ""
if err != nil {
errMsgs += "could not shutdown raft: " + err.Error() + ".\n"
}
err = rw.boltdb.Close() // important!
if err != nil {
errMsgs += "could not close boltdb: " + err.Error()
}
if errMsgs != "" {
return errors.New(errMsgs)
}
return nil
}
// AddPeer adds a peer to Raft
func (rw *raftWrapper) AddPeer(peer string) error {
// Check that we don't have it to not waste
// log entries if so.
peers, err := rw.Peers()
if err != nil {
return err
}
if find(peers, peer) {
logger.Infof("%s is already a raft peer", peer)
return nil
}
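// AddVoter's trailing arguments are prevIndex and timeout; they are
// left at their zero values here (see the TODO below about making
// them configurable).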
future := rw.raft.AddVoter(
hraft.ServerID(peer),
hraft.ServerAddress(peer),
0,
0) // TODO: Extra cfg value?
err = future.Error()
if err != nil {
logger.Error("raft cannot add peer: ", err)
}
return err
}
// RemovePeer removes a peer from Raft
func (rw *raftWrapper) RemovePeer(peer string) error {
// Check that we have it to not waste
// log entries if we don't.
peers, err := rw.Peers()
if err != nil {
return err
}
if !find(peers, peer) {
logger.Infof("%s is not among raft peers", peer)
return nil
}
if len(peers) == 1 && peers[0] == peer {
return errors.New("cannot remove ourselves from a 1-peer cluster")
}
rmFuture := rw.raft.RemoveServer(
hraft.ServerID(peer),
0,
0) // TODO: Extra cfg value?
err = rmFuture.Error()
if err != nil {
logger.Error("raft cannot remove peer: ", err)
return err
}
// make sure change is applied everywhere before continuing
// this makes sure that a leaving node gets the memo
// before we shut it down.
bFuture := rw.raft.Barrier(10 * time.Second)
err = bFuture.Error()
if err != nil {
logger.Error(err)
return err
}
return nil
}
// Leader returns Raft's leader. It may be an empty string if
// there is no leader or it is unknown.
func (rw *raftWrapper) Leader() string {
return string(rw.raft.Leader())
}
func (rw *raftWrapper) Peers() ([]string, error) {
ids := make([]string, 0)
configFuture := rw.raft.GetConfiguration()
if err := configFuture.Error(); err != nil {
return nil, err
}
for _, server := range configFuture.Configuration().Servers {
ids = append(ids, string(server.ID))
}
return ids, nil
}
func find(s []string, elem string) bool {
for _, selem := range s {
if selem == elem {
return true
}
}
return false
}