196aa23f34
Currently, unless doing Join() (--bootstrap), we do not connect to any peers on startup. We however loaded up the peerstore file and Raft will automatically connect older peers to figure out who is the leader etc. DHT bootstrap, after Raft was working, did the rest. For CRDTs we need to connect to people on a normal boot as otherwise, unless bootstrapping, this does not happen, even if the peerstore contains known peers. This introduces a number of changes: * Move peerstore file management back inside the Cluster component, which was already in charge of saving the peerstore file. * We keep saving all "known addresses" but we load them with a non permanent TTL, so that there will be clean up of peers we're not connected to for long. * "Bootstrap" (connect) to a small number of peers during Cluster component creation. * Bootstrap the DHT asap after this, so that other cluster components can initialize with a working peer discovery mechanism. * CRDT Trust() method will now: * Protect the trusted Peer ID in the conn manager * Give top priority in the PeerManager to that Peer (see below) * Mark addresses as permanent in the Peerstore The PeerManager now attaches priorities to peers when importing them and is able to order them according to that priority. The result is that peers with high priority are saved first in the peerstore file. When we load the peerstore file, the first entries in it are given the highest priority. This means that during startup we will connect to "trusted peers" first (because they have been tagged with priority in the previous run and saved at the top of the list). Once connected to a small number of peers, we let the DHT bootstrap process in the background do the rest and discover the network. All this makes the peerstore file a "bootstrap" list for CRDTs and we will attempt to connect to peers on that list until some of those connections succeed.
198 lines
4.1 KiB
Go
198 lines
4.1 KiB
Go
package main
|
|
|
|
import (
|
|
"context"
|
|
"encoding/json"
|
|
"errors"
|
|
"fmt"
|
|
"io"
|
|
|
|
ipfscluster "github.com/ipfs/ipfs-cluster"
|
|
"github.com/ipfs/ipfs-cluster/api"
|
|
"github.com/ipfs/ipfs-cluster/config"
|
|
"github.com/ipfs/ipfs-cluster/consensus/crdt"
|
|
"github.com/ipfs/ipfs-cluster/consensus/raft"
|
|
"github.com/ipfs/ipfs-cluster/datastore/badger"
|
|
"github.com/ipfs/ipfs-cluster/datastore/inmem"
|
|
"github.com/ipfs/ipfs-cluster/pstoremgr"
|
|
"github.com/ipfs/ipfs-cluster/state"
|
|
|
|
ds "github.com/ipfs/go-datastore"
|
|
)
|
|
|
|
// stateManager offers import, export and cleanup operations over the
// persisted cluster pinset for a given consensus backend ("raft" or
// "crdt" — see newStateManager).
type stateManager interface {
	// ImportState reads a stream of JSON-encoded pins and replaces
	// the existing state with them.
	ImportState(io.Reader) error
	// ExportState writes the current state as a stream of
	// JSON-encoded pins.
	ExportState(io.Writer) error
	// GetStore returns the datastore backing the state.
	GetStore() (ds.Datastore, error)
	// Clean removes all consensus-specific state data.
	Clean() error
}
|
|
|
|
func newStateManager(consensus string, ident *config.Identity, cfgs *cfgs) stateManager {
|
|
switch consensus {
|
|
case "raft":
|
|
return &raftStateManager{ident, cfgs}
|
|
case "crdt":
|
|
return &crdtStateManager{ident, cfgs}
|
|
case "":
|
|
checkErr("", errors.New("unspecified consensus component"))
|
|
default:
|
|
checkErr("", fmt.Errorf("unknown consensus component '%s'", consensus))
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// raftStateManager implements stateManager for the raft consensus
// component.
type raftStateManager struct {
	ident *config.Identity // this peer's identity; its ID is added to the raft peerset on import
	cfgs  *cfgs            // loaded cluster/component configurations
}
|
|
|
|
func (raftsm *raftStateManager) GetStore() (ds.Datastore, error) {
|
|
return inmem.New(), nil
|
|
}
|
|
|
|
func (raftsm *raftStateManager) getOfflineState(store ds.Datastore) (state.State, error) {
|
|
return raft.OfflineState(raftsm.cfgs.raftCfg, store)
|
|
}
|
|
|
|
func (raftsm *raftStateManager) ImportState(r io.Reader) error {
|
|
err := raftsm.Clean()
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
store, err := raftsm.GetStore()
|
|
if err != nil {
|
|
return err
|
|
}
|
|
defer store.Close()
|
|
st, err := raftsm.getOfflineState(store)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
err = importState(r, st)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
pm := pstoremgr.New(context.Background(), nil, raftsm.cfgs.clusterCfg.GetPeerstorePath())
|
|
raftPeers := append(
|
|
ipfscluster.PeersFromMultiaddrs(pm.LoadPeerstore()),
|
|
raftsm.ident.ID,
|
|
)
|
|
return raft.SnapshotSave(raftsm.cfgs.raftCfg, st, raftPeers)
|
|
}
|
|
|
|
func (raftsm *raftStateManager) ExportState(w io.Writer) error {
|
|
store, err := raftsm.GetStore()
|
|
if err != nil {
|
|
return err
|
|
}
|
|
defer store.Close()
|
|
st, err := raftsm.getOfflineState(store)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
return exportState(w, st)
|
|
}
|
|
|
|
func (raftsm *raftStateManager) Clean() error {
|
|
return raft.CleanupRaft(raftsm.cfgs.raftCfg)
|
|
}
|
|
|
|
// crdtStateManager implements stateManager for the CRDT consensus
// component.
type crdtStateManager struct {
	ident *config.Identity // this peer's identity (unused by the crdt methods below)
	cfgs  *cfgs            // loaded cluster/component configurations
}
|
|
|
|
func (crdtsm *crdtStateManager) GetStore() (ds.Datastore, error) {
|
|
bds, err := badger.New(crdtsm.cfgs.badgerCfg)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return bds, nil
|
|
}
|
|
|
|
func (crdtsm *crdtStateManager) getOfflineState(store ds.Datastore) (state.BatchingState, error) {
|
|
return crdt.OfflineState(crdtsm.cfgs.crdtCfg, store)
|
|
}
|
|
|
|
func (crdtsm *crdtStateManager) ImportState(r io.Reader) error {
|
|
err := crdtsm.Clean()
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
store, err := crdtsm.GetStore()
|
|
if err != nil {
|
|
return err
|
|
}
|
|
defer store.Close()
|
|
st, err := crdtsm.getOfflineState(store)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
err = importState(r, st)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
return st.Commit(context.Background())
|
|
}
|
|
|
|
func (crdtsm *crdtStateManager) ExportState(w io.Writer) error {
|
|
store, err := crdtsm.GetStore()
|
|
if err != nil {
|
|
return err
|
|
}
|
|
defer store.Close()
|
|
st, err := crdtsm.getOfflineState(store)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
return exportState(w, st)
|
|
}
|
|
|
|
func (crdtsm *crdtStateManager) Clean() error {
|
|
store, err := crdtsm.GetStore()
|
|
if err != nil {
|
|
return err
|
|
}
|
|
defer store.Close()
|
|
return crdt.Clean(context.Background(), crdtsm.cfgs.crdtCfg, store)
|
|
}
|
|
|
|
func importState(r io.Reader, st state.State) error {
|
|
ctx := context.Background()
|
|
dec := json.NewDecoder(r)
|
|
for {
|
|
var pin api.Pin
|
|
err := dec.Decode(&pin)
|
|
if err == io.EOF {
|
|
return nil
|
|
}
|
|
if err != nil {
|
|
return err
|
|
}
|
|
err = st.Add(ctx, &pin)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
}
|
|
}
|
|
|
|
// ExportState saves a json representation of a state
|
|
func exportState(w io.Writer, st state.State) error {
|
|
pins, err := st.List(context.Background())
|
|
if err != nil {
|
|
return err
|
|
}
|
|
enc := json.NewEncoder(w)
|
|
for _, pin := range pins {
|
|
err := enc.Encode(pin)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
}
|
|
return nil
|
|
}
|