Fix #835: service: init --peers

* Init should take a list of peers

This commit adds a `--peers` option to `ipfs-cluster-service init`:

`ipfs-cluster-service init --peers <multiaddress,multiaddress>`

- Writes the given peer multiaddresses to the peerstore file
- For the `raft` config section, adds the peer IDs to `init_peerset`
- For the `crdt` config section, adds the peer IDs to `trusted_peers` (how each multiaddress maps to a peer ID is sketched below, after the commit metadata)
Kishan Sagathiya, 2019-07-25 14:17:44 +05:30 (committed by Hector Sanjuan)
parent ef8f2cb17d
commit e7b731e0e4
7 changed files with 102 additions and 17 deletions
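
A quick illustration of what happens to each `--peers` multiaddress (a minimal sketch, not part of the commit): the address is parsed with `go-multiaddr` and split into a peer ID plus transport addresses via `peer.AddrInfosFromP2pAddrs`, the same helpers the diff below uses. The peer IDs are what end up in `init_peerset`/`trusted_peers`; the full addresses are what the peerstore file stores. The example address is copied from the sharness test added in this commit.

```go
package main

import (
	"fmt"

	peer "github.com/libp2p/go-libp2p-core/peer"
	ma "github.com/multiformats/go-multiaddr"
)

func main() {
	// Example multiaddress taken from the sharness test in this commit.
	addr, err := ma.NewMultiaddr("/ip4/192.168.0.129/tcp/9196/ipfs/12D3KooWRN8KRjpyg9rsW2w7StbBRGper65psTZm68cjud9KAkaW")
	if err != nil {
		panic(err)
	}

	// Split the multiaddress into peer ID + transport addresses: the ID is
	// what init writes into init_peerset/trusted_peers, while the whole
	// address ends up in the peerstore file.
	infos, err := peer.AddrInfosFromP2pAddrs(addr)
	if err != nil {
		panic(err)
	}
	for _, info := range infos {
		fmt.Println("peer ID:", info.ID, "addresses:", info.Addrs)
	}
}
```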


@@ -620,6 +620,7 @@ func (c *Cluster) Shutdown(ctx context.Context) error {
// Try to store peerset file for all known peers whatsoever
// if we got ready (otherwise, don't overwrite anything)
if c.readyB {
// Ignoring the error since this is best-effort
c.peerManager.SavePeerstoreForPeers(c.host.Peerstore().Peers())
}


@@ -3,15 +3,20 @@ package main
import (
"bufio"
"context"
"fmt"
"io"
"os"
"os/user"
"path/filepath"
"strings"
ipfscluster "github.com/ipfs/ipfs-cluster"
"github.com/ipfs/ipfs-cluster/config"
"github.com/ipfs/ipfs-cluster/pstoremgr"
"github.com/ipfs/ipfs-cluster/version"
peer "github.com/libp2p/go-libp2p-core/peer"
ma "github.com/multiformats/go-multiaddr"
semver "github.com/blang/semver"
logging "github.com/ipfs/go-log"
@@ -226,6 +231,12 @@ environment variable.
Note that the --force first-level flag allows overwriting an existing
configuration with default values. To generate a new identity, please
remove the %s file first and clean any Raft state.
By default, an empty peerstore file will be created too. Initial contents can
be provided with the --peers flag. In this case, the "trusted_peers" list in
the "crdt" configuration section and the "init_peerset" list in the "raft"
configuration section will be prefilled with the peer IDs from the given
multiaddresses.
`,
DefaultConfigFile,
DefaultIdentityFile,
@@ -239,6 +250,10 @@ remove the %s file first and clean any Raft state.
Name: "custom-secret, s",
Usage: "prompt for the cluster secret",
},
cli.StringFlag{
Name: "peers",
Usage: "comma-separated list of multiaddresses to init with",
},
},
Action: func(c *cli.Context) error {
userSecret, userSecretDefined := userProvidedSecret(c.Bool("custom-secret"))
@@ -289,7 +304,25 @@ remove the %s file first and clean any Raft state.
cfgs.clusterCfg.Secret = userSecret
}
// Save
peersOpt := c.String("peers")
var multiAddrs []ma.Multiaddr
if peersOpt != "" {
addrs := strings.Split(peersOpt, ",")
for _, addr := range addrs {
addr = strings.TrimSpace(addr)
multiAddr, err := ma.NewMultiaddr(addr)
checkErr("parsing peer multiaddress: "+addr, err)
multiAddrs = append(multiAddrs, multiAddr)
}
peers := ipfscluster.PeersFromMultiaddrs(multiAddrs)
cfgs.crdtCfg.TrustedPeers = peers
cfgs.raftCfg.InitPeerset = peers
}
// Save config. Creates the folder.
// Sets BaseDir in components.
saveConfig(cfgMgr)
if !identityExists {
@@ -304,6 +337,16 @@ remove the %s file first and clean any Raft state.
checkErr("saving "+DefaultIdentityFile, err)
out("new identity written to %s\n", identityPath)
}
// Initialize peerstore file - even if empty
peerstorePath := cfgs.clusterCfg.GetPeerstorePath()
peerManager := pstoremgr.New(context.Background(), nil, peerstorePath)
addrInfos, err := peer.AddrInfosFromP2pAddrs(multiAddrs...)
checkErr("getting AddrInfos from peer multiaddresses", err)
err = peerManager.SavePeerstore(addrInfos)
checkErr("saving peers to peerstore", err)
out("peerstore written to %s with %d entries\n", peerstorePath, len(multiAddrs))
return nil
},
},
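
For reference, the parsing that the init Action above performs on the `--peers` value can be written as a self-contained helper. This is a hypothetical sketch: the `parsePeersFlag` name and the error-returning style are not in the commit (which calls `checkErr` instead), but the split/trim/parse steps are the same.

```go
package main

import (
	"fmt"
	"strings"

	ma "github.com/multiformats/go-multiaddr"
)

// parsePeersFlag mirrors what the init action does with the --peers value:
// split on commas, trim whitespace, and parse each entry as a multiaddress.
func parsePeersFlag(peersOpt string) ([]ma.Multiaddr, error) {
	var multiAddrs []ma.Multiaddr
	if peersOpt == "" {
		return multiAddrs, nil
	}
	for _, addr := range strings.Split(peersOpt, ",") {
		maddr, err := ma.NewMultiaddr(strings.TrimSpace(addr))
		if err != nil {
			return nil, fmt.Errorf("parsing peer multiaddress %q: %s", addr, err)
		}
		multiAddrs = append(multiAddrs, maddr)
	}
	return multiAddrs, nil
}

func main() {
	addrs, err := parsePeersFlag("/ip4/192.168.0.129/tcp/9196/ipfs/12D3KooWRN8KRjpyg9rsW2w7StbBRGper65psTZm68cjud9KAkaW")
	if err != nil {
		panic(err)
	}
	fmt.Println(len(addrs), "peer address(es) parsed")
}
```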


@@ -372,9 +372,9 @@ func (cfg *Manager) LoadJSON(bs []byte) error {
}
loadCompJSON := func(name string, component ComponentConfig, jsonSection jsonSection, t SectionType) error {
component.SetBaseDir(dir)
raw, ok := jsonSection[name]
if ok {
component.SetBaseDir(dir)
err := component.LoadJSON([]byte(*raw))
if err != nil {
return err
@@ -383,7 +383,6 @@
} else {
cfg.undefinedComps[t][name] = true
logger.Warningf("%s component is empty, generating default", name)
component.SetBaseDir(dir)
component.Default()
}
@@ -425,8 +424,8 @@ func (cfg *Manager) SaveJSON(path string) error {
logger.Info("Saving configuration")
if path == "" {
path = cfg.path
if path != "" {
cfg.path = path
}
bs, err := cfg.ToJSON()
@@ -434,12 +433,14 @@
return err
}
return ioutil.WriteFile(path, bs, 0600)
return ioutil.WriteFile(cfg.path, bs, 0600)
}
// ToJSON provides a JSON representation of the configuration by
// generating JSON for all components registered.
func (cfg *Manager) ToJSON() ([]byte, error) {
dir := filepath.Dir(cfg.path)
err := cfg.Validate()
if err != nil {
return nil, err
@@ -451,6 +452,7 @@
}
if cfg.clusterConfig != nil {
cfg.clusterConfig.SetBaseDir(dir)
raw, err := cfg.clusterConfig.ToJSON()
if err != nil {
@@ -465,6 +467,7 @@
// component-configurations in the latter.
updateJSONConfigs := func(section Section, dest *jsonSection) error {
for k, v := range section {
v.SetBaseDir(dir)
logger.Debugf("writing changes for %s section", k)
j, err := v.ToJSON()
if err != nil {


@@ -103,8 +103,7 @@ func (cfg *Config) LoadJSON(raw []byte) error {
}
func (cfg *Config) applyJSONConfig(jcfg *jsonConfig) error {
cfg.ClusterName = jcfg.ClusterName
config.SetIfNotDefault(jcfg.ClusterName, &cfg.ClusterName)
for _, p := range jcfg.TrustedPeers {
if p == "*" {
cfg.TrustAll = true


@@ -243,9 +243,9 @@ func (pm *Manager) LoadPeerstore() (addrs []ma.Multiaddr) {
// SavePeerstore stores a slice of multiaddresses in the peerstore file, one
// per line.
func (pm *Manager) SavePeerstore(pinfos []peer.AddrInfo) {
func (pm *Manager) SavePeerstore(pinfos []peer.AddrInfo) error {
if pm.peerstorePath == "" {
return
return nil
}
pm.peerstoreLock.Lock()
@@ -253,31 +253,40 @@
f, err := os.Create(pm.peerstorePath)
if err != nil {
logger.Warningf(
logger.Errorf(
"could not save peer addresses to %s: %s",
pm.peerstorePath,
err,
)
return
return err
}
defer f.Close()
for _, pinfo := range pinfos {
if len(pinfo.Addrs) == 0 {
logger.Warning("address info does not have any multiaddresses")
continue
}
addrs, err := peer.AddrInfoToP2pAddrs(&pinfo)
if err != nil {
logger.Warning(err)
continue
}
for _, a := range addrs {
f.Write([]byte(fmt.Sprintf("%s\n", a.String())))
_, err = f.Write([]byte(fmt.Sprintf("%s\n", a.String())))
if err != nil {
return err
}
}
}
return nil
}
// SavePeerstoreForPeers calls PeerInfos and then saves the peerstore
// file using the result.
func (pm *Manager) SavePeerstoreForPeers(peers []peer.ID) {
pm.SavePeerstore(pm.PeerInfos(peers))
func (pm *Manager) SavePeerstoreForPeers(peers []peer.ID) error {
return pm.SavePeerstore(pm.PeerInfos(peers))
}
// Bootstrap attempts to get up to "count" connected peers by trying those
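
Since `SavePeerstore` and `SavePeerstoreForPeers` now return an error, callers are expected to handle it, as the test changes below do. A minimal usage sketch, assuming a standalone program writing to a local `peerstore` file; only `pstoremgr.New` and the changed method signature are taken from this diff, everything else is illustrative.

```go
package main

import (
	"context"
	"log"

	"github.com/ipfs/ipfs-cluster/pstoremgr"
	peer "github.com/libp2p/go-libp2p-core/peer"
	ma "github.com/multiformats/go-multiaddr"
)

func main() {
	// As in the init code above, no libp2p host is needed just to write the
	// peerstore file, so a nil host is passed.
	pm := pstoremgr.New(context.Background(), nil, "peerstore")

	addr, err := ma.NewMultiaddr("/ip4/192.168.0.129/tcp/9196/ipfs/12D3KooWRN8KRjpyg9rsW2w7StbBRGper65psTZm68cjud9KAkaW")
	if err != nil {
		log.Fatal(err)
	}
	infos, err := peer.AddrInfosFromP2pAddrs(addr)
	if err != nil {
		log.Fatal(err)
	}

	// SavePeerstore now reports write failures instead of only logging them.
	if err := pm.SavePeerstore(infos); err != nil {
		log.Fatal("saving peerstore: ", err)
	}
}
```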


@@ -111,7 +111,10 @@ func TestPeerstore(t *testing.T) {
t.Fatal(err)
}
pm.SavePeerstoreForPeers([]peer.ID{test.PeerID1})
err = pm.SavePeerstoreForPeers([]peer.ID{test.PeerID1})
if err != nil {
t.Error(err)
}
pm2 := makeMgr(t)
defer clean(pm2)
@@ -172,7 +175,10 @@ func TestPriority(t *testing.T) {
t.Fatal("PeerID1 should be last in the list")
}
pm.SavePeerstoreForPeers([]peer.ID{test.PeerID4, test.PeerID2, test.PeerID3, test.PeerID1})
err = pm.SavePeerstoreForPeers([]peer.ID{test.PeerID4, test.PeerID2, test.PeerID3, test.PeerID1})
if err != nil {
t.Error(err)
}
pm2 := makeMgr(t)
defer clean(pm2)

sharness/t0021-service-init.sh (new executable file, 24 lines added)

@@ -0,0 +1,24 @@
#!/bin/bash
test_description="Test init functionality"
. lib/test-lib.sh
test_expect_success "cluster-service init with --peers succeeds and fills peerstore" '
PEER1=/ip4/192.168.0.129/tcp/9196/ipfs/12D3KooWRN8KRjpyg9rsW2w7StbBRGper65psTZm68cjud9KAkaW
PEER2=/ip4/192.168.0.129/tcp/9196/ipfs/12D3KooWPwrYNj7VficHw5qYidepMGA85756kYgMdNmRM9A1ZHjN
echo $PEER1 >> testPeerstore
echo $PEER2 >> testPeerstore
ipfs-cluster-service --config "test-config" init --peers $PEER1,$PEER2 &&
test_cmp testPeerstore test-config/peerstore
'
test_expect_success "cluster-service init without --peers succeeds and creates empty peerstore" '
ipfs-cluster-service -f --config "test-config" init &&
[ -f "test-config/peerstore" ] &&
[ ! -s "test-config/peerstore" ]
'
test_clean_cluster
test_done