Merge pull request #214 from ipfs/fix/213-save
Issue #213: Fix save in cluster component config
commit e51f771391

@@ -424,7 +424,6 @@ func (c *Cluster) Shutdown() error {
 			time.Sleep(2 * time.Second)
 		}
 		c.config.Bootstrap = c.peerManager.peersAddrs()
 		c.config.NotifySave()
 		c.peerManager.resetPeers()
 	}

@@ -1,6 +1,7 @@
 package config

 import (
+	"context"
 	"encoding/json"
 	"errors"
 	"fmt"

@@ -67,6 +68,9 @@ type jsonSection map[string]*json.RawMessage
 // central configuration file when doing LoadJSON(), and saved to it
 // when doing SaveJSON().
 type Manager struct {
+	ctx    context.Context
+	cancel func()
+	wg     sync.WaitGroup

 	// The Cluster configuration has a top-level
 	// special section.

@@ -87,17 +91,31 @@ type Manager struct {
 // NewManager returns a correctly initialized Manager
 // which is ready to accept component configurations.
 func NewManager() *Manager {
+	ctx, cancel := context.WithCancel(context.Background())
 	return &Manager{
+		ctx:      ctx,
+		cancel:   cancel,
 		sections: make(map[SectionType]Section),
 	}
 }

+func (cfg *Manager) Shutdown() {
+	cfg.cancel()
+	cfg.wg.Wait()
+}
+
 func (cfg *Manager) watchSave(save <-chan struct{}) {
+	defer cfg.wg.Done()
 	for {
 		select {
 		case <-save:
-			cfg.SaveJSON("")
+			err := cfg.SaveJSON("")
+			if err != nil {
+				logger.Error(err)
+			}
+		case <-cfg.ctx.Done():
+			return
 		}
 	}
 }

@@ -141,6 +159,9 @@ func (cfg *Manager) Default() error {

 // RegisterComponent lets the Manager load and save component configurations
 func (cfg *Manager) RegisterComponent(t SectionType, ccfg ComponentConfig) {
+	cfg.wg.Add(1)
+	go cfg.watchSave(ccfg.SaveCh())
+
 	if t == Cluster {
 		cfg.clusterConfig = ccfg
 		return

@@ -156,8 +177,6 @@ func (cfg *Manager) RegisterComponent(t SectionType, ccfg ComponentConfig) {
 	}

 	cfg.sections[t][ccfg.ConfigKey()] = ccfg
-
-	go cfg.watchSave(ccfg.SaveCh())
 }

 // Validate checks that all the registered componenets in this

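The two RegisterComponent hunks above are the heart of the fix: previously the watcher goroutine was only launched at the very end of the function, after the "if t == Cluster" branch had already returned, so NotifySave() on the cluster section was never picked up and the main configuration silently went unsaved. Starting the watcher before any branching, together with the new context/WaitGroup pair, closes that hole. Below is a minimal, self-contained sketch of the same notify/watch/shutdown pattern; SaveCh, NotifySave and watchSave mirror the names in the diff, while the concrete types, the buffered-channel capacity and the Println stand-ins are invented for illustration.

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// component stands in for a ComponentConfig: it owns a buffered channel
// that is poked whenever its configuration should be persisted.
type component struct {
	save chan struct{}
}

func newComponent() *component {
	return &component{save: make(chan struct{}, 10)}
}

// SaveCh returns the channel that the manager watches.
func (c *component) SaveCh() <-chan struct{} {
	return c.save
}

// NotifySave signals, without blocking, that the config wants saving.
func (c *component) NotifySave() {
	select {
	case c.save <- struct{}{}:
	default: // a save is already pending; dropping is fine
	}
}

// manager mirrors config.Manager: one watcher goroutine per registered
// component, all of them stoppable through a shared context.
type manager struct {
	ctx    context.Context
	cancel func()
	wg     sync.WaitGroup
}

func newManager() *manager {
	ctx, cancel := context.WithCancel(context.Background())
	return &manager{ctx: ctx, cancel: cancel}
}

// register starts the watcher before any component-specific branching,
// which is exactly what the fixed RegisterComponent does.
func (m *manager) register(c *component) {
	m.wg.Add(1)
	go m.watchSave(c.SaveCh())
}

func (m *manager) watchSave(save <-chan struct{}) {
	defer m.wg.Done()
	for {
		select {
		case <-save:
			fmt.Println("saving configuration") // stands in for SaveJSON("")
		case <-m.ctx.Done():
			return
		}
	}
}

// shutdown cancels the shared context and waits for the watchers to exit.
func (m *manager) shutdown() {
	m.cancel()
	m.wg.Wait()
}

func main() {
	m := newManager()
	c := newComponent()
	m.register(c)

	c.NotifySave()
	time.Sleep(50 * time.Millisecond) // demo only: let the watcher run
	m.shutdown()
}

Without the wg.Done/wg.Wait handshake, shutdown could return while a watcher was mid-save; without the ctx.Done case, the watcher goroutines would leak after shutdown.
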
@@ -278,8 +297,7 @@ func (cfg *Manager) SaveJSON(path string) error {
 		return err
 	}

-	err = ioutil.WriteFile(path, bs, 0600)
-	return err
+	return ioutil.WriteFile(path, bs, 0600)
 }

 // ToJSON provides a JSON representation of the configuration by

@@ -297,6 +315,7 @@ func (cfg *Manager) ToJSON() ([]byte, error) {

 	if cfg.clusterConfig != nil {
 		raw, err := cfg.clusterConfig.ToJSON()
 		if err != nil {
 			return nil, err
 		}

@@ -317,6 +317,7 @@ func daemon(c *cli.Context) error {
	case <-signalChan:
 		err = cluster.Shutdown()
 		checkErr("shutting down cluster", err)
+		cfg.Shutdown()
 	case <-cluster.Done():
 		return nil

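Note the ordering in the daemon hunk: the cluster is shut down first, which is what may queue a final save of the bootstrap addresses, and only afterwards is cfg.Shutdown() called, whose wg.Wait() keeps the watcher goroutines alive until they have exited. A rough skeleton of that signal-driven teardown, with signalChan as in the hunk and everything else (the done channel, the timing, the Println placeholders) invented for the example:

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	signalChan := make(chan os.Signal, 1)
	signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)

	// done stands in for cluster.Done(); here it simply fires after a
	// while so the example terminates on its own.
	done := make(chan struct{})
	go func() {
		time.Sleep(2 * time.Second)
		close(done)
	}()

	select {
	case <-signalChan:
		fmt.Println("shutting down cluster") // cluster.Shutdown() may notify a last save
		fmt.Println("shutting down config")  // cfg.Shutdown() then waits for the watchers
	case <-done:
		fmt.Println("cluster finished by itself")
	}
}
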
@@ -67,7 +67,6 @@ func (pm *peerManager) rmPeer(pid peer.ID, selfShutdown bool) error {
 			time.Sleep(1 * time.Second)
 			pm.cluster.consensus.Shutdown()
 			pm.cluster.config.Bootstrap = pm.peersAddrs()
 			pm.cluster.config.NotifySave()
 			pm.resetPeers()
 			time.Sleep(4 * time.Second)
 			pm.cluster.Shutdown()

@@ -79,7 +78,9 @@ func (pm *peerManager) rmPeer(pid peer.ID, selfShutdown bool) error {
 }

 func (pm *peerManager) savePeers() {
-	pm.cluster.config.Peers = pm.peersAddrs()
+	peers := pm.peersAddrs()
+	logger.Debugf("saving peers: %s", peers)
+	pm.cluster.config.Peers = peers
 	pm.cluster.config.NotifySave()
 }

@@ -116,7 +117,7 @@ func (pm *peerManager) peers() []peer.ID {
 func (pm *peerManager) peersAddrs() []ma.Multiaddr {
 	pm.m.RLock()
 	defer pm.m.RUnlock()
-	var addrs []ma.Multiaddr
+	addrs := []ma.Multiaddr{}
 	for k, addr := range pm.peermap {
 		if k != pm.self {
 			addrs = append(addrs, addr)

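The last hunk swaps "var addrs []ma.Multiaddr" for "addrs := []ma.Multiaddr{}", which looks cosmetic but matters once the result lands in the saved JSON: encoding/json marshals a nil slice as null and an empty slice as [], so a peer with no companions presumably now persists an empty list rather than null. A quick illustration of the difference (the string slices are stand-ins for the multiaddr slice):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var nilSlice []string    // like: var addrs []ma.Multiaddr
	emptySlice := []string{} // like: addrs := []ma.Multiaddr{}

	a, _ := json.Marshal(nilSlice)
	b, _ := json.Marshal(emptySlice)

	fmt.Println(string(a)) // null
	fmt.Println(string(b)) // []
}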