Issue #131: Make sure peers are moved to bootstrap when leaving

Also, do not shut down when seeing our own departure during bootstrap.

License: MIT
Signed-off-by: Hector Sanjuan <hector@protocol.ai>
Hector Sanjuan 2017-11-01 13:41:55 +01:00
parent c912cfd205
commit 073c43e291
2 changed files with 41 additions and 20 deletions
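
For context, a minimal, self-contained Go sketch of the idea in this commit (the types and helpers below are illustrative stand-ins, not ipfs-cluster's actual ones): when a peer leaves, the addresses of the remaining peers become the bootstrap list, so the next start rejoins the cluster instead of coming up alone.

package main

import "fmt"

type config struct {
	Bootstrap []string // multiaddresses to try on the next start
}

type peerManager struct {
	peers map[string]string // peer ID -> multiaddress
	cfg   *config
}

// leave converts the live peer set into bootstrap addresses and clears it,
// analogous to peersAddrs() + resetPeers() + savePeers() in the diff below.
func (pm *peerManager) leave() {
	for _, addr := range pm.peers {
		pm.cfg.Bootstrap = append(pm.cfg.Bootstrap, addr)
	}
	pm.peers = make(map[string]string) // resetPeers
	// a real implementation would persist state here (savePeers)
}

func main() {
	pm := &peerManager{
		peers: map[string]string{"QmPeerA": "/ip4/10.0.0.2/tcp/9096"},
		cfg:   &config{},
	}
	pm.leave()
	fmt.Println(pm.cfg.Bootstrap) // [/ip4/10.0.0.2/tcp/9096]
}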


@@ -429,16 +429,23 @@ func (c *Cluster) Shutdown() error {
 	logger.Info("shutting down Cluster")

-	// Only attempt to leave if consensus is initialized and cluster
-	// was ready at some point. Otherwise, it would mean bootstrap failed.
-	if c.config.LeaveOnShutdown && c.consensus != nil && c.readyB {
+	// Only attempt to leave if:
+	// - consensus is initialized
+	// - cluster was ready (no bootstrapping error)
+	// - we have not been removed already (means PeerRemove() was called on us)
+	if c.consensus != nil && c.config.LeaveOnShutdown && c.readyB && !c.removed {
+		c.removed = true
 		// best effort
 		logger.Warning("attempting to leave the cluster. This may take some seconds")
 		err := c.consensus.LogRmPeer(c.id)
 		if err != nil {
 			logger.Error("leaving cluster: " + err.Error())
 		}
-		c.removed = true
+		// save peers as bootstrappers
+		c.config.Bootstrap = c.peerManager.peersAddrs()
+		c.peerManager.resetPeers()
+		c.peerManager.savePeers()
 	}

 	// Cancel contexts
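
Note the ordering above: c.removed is now set before LogRmPeer, rather than after it. A hedged sketch of why this matters (the names below are hypothetical stand-ins, not the actual ipfs-cluster API): once the consensus applies our own departure and the peer manager's rmPeer(self) path fires, the removed flag turns that callback into a no-op instead of a second shutdown.

package sketch

type node struct{ removed bool }

// shutdownLeave marks removed before logging the departure, so the
// consensus callback that later observes it will not shut down again.
func (n *node) shutdownLeave() {
	n.removed = true
	// ... c.consensus.LogRmPeer(c.id) would go here ...
}

// onSelfRemoved stands in for the rmPeer(self) path in the next hunk.
func (n *node) onSelfRemoved() {
	if n.removed {
		return // we initiated the leave; nothing more to do
	}
	// otherwise: save peers as bootstrappers and shut down
}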


@@ -58,6 +58,36 @@ func (pm *peerManager) addPeer(addr ma.Multiaddr, save bool) error {
 func (pm *peerManager) rmPeer(pid peer.ID, save bool) error {
 	logger.Debugf("removing peer %s", pid.Pretty())

+	// Seeing our own departure during bootstrap. Ignore that.
+	if pid == pm.self && !pm.cluster.readyB {
+		return nil
+	}
+
+	// remove ourselves, unless:
+	// - we are not ready yet (means we are bootstrapping)
+	// - we have been removed (means Shutdown() with LeaveOnShutdown flag)
+	if pid == pm.self && pm.cluster.readyB && !pm.cluster.removed {
+		logger.Info("this peer has been removed and will shutdown")
+		pm.cluster.removed = true
+
+		// we are removing ourselves. Therefore we need to:
+		// - convert cluster peers to bootstrapping peers
+		// - shut ourselves down if we are not in the process
+		//
+		// Note that, if we are here, we have already been
+		// removed from the raft.
+
+		// save peers as bootstrappers
+		pm.cluster.config.Bootstrap = pm.peersAddrs()
+		pm.resetPeers()
+		pm.savePeers()
+
+		time.Sleep(1 * time.Second)
+		// should block and do nothing if already doing it
+		pm.cluster.Shutdown()
+		return nil
+	}
+
+	// Removing a different peer
 	if pm.isPeer(pid) {
 		logger.Infof("removing Cluster peer %s", pid.Pretty())
 	}
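
The pm.cluster.Shutdown() call above relies on Shutdown being safe to invoke while a shutdown is already in progress ("should block and do nothing if already doing it"). A sketch of that guard pattern, assuming a mutex plus a boolean flag; ipfs-cluster's actual implementation may differ in detail:

package sketch

import "sync"

type cluster struct {
	lock     sync.Mutex
	shutdown bool
}

func (c *cluster) Shutdown() error {
	c.lock.Lock()
	defer c.lock.Unlock()
	if c.shutdown {
		return nil // a second caller blocks on the lock, then does nothing
	}
	// ... stop components, cancel contexts ...
	c.shutdown = true
	return nil
}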
@@ -66,22 +96,6 @@ func (pm *peerManager) rmPeer(pid peer.ID, save bool) error {
 	delete(pm.peermap, pid)
 	pm.m.Unlock()

-	if pid == pm.self {
-		logger.Info("this peer has been removed and will shutdown")
-		// we are removing ourselves. Therefore we need to:
-		// - convert cluster peers to bootstrapping peers
-		// - shut ourselves down if we are not in the process
-		//
-		// Note that, if we are here, we have already been
-		// removed from the raft.
-		pm.cluster.config.Bootstrap = pm.peersAddrs()
-		pm.resetPeers()
-		time.Sleep(1 * time.Second)
-		pm.cluster.removed = true
-		// should block and do nothing if already doing it
-		pm.cluster.Shutdown()
-	}
-
 	if save {
 		pm.savePeers()
 	}
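
Taken together, rmPeer(self) now distinguishes three outcomes. A compact sketch of that decision order, using hypothetical stand-in types rather than ipfs-cluster's own:

package sketch

type peerMgr struct {
	self    string
	ready   bool // cluster.readyB in the diff
	removed bool
}

// rmPeer mirrors the ordering of the checks added above.
func (pm *peerMgr) rmPeer(pid string) (shutdown bool) {
	if pid == pm.self && !pm.ready {
		return false // bootstrapping: ignore our own departure
	}
	if pid == pm.self && pm.ready && !pm.removed {
		pm.removed = true
		return true // removed by another peer: save bootstrappers, shut down
	}
	return false // removing a different peer, or we already left ourselves
}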