diff --git a/consensus/raft/raft.go b/consensus/raft/raft.go
index d34c9f47..79db55c9 100644
--- a/consensus/raft/raft.go
+++ b/consensus/raft/raft.go
@@ -38,11 +38,6 @@ var RaftMaxSnapshots = 5
 // This is used to reduce disk I/O for the recently committed entries.
 var RaftLogCacheSize = 512
 
-// Are we compiled on a 64-bit architecture?
-// https://groups.google.com/forum/#!topic/golang-nuts/vAckmhUMAdQ
-// This is used below because raft Observers panic on 32-bit.
-const sixtyfour = uint64(^uint(0)) == ^uint64(0)
-
 // How long we wait for updates during shutdown before snapshotting
 var waitForUpdatesShutdownTimeout = 5 * time.Second
 var waitForUpdatesInterval = 100 * time.Millisecond
@@ -260,25 +255,9 @@ func (rw *raftWrapper) WaitForLeader(ctx context.Context) (string, error) {
 	ctx, span := trace.StartSpan(ctx, "consensus/raft/WaitForLeader")
 	defer span.End()
 
-	obsCh := make(chan hraft.Observation, 1)
-	if sixtyfour { // 32-bit systems don't support observers
-		observer := hraft.NewObserver(obsCh, false, nil)
-		rw.raft.RegisterObserver(observer)
-		defer rw.raft.DeregisterObserver(observer)
-	}
 	ticker := time.NewTicker(time.Second / 2)
 	for {
 		select {
-		case obs := <-obsCh:
-			_ = obs
-			// See https://github.com/hashicorp/raft/issues/254
-			// switch obs.Data.(type) {
-			// case hraft.LeaderObservation:
-			// 	lObs := obs.Data.(hraft.LeaderObservation)
-			// 	logger.Infof("Raft Leader elected: %s",
-			// 		lObs.Leader)
-			// 	return string(lObs.Leader), nil
-			// }
		case <-ticker.C:
			if l := rw.raft.Leader(); l != "" {
				logger.Debug("waitForleaderTimer")
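
For context on the deleted constant: `sixtyfour` used a compile-time trick to detect 64-bit builds. `^uint(0)` is a `uint` with every bit set; widening it to `uint64` and comparing against an all-ones `uint64` is only true when `uint` itself is 64 bits wide. A minimal standalone sketch demonstrating the trick (the `main` wrapper here is illustrative, not from the patch):

```go
package main

import "fmt"

// True only on platforms where uint is 64 bits wide: ^uint(0) sets
// every bit of a uint, so the widened value equals ^uint64(0) exactly
// when no high bits were missing in the first place.
const sixtyfour = uint64(^uint(0)) == ^uint64(0)

func main() {
	fmt.Println("64-bit platform:", sixtyfour)
}
```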
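The removed observer path was meant to return as soon as a `hraft.LeaderObservation` arrived, but it was commented out because leader observations were unreliable (hashicorp/raft#254), and observers panic on 32-bit platforms per the deleted comment, so the channel never fired and the code was dead weight. After this patch, `WaitForLeader` relies on polling alone. Below is a simplified, self-contained sketch of the resulting behavior, assuming the function's remaining body also selects on `ctx.Done()`; `raftLeader` is a hypothetical stand-in for `rw.raft.Leader()`:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// waitForLeader polls for an elected leader every half second, the
// same cadence as the ticker in the patched function. raftLeader is a
// hypothetical stand-in for rw.raft.Leader(); the real method also
// logs and runs under an OpenCensus span.
func waitForLeader(ctx context.Context, raftLeader func() string) (string, error) {
	ticker := time.NewTicker(time.Second / 2)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			// A non-empty leader address means an election completed.
			if l := raftLeader(); l != "" {
				return l, nil
			}
		case <-ctx.Done():
			return "", ctx.Err()
		}
	}
}

func main() {
	// Toy usage: pretend a leader is elected after two seconds.
	start := time.Now()
	leader := func() string {
		if time.Since(start) > 2*time.Second {
			return "peer-1"
		}
		return ""
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	fmt.Println(waitForLeader(ctx, leader))
}
```

The trade-off is latency for portability: polling notices a new leader up to half a second late, but it behaves identically on 32-bit and 64-bit builds, which is what let this patch drop the `sixtyfour` guard entirely.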