fix golint: Address a few golint warnings

License: MIT
Signed-off-by: Hector Sanjuan <code@hector.link>
This commit is contained in:
Hector Sanjuan 2017-12-06 13:45:35 +01:00
parent 1e87fccf0e
commit c0628e43ff
10 changed files with 27 additions and 27 deletions

View File

@ -951,7 +951,7 @@ func (c *Cluster) PinGet(h *cid.Cid) (api.Pin, error) {
} }
pin := cState.Get(h) pin := cState.Get(h)
if pin.Cid == nil { if pin.Cid == nil {
return pin, errors.New("Cid is not part of the global state") return pin, errors.New("cid is not part of the global state")
} }
return pin, nil return pin, nil
} }
@ -1343,7 +1343,7 @@ func (c *Cluster) allocate(hash *cid.Cid, repl int, blacklist []peer.ID) ([]peer
return validAllocations[0 : len(validAllocations)+needed], nil return validAllocations[0 : len(validAllocations)+needed], nil
case candidatesValid < needed: case candidatesValid < needed:
candidatesIds := []peer.ID{} candidatesIds := []peer.ID{}
for k, _ := range candidates { for k := range candidates {
candidatesIds = append(candidatesIds, k) candidatesIds = append(candidatesIds, k)
} }
err = logError( err = logError(
@ -1398,13 +1398,13 @@ func diffPeers(peers1, peers2 []peer.ID) (added, removed []peer.ID) {
for _, p := range peers2 { for _, p := range peers2 {
m2[p] = struct{}{} m2[p] = struct{}{}
} }
for k, _ := range m1 { for k := range m1 {
_, ok := m2[k] _, ok := m2[k]
if !ok { if !ok {
removed = append(removed, k) removed = append(removed, k)
} }
} }
for k, _ := range m2 { for k := range m2 {
_, ok := m1[k] _, ok := m1[k]
if !ok { if !ok {
added = append(added, k) added = append(added, k)

View File

@ -163,7 +163,7 @@ func (cfg *Config) Validate() error {
} }
if cfg.PrivateKey == nil { if cfg.PrivateKey == nil {
return errors.New("No cluster.private_key set") return errors.New("no cluster.private_key set")
} }
if cfg.Peers == nil { if cfg.Peers == nil {
@ -380,7 +380,7 @@ func DecodeClusterSecret(hexSecret string) ([]byte, error) {
case 32: case 32:
return secret, nil return secret, nil
default: default:
return nil, fmt.Errorf("Input secret is %d bytes, cluster secret should be 32.", secretLen) return nil, fmt.Errorf("input secret is %d bytes, cluster secret should be 32", secretLen)
} }
} }
@ -393,7 +393,7 @@ func generateClusterSecret() ([]byte, error) {
secretBytes := make([]byte, 32) secretBytes := make([]byte, 32)
_, err := crand.Read(secretBytes) _, err := crand.Read(secretBytes)
if err != nil { if err != nil {
return nil, fmt.Errorf("Error reading from rand: %v", err) return nil, fmt.Errorf("error reading from rand: %v", err)
} }
return secretBytes, nil return secretBytes, nil
} }

View File

@ -101,6 +101,8 @@ func NewManager() *Manager {
} }
// Shutdown makes sure all configuration save operations are finished
// before returning.
func (cfg *Manager) Shutdown() { func (cfg *Manager) Shutdown() {
cfg.cancel() cfg.cancel()
cfg.wg.Wait() cfg.wg.Wait()

View File

@ -125,7 +125,7 @@ func (cfg *Config) ConfigKey() string {
// at least in appearance. // at least in appearance.
func (cfg *Config) Validate() error { func (cfg *Config) Validate() error {
if cfg.RaftConfig == nil { if cfg.RaftConfig == nil {
return errors.New("No hashicorp/raft.Config") return errors.New("no hashicorp/raft.Config")
} }
if cfg.WaitForLeaderTimeout <= 0 { if cfg.WaitForLeaderTimeout <= 0 {
return errors.New("wait_for_leader_timeout <= 0") return errors.New("wait_for_leader_timeout <= 0")

View File

@ -66,14 +66,13 @@ func (fw *logForwarder) repeated(t int, msg string) bool {
if !ok || last.msg != msg { if !ok || last.msg != msg {
fw.last[t] = &lastMsg{msg, false} fw.last[t] = &lastMsg{msg, false}
return false return false
} else { }
if !last.tipped { if !last.tipped {
fw.log(t, "NOTICE: The last RAFT log message repeats and will only be logged once") fw.log(t, "NOTICE: The last RAFT log message repeats and will only be logged once")
last.tipped = true last.tipped = true
} }
return true return true
} }
}
func (fw *logForwarder) log(t int, msg string) { func (fw *logForwarder) log(t int, msg string) {
switch t { switch t {

View File

@ -221,13 +221,13 @@ func diffConfigurations(
for _, s := range c2.Servers { for _, s := range c2.Servers {
m2[s.ID] = struct{}{} m2[s.ID] = struct{}{}
} }
for k, _ := range m1 { for k := range m1 {
_, ok := m2[k] _, ok := m2[k]
if !ok { if !ok {
removed = append(removed, k) removed = append(removed, k)
} }
} }
for k, _ := range m2 { for k := range m2 {
_, ok := m1[k] _, ok := m1[k]
if !ok { if !ok {
added = append(added, k) added = append(added, k)

View File

@ -287,7 +287,7 @@ removal, upgrade state using this command, and restart every peer.
// run daemon() by default, or error. // run daemon() by default, or error.
func run(c *cli.Context) error { func run(c *cli.Context) error {
if len(c.Args()) > 0 { if len(c.Args()) > 0 {
return fmt.Errorf("Unknown subcommand. Run \"%s help\" for more info", programName) return fmt.Errorf("unknown subcommand. Run \"%s help\" for more info", programName)
} }
return daemon(c) return daemon(c)
} }

View File

@ -25,8 +25,8 @@ func upgrade() error {
return err return err
} }
if !snapExists { if !snapExists {
logger.Error("No raft state currently exists to upgrade from") logger.Error("no raft state currently exists to upgrade from")
return errors.New("No snapshot could be found") return errors.New("no snapshot could be found")
} }
// Restore the state from snapshot // Restore the state from snapshot
@ -47,10 +47,10 @@ func validateVersion(cfg *ipfscluster.Config, cCfg *raft.Config) error {
state := mapstate.NewMapState() state := mapstate.NewMapState()
r, snapExists, err := raft.LastStateRaw(cCfg) r, snapExists, err := raft.LastStateRaw(cCfg)
if !snapExists && err != nil { if !snapExists && err != nil {
logger.Error("Error before reading latest snapshot.") logger.Error("error before reading latest snapshot.")
return err return err
} else if snapExists && err != nil { } else if snapExists && err != nil {
logger.Error("Error after reading last snapshot. Snapshot potentially corrupt.") logger.Error("error after reading last snapshot. Snapshot potentially corrupt.")
return err return err
} else if snapExists && err == nil { } else if snapExists && err == nil {
raw, err := ioutil.ReadAll(r) raw, err := ioutil.ReadAll(r)
@ -59,7 +59,7 @@ func validateVersion(cfg *ipfscluster.Config, cCfg *raft.Config) error {
} }
err = state.Unmarshal(raw) err = state.Unmarshal(raw)
if err != nil { if err != nil {
logger.Error("Error unmarshalling snapshot. Snapshot potentially corrupt.") logger.Error("error unmarshalling snapshot. Snapshot potentially corrupt.")
return err return err
} }
if state.GetVersion() != state.Version { if state.GetVersion() != state.Version {
@ -69,7 +69,7 @@ func validateVersion(cfg *ipfscluster.Config, cCfg *raft.Config) error {
logger.Error("To launch a node without this state, rename the consensus data directory.") logger.Error("To launch a node without this state, rename the consensus data directory.")
logger.Error("Hint, the default is .ipfs-cluster/ipfs-cluster-data.") logger.Error("Hint, the default is .ipfs-cluster/ipfs-cluster-data.")
logger.Error("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!") logger.Error("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
return errors.New("Outdated state version stored") return errors.New("outdated state version stored")
} }
} // !snapExists && err == nil // no existing state, no check needed } // !snapExists && err == nil // no existing state, no check needed
return nil return nil

View File

@ -158,8 +158,5 @@ func (st *MapState) Unmarshal(bs []byte) error {
// snapshot is up to date // snapshot is up to date
buf := bytes.NewBuffer(bs[1:]) buf := bytes.NewBuffer(bs[1:])
dec := msgpack.Multicodec(msgpack.DefaultMsgpackHandle()).Decoder(buf) dec := msgpack.Multicodec(msgpack.DefaultMsgpackHandle()).Decoder(buf)
if err := dec.Decode(st); err != nil { return dec.Decode(st)
return err
}
return nil
} }

View File

@ -52,6 +52,8 @@ func copyEmptyStructToIfaces(in []struct{}) []interface{} {
return ifaces return ifaces
} }
// MultiaddrSplit takes a /proto/value/ipfs/id multiaddress and returns
// the id on one side and the /proto/value multiaddress on the other.
func MultiaddrSplit(addr ma.Multiaddr) (peer.ID, ma.Multiaddr, error) { func MultiaddrSplit(addr ma.Multiaddr) (peer.ID, ma.Multiaddr, error) {
return multiaddrSplit(addr) return multiaddrSplit(addr)
} }