fix golint: Address a few golint warnings

License: MIT
Signed-off-by: Hector Sanjuan <code@hector.link>
Hector Sanjuan 2017-12-06 13:45:35 +01:00
parent 1e87fccf0e
commit c0628e43ff
10 changed files with 27 additions and 27 deletions

View File

@@ -951,7 +951,7 @@ func (c *Cluster) PinGet(h *cid.Cid) (api.Pin, error) {
 	}
 	pin := cState.Get(h)
 	if pin.Cid == nil {
-		return pin, errors.New("Cid is not part of the global state")
+		return pin, errors.New("cid is not part of the global state")
 	}
 	return pin, nil
 }
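
Note: this change follows golint's error-strings check, which flags error strings that are capitalized (or end in punctuation) because they are routinely wrapped into longer messages. A minimal, runnable sketch of the difference, using a hypothetical wrapping message:

package main

import (
	"errors"
	"fmt"
)

func main() {
	// Capitalized: reads oddly once wrapped, e.g.
	// "pin lookup failed: Cid is not part of the global state"
	bad := errors.New("Cid is not part of the global state")

	// Lowercase: composes naturally, e.g.
	// "pin lookup failed: cid is not part of the global state"
	good := errors.New("cid is not part of the global state")

	fmt.Println(fmt.Errorf("pin lookup failed: %v", bad))
	fmt.Println(fmt.Errorf("pin lookup failed: %v", good))
}
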
@@ -1343,7 +1343,7 @@ func (c *Cluster) allocate(hash *cid.Cid, repl int, blacklist []peer.ID) ([]peer
 		return validAllocations[0 : len(validAllocations)+needed], nil
 	case candidatesValid < needed:
 		candidatesIds := []peer.ID{}
-		for k, _ := range candidates {
+		for k := range candidates {
 			candidatesIds = append(candidatesIds, k)
 		}
 		err = logError(
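
For loops like this one, golint reports "should omit 2nd value from range; this loop is equivalent to `for k := range ...`". The two forms behave identically; a small runnable sketch with made-up data:

package main

import "fmt"

func main() {
	candidates := map[string]int{"a": 1, "b": 2}

	// Flagged by golint: the blank identifier for the value is redundant.
	for k, _ := range candidates {
		fmt.Println(k)
	}

	// Idiomatic: ranging over a map with one variable yields only the keys.
	for k := range candidates {
		fmt.Println(k)
	}
}
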
@@ -1398,13 +1398,13 @@ func diffPeers(peers1, peers2 []peer.ID) (added, removed []peer.ID) {
 	for _, p := range peers2 {
 		m2[p] = struct{}{}
 	}
-	for k, _ := range m1 {
+	for k := range m1 {
 		_, ok := m2[k]
 		if !ok {
 			removed = append(removed, k)
 		}
 	}
-	for k, _ := range m2 {
+	for k := range m2 {
 		_, ok := m1[k]
 		if !ok {
 			added = append(added, k)

View File

@@ -163,7 +163,7 @@ func (cfg *Config) Validate() error {
 	}
 
 	if cfg.PrivateKey == nil {
-		return errors.New("No cluster.private_key set")
+		return errors.New("no cluster.private_key set")
 	}
 
 	if cfg.Peers == nil {
@@ -380,7 +380,7 @@ func DecodeClusterSecret(hexSecret string) ([]byte, error) {
 	case 32:
 		return secret, nil
 	default:
-		return nil, fmt.Errorf("Input secret is %d bytes, cluster secret should be 32.", secretLen)
+		return nil, fmt.Errorf("input secret is %d bytes, cluster secret should be 32", secretLen)
 	}
 }
@@ -393,7 +393,7 @@ func generateClusterSecret() ([]byte, error) {
 	secretBytes := make([]byte, 32)
 	_, err := crand.Read(secretBytes)
 	if err != nil {
-		return nil, fmt.Errorf("Error reading from rand: %v", err)
+		return nil, fmt.Errorf("error reading from rand: %v", err)
 	}
 	return secretBytes, nil
 }
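
The DecodeClusterSecret change above also drops the trailing period: because callers commonly wrap errors, punctuation at the end of an error string leaves a stray stop mid-sentence. A hypothetical sketch (the "loading config" wrapper is invented for illustration):

package main

import "fmt"

func main() {
	secretLen := 16

	// Trailing period: "loading config: input secret is 16 bytes, cluster secret should be 32."
	bad := fmt.Errorf("input secret is %d bytes, cluster secret should be 32.", secretLen)

	// No punctuation: the message composes cleanly when wrapped.
	good := fmt.Errorf("input secret is %d bytes, cluster secret should be 32", secretLen)

	fmt.Println(fmt.Errorf("loading config: %v", bad))
	fmt.Println(fmt.Errorf("loading config: %v", good))
}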

View File

@@ -101,6 +101,8 @@ func NewManager() *Manager {
 }
 
+// Shutdown makes sure all configuration save operations are finished
+// before returning.
 func (cfg *Manager) Shutdown() {
 	cfg.cancel()
 	cfg.wg.Wait()
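
The two added lines satisfy golint's rule that exported identifiers carry a doc comment beginning with the identifier's own name. A condensed, compilable sketch of the convention (the struct fields are stand-ins, not the real Manager):

package config

import "sync"

// Manager is a stand-in for the real configuration manager type.
type Manager struct {
	wg     sync.WaitGroup
	cancel func()
}

// Shutdown makes sure all configuration save operations are finished
// before returning. golint expects this comment to start with "Shutdown".
func (cfg *Manager) Shutdown() {
	cfg.cancel()
	cfg.wg.Wait()
}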

View File

@@ -125,7 +125,7 @@ func (cfg *Config) ConfigKey() string {
 // at least in appereance.
 func (cfg *Config) Validate() error {
 	if cfg.RaftConfig == nil {
-		return errors.New("No hashicorp/raft.Config")
+		return errors.New("no hashicorp/raft.Config")
 	}
 	if cfg.WaitForLeaderTimeout <= 0 {
 		return errors.New("wait_for_leader_timeout <= 0")

View File

@@ -66,13 +66,12 @@ func (fw *logForwarder) repeated(t int, msg string) bool {
 	if !ok || last.msg != msg {
 		fw.last[t] = &lastMsg{msg, false}
 		return false
-	} else {
-		if !last.tipped {
-			fw.log(t, "NOTICE: The last RAFT log message repeats and will only be logged once")
-			last.tipped = true
-		}
-		return true
 	}
+	if !last.tipped {
+		fw.log(t, "NOTICE: The last RAFT log message repeats and will only be logged once")
+		last.tipped = true
+	}
+	return true
 }
 
 func (fw *logForwarder) log(t int, msg string) {
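
The original shape of this function triggers golint's "if block ends with a return statement, so drop this else and outdent its block". The same transformation in miniature, with a hypothetical function:

package main

import "fmt"

// beforeLint: the else is redundant because the if branch returns.
func beforeLint(seen bool) string {
	if !seen {
		return "first"
	} else {
		return "repeat"
	}
}

// afterLint: dropping the else keeps behavior identical with less nesting.
func afterLint(seen bool) string {
	if !seen {
		return "first"
	}
	return "repeat"
}

func main() {
	fmt.Println(beforeLint(false), afterLint(true))
}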

View File

@@ -221,13 +221,13 @@ func diffConfigurations(
 	for _, s := range c2.Servers {
 		m2[s.ID] = struct{}{}
 	}
-	for k, _ := range m1 {
+	for k := range m1 {
 		_, ok := m2[k]
 		if !ok {
 			removed = append(removed, k)
 		}
 	}
-	for k, _ := range m2 {
+	for k := range m2 {
 		_, ok := m1[k]
 		if !ok {
 			added = append(added, k)

View File

@@ -287,7 +287,7 @@ removal, upgrade state using this command, and restart every peer.
 // run daemon() by default, or error.
 func run(c *cli.Context) error {
 	if len(c.Args()) > 0 {
-		return fmt.Errorf("Unknown subcommand. Run \"%s help\" for more info", programName)
+		return fmt.Errorf("unknown subcommand. Run \"%s help\" for more info", programName)
 	}
 	return daemon(c)
 }

View File

@@ -25,8 +25,8 @@ func upgrade() error {
 		return err
 	}
 	if !snapExists {
-		logger.Error("No raft state currently exists to upgrade from")
-		return errors.New("No snapshot could be found")
+		logger.Error("no raft state currently exists to upgrade from")
+		return errors.New("no snapshot could be found")
 	}
 
 	// Restore the state from snapshot
@@ -47,10 +47,10 @@ func validateVersion(cfg *ipfscluster.Config, cCfg *raft.Config) error {
 	state := mapstate.NewMapState()
 	r, snapExists, err := raft.LastStateRaw(cCfg)
 	if !snapExists && err != nil {
-		logger.Error("Error before reading latest snapshot.")
+		logger.Error("error before reading latest snapshot.")
 		return err
 	} else if snapExists && err != nil {
-		logger.Error("Error after reading last snapshot. Snapshot potentially corrupt.")
+		logger.Error("error after reading last snapshot. Snapshot potentially corrupt.")
 		return err
 	} else if snapExists && err == nil {
 		raw, err := ioutil.ReadAll(r)
@@ -59,7 +59,7 @@ func validateVersion(cfg *ipfscluster.Config, cCfg *raft.Config) error {
 		}
 		err = state.Unmarshal(raw)
 		if err != nil {
-			logger.Error("Error unmarshalling snapshot. Snapshot potentially corrupt.")
+			logger.Error("error unmarshalling snapshot. Snapshot potentially corrupt.")
 			return err
 		}
 		if state.GetVersion() != state.Version {
@@ -69,7 +69,7 @@ func validateVersion(cfg *ipfscluster.Config, cCfg *raft.Config) error {
 			logger.Error("To launch a node without this state, rename the consensus data directory.")
 			logger.Error("Hint, the default is .ipfs-cluster/ipfs-cluster-data.")
 			logger.Error("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
-			return errors.New("Outdated state version stored")
+			return errors.New("outdated state version stored")
 		}
 	} // !snapExists && err == nil // no existing state, no check needed
 	return nil

View File

@@ -158,8 +158,5 @@ func (st *MapState) Unmarshal(bs []byte) error {
 	// snapshot is up to date
 	buf := bytes.NewBuffer(bs[1:])
 	dec := msgpack.Multicodec(msgpack.DefaultMsgpackHandle()).Decoder(buf)
-	if err := dec.Decode(st); err != nil {
-		return err
-	}
-	return nil
+	return dec.Decode(st)
 }
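
This hunk collapses the check-then-return-nil pattern: when nothing happens between `if err := f(); err != nil { return err }` and the final `return nil`, the pair is equivalent to `return f()`. A runnable sketch with a stand-in decode function:

package main

import (
	"errors"
	"fmt"
)

func decode(ok bool) error {
	if !ok {
		return errors.New("decode failed")
	}
	return nil
}

// unmarshalVerbose mirrors the original shape of MapState.Unmarshal.
func unmarshalVerbose(ok bool) error {
	if err := decode(ok); err != nil {
		return err
	}
	return nil
}

// unmarshalShort is the simplified, behavior-identical version.
func unmarshalShort(ok bool) error {
	return decode(ok)
}

func main() {
	fmt.Println(unmarshalVerbose(false), unmarshalShort(true))
}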

View File

@@ -52,6 +52,8 @@ func copyEmptyStructToIfaces(in []struct{}) []interface{} {
 	return ifaces
 }
 
+// MultiaddrSplit takes a /proto/value/ipfs/id multiaddress and returns
+// the id on one side and the /proto/value multiaddress on the other.
 func MultiaddrSplit(addr ma.Multiaddr) (peer.ID, ma.Multiaddr, error) {
 	return multiaddrSplit(addr)
 }