3c3341e491
The monitor component should be in charge of deciding how best to send metrics to other peers and what that entails. This adds the PublishMetric() method to the component interface and moves that functionality from the main Cluster component to the basic monitor.

There is a behaviour change. Before, metrics were sent only to the leader, and the leader was the only peer that broadcast them everywhere. Now, all peers broadcast all metrics everywhere. This is mostly because we should not rely on the consensus layer providing a Leader(), so we are taking the chance to remove this dependency. Note that, in any case, pubsub monitoring should replace the existing basic monitor; this is just paving the ground.

Additionally, in order not to duplicate the multiRPC code in the monitor, I have moved that functionality to go-libp2p-gorpc and added an rpcutil library to cluster which includes useful methods to perform multiRPC requests (some of them existed in util.go, others are new and help with handling multiple contexts, etc.).

License: MIT
Signed-off-by: Hector Sanjuan <code@hector.link>
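To illustrate the shape of the change, here is a minimal, self-contained sketch of what a monitor-side PublishMetric() broadcast can look like. This is not the actual ipfs-cluster code: the Metric type, the monitor struct and the sendMetric callback are simplified stand-ins for api.Metric, the real monitor component and the go-libp2p-gorpc/rpcutil multiRPC machinery.

package main

import (
    "context"
    "fmt"
    "sync"
    "time"
)

// Metric is a simplified stand-in for api.Metric.
type Metric struct {
    Name  string
    Value string
}

// monitor broadcasts every metric to all known peers, mirroring the new
// behaviour: no peer is special, so no Leader() is needed.
type monitor struct {
    peers      []string // peer IDs, as plain strings for this sketch
    sendMetric func(ctx context.Context, peer string, m Metric) error
}

// PublishMetric fans the metric out to every peer in parallel, in the
// spirit of a multiRPC call: one goroutine and one error slot per peer.
func (mon *monitor) PublishMetric(m Metric) error {
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    errs := make([]error, len(mon.peers))
    var wg sync.WaitGroup
    for i, p := range mon.peers {
        wg.Add(1)
        go func(i int, p string) {
            defer wg.Done()
            errs[i] = mon.sendMetric(ctx, p, m)
        }(i, p)
    }
    wg.Wait()

    // Broadcasting is best-effort: report how many sends failed, since
    // individual peers may simply be down.
    var failed int
    for i, err := range errs {
        if err != nil {
            failed++
            fmt.Printf("error pushing metric to %s: %s\n", mon.peers[i], err)
        }
    }
    if failed > 0 {
        return fmt.Errorf("metric broadcast failed for %d peers", failed)
    }
    return nil
}

func main() {
    mon := &monitor{
        peers: []string{"peerA", "peerB"},
        sendMetric: func(ctx context.Context, peer string, m Metric) error {
            fmt.Printf("-> %s: %s=%s\n", peer, m.Name, m.Value)
            return nil
        },
    }
    if err := mon.PublishMetric(Metric{Name: "freespace", Value: "42GB"}); err != nil {
        fmt.Println(err)
    }
}

In the real code the fan-out goes through a multiRPC request instead of hand-rolled goroutines; as the commit message notes, the rpcutil helpers exist precisely to build and manage the per-destination contexts and replies that such requests need.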
101 lines · 2.5 KiB · Go
package ipfscluster

import (
    "errors"
    "fmt"

    "github.com/ipfs/ipfs-cluster/api"

    host "github.com/libp2p/go-libp2p-host"
    peer "github.com/libp2p/go-libp2p-peer"
    ma "github.com/multiformats/go-multiaddr"
)

// PeersFromMultiaddrs returns all the different peers in the given addresses.
// Each peer will appear only once in the result, even if several
// multiaddresses for it are provided.
func PeersFromMultiaddrs(addrs []ma.Multiaddr) []peer.ID {
    var pids []peer.ID
    pm := make(map[peer.ID]struct{})
    for _, addr := range addrs {
        pid, _, err := api.Libp2pMultiaddrSplit(addr)
        if err != nil {
            continue
        }
        if _, ok := pm[pid]; !ok {
            pm[pid] = struct{}{}
            pids = append(pids, pid)
        }
    }
    return pids
}

// // connect to a peer ID.
// func connectToPeer(ctx context.Context, h host.Host, id peer.ID, addr ma.Multiaddr) error {
//     err := h.Connect(ctx, peerstore.PeerInfo{
//         ID:    id,
//         Addrs: []ma.Multiaddr{addr},
//     })
//     return err
// }

// // return the local multiaddresses used to communicate to a peer.
// func localMultiaddrsTo(h host.Host, pid peer.ID) []ma.Multiaddr {
//     var addrs []ma.Multiaddr
//     conns := h.Network().ConnsToPeer(pid)
//     logger.Debugf("conns to %s are: %s", pid, conns)
//     for _, conn := range conns {
//         addrs = append(addrs, multiaddrJoin(conn.LocalMultiaddr(), h.ID()))
//     }
//     return addrs
// }

// getRemoteMultiaddr returns the multiaddress we are currently using to
// reach the given peer, if there are open connections to it. Otherwise,
// it returns the provided address.
func getRemoteMultiaddr(h host.Host, pid peer.ID, addr ma.Multiaddr) ma.Multiaddr {
    conns := h.Network().ConnsToPeer(pid)
    if len(conns) > 0 {
        return api.MustLibp2pMultiaddrJoin(conns[0].RemoteMultiaddr(), pid)
    }
    return api.MustLibp2pMultiaddrJoin(addr, pid)
}

// pinInfoSliceToSerial converts a slice of PinInfo to its serializable form.
func pinInfoSliceToSerial(pi []api.PinInfo) []api.PinInfoSerial {
    pis := make([]api.PinInfoSerial, len(pi))
    for i, v := range pi {
        pis[i] = v.ToSerial()
    }
    return pis
}

// globalPinInfoSliceToSerial converts a slice of GlobalPinInfo to its
// serializable form.
func globalPinInfoSliceToSerial(gpi []api.GlobalPinInfo) []api.GlobalPinInfoSerial {
    gpis := make([]api.GlobalPinInfoSerial, len(gpi))
    for i, v := range gpi {
        gpis[i] = v.ToSerial()
    }
    return gpis
}

// logError logs a formatted message at error level and returns it as an error.
func logError(fmtstr string, args ...interface{}) error {
    msg := fmt.Sprintf(fmtstr, args...)
    logger.Error(msg)
    return errors.New(msg)
}

// containsPeer reports whether the given peer ID is present in the list.
func containsPeer(list []peer.ID, pid peer.ID) bool {
    for _, p := range list {
        if p == pid {
            return true
        }
    }
    return false
}

// minInt returns the smaller of two ints.
func minInt(x, y int) int {
    if x < y {
        return x
    }
    return y
}