ipfs-cluster/util.go

package ipfscluster

import (
	"bytes"
	"errors"
	"fmt"

	blake2b "golang.org/x/crypto/blake2b"
	cid "github.com/ipfs/go-cid"
	peer "github.com/libp2p/go-libp2p-core/peer"
	ma "github.com/multiformats/go-multiaddr"
)

// PeersFromMultiaddrs returns all the different peers in the given addresses.
// Each peer will appear only once in the result, even if several
// multiaddresses for it are provided.
func PeersFromMultiaddrs(addrs []ma.Multiaddr) []peer.ID {
	var pids []peer.ID
	pm := make(map[peer.ID]struct{})
	for _, addr := range addrs {
		pinfo, err := peer.AddrInfoFromP2pAddr(addr)
		if err != nil {
			continue
		}
		_, ok := pm[pinfo.ID]
		if !ok {
			pm[pinfo.ID] = struct{}{}
			pids = append(pids, pinfo.ID)
		}
	}
	return pids
}
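
// exampleDedupPeers is an illustrative sketch, not part of the original
// file: it assumes both multiaddresses end in the same /p2p/<peer-id>
// component and shows that PeersFromMultiaddrs then returns that peer ID
// only once, while addresses that cannot be parsed into an AddrInfo are
// silently skipped. The parameter names are hypothetical.
func exampleDedupPeers(tcpAddr, dnsAddr ma.Multiaddr) []peer.ID {
	// With identical peer IDs in both addresses, the result has length 1.
	return PeersFromMultiaddrs([]ma.Multiaddr{tcpAddr, dnsAddr})
}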

// // connect to a peer ID.
// func connectToPeer(ctx context.Context, h host.Host, id peer.ID, addr ma.Multiaddr) error {
// 	err := h.Connect(ctx, peerstore.PeerInfo{
// 		ID:    id,
// 		Addrs: []ma.Multiaddr{addr},
// 	})
// 	return err
// }

// // return the local multiaddresses used to communicate to a peer.
// func localMultiaddrsTo(h host.Host, pid peer.ID) []ma.Multiaddr {
// 	var addrs []ma.Multiaddr
// 	conns := h.Network().ConnsToPeer(pid)
// 	logger.Debugf("conns to %s are: %s", pid, conns)
// 	for _, conn := range conns {
// 		addrs = append(addrs, multiaddrJoin(conn.LocalMultiaddr(), h.ID()))
// 	}
// 	return addrs
// }

// logError logs the formatted message and also returns it as an error.
func logError(fmtstr string, args ...interface{}) error {
	msg := fmt.Sprintf(fmtstr, args...)
	logger.Error(msg)
	return errors.New(msg)
}

// containsPeer reports whether the given peer ID is present in the list.
func containsPeer(list []peer.ID, peer peer.ID) bool {
	for _, p := range list {
		if p == peer {
			return true
		}
	}
	return false
}

// containsCid reports whether the given CID is present in the list.
func containsCid(list []cid.Cid, ci cid.Cid) bool {
	for _, c := range list {
		if c.String() == ci.String() {
			return true
		}
	}
	return false
}

// minInt returns the smaller of two ints.
func minInt(x, y int) int {
	if x < y {
		return x
	}
	return y
}

// // updatePinParents modifies the api.Pin input to give it the correct parents
// // so that previous additions to the pins parents are maintained after this
// // pin is committed to consensus. If this pin carries new parents they are
// // merged with those already existing for this CID.
// func updatePinParents(pin *api.Pin, existing *api.Pin) {
// 	// no existing parents this pin is up to date
// 	if existing.Parents == nil || len(existing.Parents.Keys()) == 0 {
// 		return
// 	}
// 	for _, c := range existing.Parents.Keys() {
// 		pin.Parents.Add(c)
// 	}
// }

// distance is a blake2b-256 hash used as an XOR-metric distance.
type distance [blake2b.Size256]byte

// distanceChecker caches peer ID hashes and compares XOR distances.
type distanceChecker struct {
	local      peer.ID
	otherPeers []peer.ID
	cache      map[peer.ID]distance
}

// isClosest returns true when the local peer is at least as close to the
// given CID (by XOR distance of blake2b-256 hashes) as every other peer.
func (dc distanceChecker) isClosest(ci cid.Cid) bool {
	ciHash := convertKey(ci.KeyString())
	localPeerHash := dc.convertPeerID(dc.local)
	myDistance := xor(ciHash, localPeerHash)

	for _, p := range dc.otherPeers {
		peerHash := dc.convertPeerID(p)
		distance := xor(peerHash, ciHash)

		// if myDistance is larger than the distance of any other peer,
		// the local peer is not the closest one.
		if bytes.Compare(myDistance[:], distance[:]) > 0 {
			return false
		}
	}
	return true
}

// convertPeerID hashes a Peer ID (Multihash), caching the result.
func (dc distanceChecker) convertPeerID(id peer.ID) distance {
	hash, ok := dc.cache[id]
	if ok {
		return hash
	}

	hashBytes := convertKey(string(id))
	dc.cache[id] = hashBytes
	return hashBytes
}

// convertKey hashes a key.
func convertKey(id string) distance {
	return blake2b.Sum256([]byte(id))
}

// xor returns the byte-wise XOR of two distances.
func xor(a, b distance) distance {
	var c distance
	for i := 0; i < len(c); i++ {
		c[i] = a[i] ^ b[i]
	}
	return c
}
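
// exampleIsClosest is an illustrative sketch, not part of the original
// file: it shows how a distanceChecker might be assembled. The cache map
// must be non-nil, since convertPeerID writes to it, and isClosest then
// reports whether the local peer's hash is the nearest to the CID's hash
// under the XOR metric. The parameter names are hypothetical.
func exampleIsClosest(local peer.ID, others []peer.ID, c cid.Cid) bool {
	dc := distanceChecker{
		local:      local,
		otherPeers: others,
		cache:      make(map[peer.ID]distance, len(others)+1),
	}
	return dc.isClosest(c)
}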

// peersSubtract subtracts the peer IDs in slice b from slice a.
func peersSubtract(a []peer.ID, b []peer.ID) []peer.ID {
	// Allocate with zero length and capacity len(a): appending to a slice
	// created with a non-zero length would leave empty peer IDs at the front.
	result := make([]peer.ID, 0, len(a))
	bMap := make(map[peer.ID]struct{}, len(b))
	for _, p := range b {
		bMap[p] = struct{}{}
	}
	for _, p := range a {
		if _, ok := bMap[p]; ok {
			continue
		}
		result = append(result, p)
	}
	return result
}
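
// examplePeersSubtract is an illustrative sketch, not part of the original
// file: it shows that peersSubtract keeps the order of a and drops any ID
// that also appears in b, so the result here contains only p1.
func examplePeersSubtract(p1, p2 peer.ID) []peer.ID {
	return peersSubtract([]peer.ID{p1, p2}, []peer.ID{p2})
}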