9b9d76f92d
This commit introduces the new go-libp2p-gorpc streaming capabilities for Cluster. The main aim is to work towards heavily reducing memory usage when working with very large pinsets. As a side-effect, it takes the chance to revamp all types for all public methods so that pointers to what should be static objects are not used anymore. This should heavily reduce heap allocations and GC activity. The main change is that state.List now returns a channel from which to read the pins, rather than pins being all loaded into a huge slice. Things reading pins have been all updated to iterate on the channel rather than on the slice. The full pinset is no longer fully loaded into memory for things that run regularly like StateSync(). Additionally, the /allocations endpoint of the rest API no longer returns an array of pins, but rather streams json-encoded pin objects directly. This change has extended to the restapi client (which puts pins into a channel as they arrive) and to ipfs-cluster-ctl. There are still pending improvements like StatusAll() calls which should also stream responses, and especially BlockPut calls which should stream blocks directly into IPFS on a single call. These are coming up in future commits.
117 lines
3.0 KiB
Go
117 lines
3.0 KiB
Go
package ipfscluster
|
|
|
|
import (
|
|
"github.com/ipfs/ipfs-cluster/api"
|
|
"github.com/ipfs/ipfs-cluster/rpcutil"
|
|
|
|
peer "github.com/libp2p/go-libp2p-core/peer"
|
|
|
|
"go.opencensus.io/trace"
|
|
)
|
|
|
|
// ConnectGraph returns a description of which cluster peers and ipfs
|
|
// daemons are connected to each other.
|
|
func (c *Cluster) ConnectGraph() (api.ConnectGraph, error) {
|
|
ctx, span := trace.StartSpan(c.ctx, "cluster/ConnectGraph")
|
|
defer span.End()
|
|
|
|
cg := api.ConnectGraph{
|
|
ClusterID: c.host.ID(),
|
|
IDtoPeername: make(map[string]string),
|
|
IPFSLinks: make(map[string][]peer.ID),
|
|
ClusterLinks: make(map[string][]peer.ID),
|
|
ClusterTrustLinks: make(map[string]bool),
|
|
ClustertoIPFS: make(map[string]peer.ID),
|
|
}
|
|
members, err := c.consensus.Peers(ctx)
|
|
if err != nil {
|
|
return cg, err
|
|
}
|
|
|
|
for _, member := range members {
|
|
// one of the entries is for itself, but that shouldn't hurt
|
|
cg.ClusterTrustLinks[peer.Encode(member)] = c.consensus.IsTrustedPeer(ctx, member)
|
|
}
|
|
|
|
peers := make([][]api.ID, len(members))
|
|
|
|
ctxs, cancels := rpcutil.CtxsWithCancel(ctx, len(members))
|
|
defer rpcutil.MultiCancel(cancels)
|
|
|
|
errs := c.rpcClient.MultiCall(
|
|
ctxs,
|
|
members,
|
|
"Cluster",
|
|
"Peers",
|
|
struct{}{},
|
|
rpcutil.CopyIDSliceToIfaces(peers),
|
|
)
|
|
|
|
for i, err := range errs {
|
|
p := peer.Encode(members[i])
|
|
cg.ClusterLinks[p] = make([]peer.ID, 0)
|
|
if err != nil { // Only setting cluster connections when no error occurs
|
|
logger.Debugf("RPC error reaching cluster peer %s: %s", p, err.Error())
|
|
continue
|
|
}
|
|
|
|
selfConnection, pID := c.recordClusterLinks(&cg, p, peers[i])
|
|
cg.IDtoPeername[p] = pID.Peername
|
|
// IPFS connections
|
|
if !selfConnection {
|
|
logger.Warnf("cluster peer %s not its own peer. No ipfs info ", p)
|
|
continue
|
|
}
|
|
c.recordIPFSLinks(&cg, pID)
|
|
}
|
|
|
|
return cg, nil
|
|
}
|
|
|
|
func (c *Cluster) recordClusterLinks(cg *api.ConnectGraph, p string, peers []api.ID) (bool, api.ID) {
|
|
selfConnection := false
|
|
var pID api.ID
|
|
for _, id := range peers {
|
|
if id.Error != "" {
|
|
logger.Debugf("Peer %s errored connecting to its peer %s", p, id.ID.Pretty())
|
|
continue
|
|
}
|
|
if peer.Encode(id.ID) == p {
|
|
selfConnection = true
|
|
pID = id
|
|
} else {
|
|
cg.ClusterLinks[p] = append(cg.ClusterLinks[p], id.ID)
|
|
}
|
|
}
|
|
return selfConnection, pID
|
|
}
|
|
|
|
func (c *Cluster) recordIPFSLinks(cg *api.ConnectGraph, pID api.ID) {
|
|
ipfsID := pID.IPFS.ID
|
|
if pID.IPFS.Error != "" { // Only setting ipfs connections when no error occurs
|
|
logger.Warnf("ipfs id: %s has error: %s. Skipping swarm connections", ipfsID.Pretty(), pID.IPFS.Error)
|
|
return
|
|
}
|
|
|
|
pid := peer.Encode(pID.ID)
|
|
ipfsPid := peer.Encode(ipfsID)
|
|
|
|
if _, ok := cg.IPFSLinks[pid]; ok {
|
|
logger.Warnf("ipfs id: %s already recorded, one ipfs daemon in use by multiple cluster peers", ipfsID.Pretty())
|
|
}
|
|
cg.ClustertoIPFS[pid] = ipfsID
|
|
cg.IPFSLinks[ipfsPid] = make([]peer.ID, 0)
|
|
var swarmPeers []peer.ID
|
|
err := c.rpcClient.Call(
|
|
pID.ID,
|
|
"IPFSConnector",
|
|
"SwarmPeers",
|
|
struct{}{},
|
|
&swarmPeers,
|
|
)
|
|
if err != nil {
|
|
return
|
|
}
|
|
cg.IPFSLinks[ipfsPid] = swarmPeers
|
|
}
|