package ipfscluster

import (
	"context"
	"encoding/base64"
	"errors"
	"fmt"
	"strings"
	"sync"

	rpc "github.com/hsanjuan/go-libp2p-rpc"
	cid "github.com/ipfs/go-cid"
	crypto "github.com/libp2p/go-libp2p-crypto"
	host "github.com/libp2p/go-libp2p-host"
	peer "github.com/libp2p/go-libp2p-peer"
	peerstore "github.com/libp2p/go-libp2p-peerstore"
	swarm "github.com/libp2p/go-libp2p-swarm"
	basichost "github.com/libp2p/go-libp2p/p2p/host/basic"
	multiaddr "github.com/multiformats/go-multiaddr"
)

// Cluster is the main IPFS cluster component. It provides
// the go-API for it and orchestrates the components that make up the system.
type Cluster struct {
	ctx context.Context

	config    *Config
	host      host.Host
	rpcServer *rpc.Server
	rpcClient *rpc.Client

	consensus *Consensus
	api       API
	ipfs      IPFSConnector
	state     State
	tracker   PinTracker

	shutdownLock sync.Mutex
	shutdown     bool
	shutdownCh   chan struct{}
	wg           sync.WaitGroup
}

// NewCluster builds a new IPFS Cluster. It initializes a libp2p host,
// creates an RPC server and client, and sets up all the components.
func NewCluster(cfg *Config, api API, ipfs IPFSConnector, state State, tracker PinTracker) (*Cluster, error) {
	ctx := context.Background()
	host, err := makeHost(ctx, cfg)
	if err != nil {
		return nil, err
	}

	rpcServer := rpc.NewServer(host, RPCProtocol)
	rpcClient := rpc.NewClientWithServer(host, RPCProtocol, rpcServer)

	consensus, err := NewConsensus(cfg, host, state)
	if err != nil {
		logger.Errorf("error creating consensus: %s", err)
		return nil, err
	}

	tracker.SetClient(rpcClient)
	ipfs.SetClient(rpcClient)
	api.SetClient(rpcClient)
	consensus.SetClient(rpcClient)

	cluster := &Cluster{
		ctx:        ctx,
		config:     cfg,
		host:       host,
		rpcServer:  rpcServer,
		rpcClient:  rpcClient,
		consensus:  consensus,
		api:        api,
		ipfs:       ipfs,
		state:      state,
		tracker:    tracker,
		shutdownCh: make(chan struct{}),
	}

	err = rpcServer.RegisterName(
		"Cluster",
		&RPCAPI{cluster: cluster})
	if err != nil {
		return nil, err
	}

	logger.Infof("starting IPFS Cluster v%s", Version)

	cluster.run()
	return cluster, nil
}

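// A minimal usage sketch for NewCluster (illustrative only; the restAPI,
// ipfsConn, mapState and mapTracker names stand in for concrete API,
// IPFSConnector, State and PinTracker implementations provided elsewhere):
//
//	cluster, err := NewCluster(cfg, restAPI, ipfsConn, mapState, mapTracker)
//	if err != nil {
//		// handle error
//	}
//	defer cluster.Shutdown()
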
// Shutdown stops the IPFS cluster components
func (c *Cluster) Shutdown() error {
	c.shutdownLock.Lock()
	defer c.shutdownLock.Unlock()
	if c.shutdown {
		logger.Warning("Cluster is already shut down")
		return nil
	}

	logger.Info("shutting down IPFS Cluster")
	if err := c.consensus.Shutdown(); err != nil {
		logger.Errorf("error stopping consensus: %s", err)
		return err
	}
	if err := c.api.Shutdown(); err != nil {
		logger.Errorf("error stopping API: %s", err)
		return err
	}
	if err := c.ipfs.Shutdown(); err != nil {
		logger.Errorf("error stopping IPFS Connector: %s", err)
		return err
	}

	if err := c.tracker.Shutdown(); err != nil {
		logger.Errorf("error stopping PinTracker: %s", err)
		return err
	}
	c.shutdownCh <- struct{}{}
	c.wg.Wait()
	c.host.Close() // Shutdown all network services
	return nil
}

// StateSync syncs the consensus state to the Pin Tracker, ensuring
// that every Cid that should be tracked is tracked. It returns
// PinInfo for Cids which were added or deleted.
func (c *Cluster) StateSync() ([]PinInfo, error) {
	cState, err := c.consensus.State()
	if err != nil {
		return nil, err
	}

	logger.Info("syncing state to tracker")
	clusterPins := cState.ListPins()
	var changed []*cid.Cid

	// Track items which are not tracked
	for _, h := range clusterPins {
		if c.tracker.StatusCid(h).IPFS == Unpinned {
			changed = append(changed, h)
			err := c.rpcClient.Go("",
				"Cluster",
				"Track",
				NewCidArg(h),
				&struct{}{},
				nil)
			if err != nil {
				return []PinInfo{}, err
			}
		}
	}

	// Untrack items which should not be tracked
	for _, p := range c.tracker.Status() {
		h, _ := cid.Decode(p.CidStr)
		if !cState.HasPin(h) {
			changed = append(changed, h)
			err := c.rpcClient.Go("",
				"Cluster",
				"Untrack",
				&CidArg{p.CidStr},
				&struct{}{},
				nil)
			if err != nil {
				return []PinInfo{}, err
			}
		}
	}

	var infos []PinInfo
	for _, h := range changed {
		infos = append(infos, c.tracker.StatusCid(h))
	}
	return infos, nil
}

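// A hedged sketch of how StateSync results might be consumed by a caller
// (illustrative only; not part of the package API):
//
//	changed, err := cluster.StateSync()
//	if err != nil {
//		logger.Error(err)
//	}
//	for _, pi := range changed {
//		logger.Infof("state sync changed %s", pi.CidStr)
//	}
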
// Status returns the GlobalPinInfo for all tracked Cids. If an error happens,
// the slice will contain as much information as could be fetched.
func (c *Cluster) Status() ([]GlobalPinInfo, error) {
	return c.globalPinInfoSlice("TrackerStatus")
}

// StatusCid returns the GlobalPinInfo for a given Cid. If an error happens,
// the GlobalPinInfo should contain as much information as could be fetched.
func (c *Cluster) StatusCid(h *cid.Cid) (GlobalPinInfo, error) {
	return c.globalPinInfoCid("TrackerStatusCid", h)
}

// LocalSync makes sure that the current state of the Tracker matches
// the IPFS daemon state by triggering a Tracker.Sync() and Recover()
// on all items that need it. Returns PinInfo for items changed on Sync().
//
// LocalSync triggers recoveries asynchronously, and will not wait for
// them to fail or succeed before returning.
func (c *Cluster) LocalSync() ([]PinInfo, error) {
	status := c.tracker.Status()
	var toRecover []*cid.Cid

	for _, p := range status {
		h, _ := cid.Decode(p.CidStr)
		modified := c.tracker.Sync(h)
		if modified {
			toRecover = append(toRecover, h)
		}
	}

	logger.Infof("%d items to recover after sync", len(toRecover))
	for i, h := range toRecover {
		logger.Infof("recovering in progress for %s (%d/%d)",
			h, i, len(toRecover))
		go func(h *cid.Cid) {
			c.tracker.Recover(h)
		}(h)
	}

	var changed []PinInfo
	for _, h := range toRecover {
		changed = append(changed, c.tracker.StatusCid(h))
	}
	return changed, nil
}

// LocalSyncCid performs a Tracker.Sync() operation followed by a
// Recover() when needed. It returns the latest known PinInfo for the Cid.
//
// LocalSyncCid will wait for the Recover operation to fail or succeed before
// returning.
func (c *Cluster) LocalSyncCid(h *cid.Cid) (PinInfo, error) {
	var err error
	if c.tracker.Sync(h) {
		err = c.tracker.Recover(h)
	}
	return c.tracker.StatusCid(h), err
}

// GlobalSync triggers Sync() operations in all members of the Cluster.
func (c *Cluster) GlobalSync() ([]GlobalPinInfo, error) {
	return c.globalPinInfoSlice("LocalSync")
}

// GlobalSyncCid triggers a LocalSyncCid() operation for a given Cid
// in all members of the Cluster.
//
// GlobalSyncCid will only return when all operations have either failed,
// succeeded or timed-out.
func (c *Cluster) GlobalSyncCid(h *cid.Cid) (GlobalPinInfo, error) {
	return c.globalPinInfoCid("LocalSyncCid", h)
}

// Pins returns the list of Cids managed by Cluster and which are part
// of the current global state. This is the source of truth as to which
// pins are managed, but does not indicate if the item is successfully pinned.
func (c *Cluster) Pins() []*cid.Cid {
	cState, err := c.consensus.State()
	if err != nil {
		return []*cid.Cid{}
	}
	return cState.ListPins()
}

// Pin makes the cluster Pin a Cid. This implies adding the Cid
// to the IPFS Cluster peers shared-state. Depending on the cluster
// pinning strategy, the PinTracker may then request the IPFS daemon
// to pin the Cid. When the current node is not the cluster leader,
// the request is forwarded to the leader.
//
// Pin returns an error if the operation could not be persisted
// to the global state. Pin does not reflect the success or failure
// of underlying IPFS daemon pinning operations.
func (c *Cluster) Pin(h *cid.Cid) error {
	logger.Info("pinning:", h)
	leader, err := c.consensus.Leader()
	if err != nil {
		return err
	}
	err = c.rpcClient.Call(
		leader,
		"Cluster",
		"ConsensusLogPin",
		NewCidArg(h),
		&struct{}{})

	if err != nil {
		return err
	}
	return nil
}

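// Illustrative sketch of pinning a Cid and then checking its status across
// the cluster (error handling elided; not part of the package API):
//
//	h, _ := cid.Decode("Qm...")
//	if err := cluster.Pin(h); err != nil {
//		logger.Error(err)
//	}
//	gpi, _ := cluster.StatusCid(h)
//	for p, info := range gpi.Status {
//		logger.Infof("%s: %+v", p.Pretty(), info)
//	}
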
// Unpin makes the cluster Unpin a Cid. This implies removing the Cid
// from the IPFS Cluster peers shared-state. When the current node is
// not the cluster leader, the request is forwarded to the leader.
//
// Unpin returns an error if the operation could not be persisted
// to the global state. Unpin does not reflect the success or failure
// of underlying IPFS daemon unpinning operations.
func (c *Cluster) Unpin(h *cid.Cid) error {
	logger.Info("unpinning:", h)
	leader, err := c.consensus.Leader()
	if err != nil {
		return err
	}
	err = c.rpcClient.Call(
		leader,
		"Cluster",
		"ConsensusLogUnpin",
		NewCidArg(h),
		&struct{}{})

	if err != nil {
		return err
	}
	return nil
}

// Version returns the current IPFS Cluster version
func (c *Cluster) Version() string {
	return Version
}

// Members returns the IDs of the members of this Cluster
func (c *Cluster) Members() []peer.ID {
	return c.host.Peerstore().Peers()
}

// run launches the main background goroutine of the Cluster. It currently
// only manages the cluster context and waits for the shutdown signal.
func (c *Cluster) run() {
	c.wg.Add(1)

	// Currently we do nothing other than waiting to
	// cancel our context.
	go func() {
		defer c.wg.Done()
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		c.ctx = ctx
		<-c.shutdownCh
	}()
}

// makeHost makes a libp2p-host
func makeHost(ctx context.Context, cfg *Config) (host.Host, error) {
	ps := peerstore.NewPeerstore()
	peerID, err := peer.IDB58Decode(cfg.ID)
	if err != nil {
		logger.Error("decoding ID: ", err)
		return nil, err
	}

	pkb, err := base64.StdEncoding.DecodeString(cfg.PrivateKey)
	if err != nil {
		logger.Error("decoding private key base64: ", err)
		return nil, err
	}

	privateKey, err := crypto.UnmarshalPrivateKey(pkb)
	if err != nil {
		logger.Error("unmarshaling private key: ", err)
		return nil, err
	}

	publicKey := privateKey.GetPublic()

	addr, err := multiaddr.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d",
		cfg.ClusterAddr, cfg.ClusterPort))
	if err != nil {
		return nil, err
	}

	if err := ps.AddPubKey(peerID, publicKey); err != nil {
		return nil, err
	}

	if err := ps.AddPrivKey(peerID, privateKey); err != nil {
		return nil, err
	}

	// ClusterPeers entries are expected to be full multiaddresses which
	// include the peer ID, in the form /ip4/<ip>/tcp/<port>/ipfs/<peerID>.
	for _, cpeer := range cfg.ClusterPeers {
		addr, err := multiaddr.NewMultiaddr(cpeer)
		if err != nil {
			logger.Errorf("parsing cluster peer multiaddress %s: %s", cpeer, err)
			return nil, err
		}

		pid, err := addr.ValueForProtocol(multiaddr.P_IPFS)
		if err != nil {
			return nil, err
		}

		strAddr := strings.Split(addr.String(), "/ipfs/")[0]
		maddr, err := multiaddr.NewMultiaddr(strAddr)
		if err != nil {
			return nil, err
		}

		peerID, err := peer.IDB58Decode(pid)
		if err != nil {
			return nil, err
		}

		ps.AddAddrs(
			peerID,
			[]multiaddr.Multiaddr{maddr},
			peerstore.PermanentAddrTTL)
	}

	network, err := swarm.NewNetwork(
		ctx,
		[]multiaddr.Multiaddr{addr},
		peerID,
		ps,
		nil,
	)

	if err != nil {
		return nil, err
	}

	bhost := basichost.New(network)
	return bhost, nil
}

// multiRPC performs a synchronous RPC request against multiple destinations
// in parallel. reply must have the same length as dests and contain pointers
// to the reply values; the returned errors follow the order of dests.
func (c *Cluster) multiRPC(dests []peer.ID, svcName, svcMethod string, args interface{}, reply []interface{}) []error {
	if len(dests) != len(reply) {
		panic("must have matching dests and replies")
	}
	var wg sync.WaitGroup
	errs := make([]error, len(dests), len(dests))

	for i := range dests {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			err := c.rpcClient.Call(
				dests[i],
				svcName,
				svcMethod,
				args,
				reply[i])
			errs[i] = err
		}(i)
	}
	wg.Wait()
	return errs
}

func (c *Cluster) globalPinInfoCid(method string, h *cid.Cid) (GlobalPinInfo, error) {
	pin := GlobalPinInfo{
		Cid:    h,
		Status: make(map[peer.ID]PinInfo),
	}

	members := c.Members()
	replies := make([]PinInfo, len(members), len(members))
	ifaceReplies := make([]interface{}, len(members), len(members))
	for i := range replies {
		ifaceReplies[i] = &replies[i]
	}
	args := NewCidArg(h)
	errs := c.multiRPC(members, "Cluster", method, args, ifaceReplies)

	var errorMsgs string
	for i, r := range replies {
		if e := errs[i]; e != nil {
			logger.Error(e)
			errorMsgs += e.Error() + "\n"
		}
		pin.Status[r.Peer] = r
	}

	if len(errorMsgs) == 0 {
		return pin, nil
	}

	return pin, errors.New(errorMsgs)
}

func (c *Cluster) globalPinInfoSlice(method string) ([]GlobalPinInfo, error) {
	var infos []GlobalPinInfo
	fullMap := make(map[string]GlobalPinInfo)

	members := c.Members()
	replies := make([][]PinInfo, len(members), len(members))
	ifaceReplies := make([]interface{}, len(members), len(members))
	for i := range replies {
		ifaceReplies[i] = &replies[i]
	}
	errs := c.multiRPC(members, "Cluster", method, struct{}{}, ifaceReplies)

	mergePins := func(pins []PinInfo) {
		for _, p := range pins {
			item, ok := fullMap[p.CidStr]
			pCid, _ := cid.Decode(p.CidStr)
			if !ok {
				fullMap[p.CidStr] = GlobalPinInfo{
					Cid: pCid,
					Status: map[peer.ID]PinInfo{
						p.Peer: p,
					},
				}
			} else {
				item.Status[p.Peer] = p
			}
		}
	}

	var errorMsgs string
	for i, r := range replies {
		if e := errs[i]; e != nil {
			logger.Error("error in broadcast response: ", e)
			errorMsgs += e.Error() + "\n"
		}
		mergePins(r)
	}

	for _, v := range fullMap {
		infos = append(infos, v)
	}

	if len(errorMsgs) == 0 {
		return infos, nil
	}

	return infos, errors.New(errorMsgs)
}