WIP: basic functionality

Hector Sanjuan 2016-12-02 19:33:39 +01:00 committed by Hector Sanjuan
parent a5ad1fcf97
commit e0840df267
17 changed files with 1563 additions and 9 deletions


@ -1,21 +1,58 @@
# ipfs-cluster
[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io)
[![standard-readme compliant](https://img.shields.io/badge/standard--readme-OK-green.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme)
> Clustering for IPFS nodes.
**WORK IN PROGRESS**
**DO NOT USE IN PRODUCTION**
`ipfs-cluster` is a tool which groups a number of IPFS nodes together, allowing them to collectively perform operations such as pinning.
In order to do so, `ipfs-cluster` nodes use a libp2p-based consensus algorithm (currently Raft) to agree on a log of operations and build a consistent state across the cluster. The state represents which objects should be pinned by which nodes.
Additionally, `ipfs-cluster` nodes act as a proxy/wrapper to the IPFS API, so an IPFS cluster can be used as a regular IPFS node.
`ipfs-cluster` provides a Go API, an equivalent RESTful HTTP API and a command-line interface tool (`ipfs-cluster-ctl`) which plugs directly into it.
## Table of Contents
- [Background](#background)
- [Install](#install)
- [Usage](#usage)
- [API](#api)
- [Contribute](#contribute)
- [License](#license)
## Background
Since the start of IPFS it was clear that a tool to coordinate a number of different nodes (and the content they are supposed to store) would add great value to the IPFS ecosystem. Naïve approaches are possible, but they present weaknesses, especially when it comes to error handling and incorporating multiple pinning strategies.
`ipfs-cluster` aims to address these issues by providing an IPFS node wrapper which coordinates multiple cluster members via a consensus algorithm. This ensures that the desired state of the system is always agreed upon and can be easily maintained by the members of the cluster. Thus, every cluster member has an overview of where each hash is pinned, and the cluster can react to contingencies, such as IPFS nodes dying, by redistributing the storage load to other nodes.
## Install
TODO
## Usage
The documentation and examples for `ipfs-cluster` (useful if you are integrating it in Go) can be found in [godoc.org/github.com/ipfs/ipfs-cluster](https://godoc.org/github.com/ipfs/ipfs-cluster).
TODO
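In the meantime, here is a minimal embedding sketch mirroring `ipfs-cluster/main.go` from this commit (the configuration path is illustrative):
```go
package main

import (
	"fmt"

	ipfscluster "github.com/ipfs/ipfs-cluster"
)

func main() {
	cfg, err := ipfscluster.LoadConfig("cluster.json") // path is illustrative
	if err != nil {
		fmt.Println(err)
		return
	}

	api, err := ipfscluster.NewHTTPClusterAPI(cfg)
	if err != nil {
		fmt.Println(err)
		return
	}
	ipfs, err := ipfscluster.NewIPFSHTTPConnector(cfg)
	if err != nil {
		fmt.Println(err)
		return
	}

	state := ipfscluster.NewMapState()
	cluster, err := ipfscluster.NewCluster(cfg, api, ipfs, state)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer cluster.Shutdown()

	// cluster.Pin(), cluster.Unpin(), cluster.Members() etc. are now available,
	// and the HTTP API and IPFS proxy listen on the configured addresses.
}
```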
## API
TODO
## Contribute
PRs accepted.
Small note: If editing the README, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification.
## License
MIT © Protocol Labs, Inc.

285
api.go Normal file

@ -0,0 +1,285 @@
package ipfscluster
import (
"context"
"encoding/json"
"fmt"
"net/http"
peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer"
cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid"
mux "github.com/gorilla/mux"
)
// ClusterHTTPAPI implements a ClusterAPI and aims to provide
// a RESTful HTTP API for Cluster.
type ClusterHTTPAPI struct {
ctx context.Context
cancel context.CancelFunc
listenAddr string
listenPort int
rpcCh chan ClusterRPC
router *mux.Router
}
type route struct {
Name string
Method string
Pattern string
HandlerFunc http.HandlerFunc
}
type errorResp struct {
Code int `json:"code"`
Message string `json:"message"`
}
func (e errorResp) Error() string {
return e.Message
}
type versionResp struct {
Version string `json:"version"`
}
type pinResp struct {
Pinned string `json:"pinned"`
}
type unpinResp struct {
Unpinned string `json:"unpinned"`
}
// NewHTTPClusterAPI creates a new ClusterHTTPAPI and starts serving
// requests on the configured listen address and port.
func NewHTTPClusterAPI(cfg *ClusterConfig) (*ClusterHTTPAPI, error) {
ctx, cancel := context.WithCancel(context.Background())
api := &ClusterHTTPAPI{
ctx: ctx,
cancel: cancel,
listenAddr: cfg.ClusterAPIListenAddr,
listenPort: cfg.ClusterAPIListenPort,
rpcCh: make(chan ClusterRPC, RPCMaxQueue),
}
router := mux.NewRouter().StrictSlash(true)
for _, route := range api.routes() {
router.
Methods(route.Method).
Path(route.Pattern).
Name(route.Name).
Handler(route.HandlerFunc)
}
api.router = router
logger.Infof("Starting Cluster API on %s:%d", api.listenAddr, api.listenPort)
go api.run()
return api, nil
}
func (api *ClusterHTTPAPI) routes() []route {
return []route{
route{
"Members",
"GET",
"/members",
api.memberListHandler,
},
route{
"Pins",
"GET",
"/pins",
api.pinListHandler,
},
route{
"Version",
"GET",
"/version",
api.versionHandler,
},
route{
"Pin",
"POST",
"/pins/{hash}",
api.pinHandler,
},
route{
"Unpin",
"DELETE",
"/pins/{hash}",
api.unpinHandler,
},
}
}
func (api *ClusterHTTPAPI) run() {
go func() {
// FIXME: use a closable net.Listener so the server can be stopped cleanly
err := http.ListenAndServe(
fmt.Sprintf("%s:%d", api.listenAddr, api.listenPort),
api.router)
if err != nil {
logger.Error("starting ClusterHTTPAPI server:", err)
return
}
}()
// FIXME
go func() {
select {
case <-api.ctx.Done():
return
}
}()
}
// Shutdown stops any API listeners.
func (api *ClusterHTTPAPI) Shutdown() error {
logger.Info("Stopping Cluster API")
api.cancel()
return nil
}
// RpcChan can be used by Cluster to read any
// requests from this component
func (api *ClusterHTTPAPI) RpcChan() <-chan ClusterRPC {
return api.rpcCh
}
func (api *ClusterHTTPAPI) versionHandler(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithCancel(api.ctx)
defer cancel()
rRpc := RPC(VersionRPC, nil)
resp := MakeRPC(ctx, api.rpcCh, rRpc, true)
if checkResponse(w, rRpc.Method, resp) {
v := resp.Data.(string)
sendJSONResponse(w, 200, versionResp{v})
}
}
func (api *ClusterHTTPAPI) memberListHandler(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithCancel(api.ctx)
defer cancel()
rRpc := RPC(MemberListRPC, nil)
resp := MakeRPC(ctx, api.rpcCh, rRpc, true)
if checkResponse(w, rRpc.Method, resp) {
data := resp.Data.([]peer.ID)
var strPeers []string
for _, p := range data {
strPeers = append(strPeers, p.Pretty())
}
sendJSONResponse(w, 200, strPeers)
}
}
func (api *ClusterHTTPAPI) pinHandler(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithCancel(api.ctx)
defer cancel()
vars := mux.Vars(r)
hash := vars["hash"]
c, err := cid.Decode(hash)
if err != nil {
sendErrorResponse(w, 400, err.Error())
return
}
rRpc := RPC(PinRPC, *c)
resp := MakeRPC(ctx, api.rpcCh, rRpc, true)
if checkResponse(w, rRpc.Method, resp) {
c := resp.Data.(cid.Cid)
sendJSONResponse(w, 200, pinResp{c.String()})
}
}
func (api *ClusterHTTPAPI) unpinHandler(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithCancel(api.ctx)
defer cancel()
vars := mux.Vars(r)
hash := vars["hash"]
c, err := cid.Decode(hash)
if err != nil {
sendErrorResponse(w, 400, err.Error())
return
}
rRpc := RPC(UnpinRPC, *c)
resp := MakeRPC(ctx, api.rpcCh, rRpc, true)
if checkResponse(w, rRpc.Method, resp) {
c := resp.Data.(cid.Cid)
sendJSONResponse(w, 200, unpinResp{c.String()})
}
}
func (api *ClusterHTTPAPI) pinListHandler(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithCancel(api.ctx)
defer cancel()
rRpc := RPC(PinListRPC, nil)
resp := MakeRPC(ctx, api.rpcCh, rRpc, true)
if checkResponse(w, rRpc.Method, resp) {
data := resp.Data.([]*cid.Cid)
strPins := make([]string, 0, len(data))
for _, d := range data {
strPins = append(strPins, d.String())
}
sendJSONResponse(w, 200, strPins)
}
}
// checkResponse does basic checking on an RPCResponse. It takes care of
// using the http.ResponseWriter to send an empty response, or to send
// an error if the RPCResponse contains one. It also checks that the RPC
// response data can be cast back to the expected type. It returns false
// if the checks fail or an empty response is sent, and true otherwise.
func checkResponse(w http.ResponseWriter, method RPCMethod, resp RPCResponse) bool {
if resp.Error == nil && resp.Data == nil {
sendEmptyResponse(w)
return false
} else if err := resp.Error; err != nil {
sendErrorResponse(w, 500, err.Error())
return false
}
// Check that we can cast to the expected response format
ok := true
switch method {
case PinRPC:
_, ok = resp.Data.(cid.Cid)
case UnpinRPC:
_, ok = resp.Data.(cid.Cid)
case PinListRPC:
_, ok = resp.Data.([]*cid.Cid)
case IPFSPinRPC:
case IPFSUnpinRPC:
case VersionRPC:
_, ok = resp.Data.(string)
case MemberListRPC:
_, ok = resp.Data.([]peer.ID)
default:
ok = false
}
if !ok {
logger.Error("unexpected RPC Response format")
sendErrorResponse(w, 500, "Unexpected RPC Response format")
return false
}
return true
}
func sendEmptyResponse(w http.ResponseWriter) {
w.WriteHeader(http.StatusNoContent)
}
func sendJSONResponse(w http.ResponseWriter, code int, resp interface{}) {
w.WriteHeader(code)
if err := json.NewEncoder(w).Encode(resp); err != nil {
panic(err)
}
}
func sendErrorResponse(w http.ResponseWriter, code int, msg string) {
errorResp := errorResp{code, msg}
logger.Errorf("Sending error response: %d: %s", code, msg)
sendJSONResponse(w, code, errorResp)
}
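For reference, a minimal client-side sketch exercising the routes above with the standard library. The base URL is a placeholder (the real address comes from cluster_api_listen_addr and cluster_api_listen_port in the configuration) and the CID is illustrative:
```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Placeholder: built from cluster_api_listen_addr/cluster_api_listen_port.
	base := "http://127.0.0.1:9000"

	// GET /version returns {"version":"..."}.
	resp, err := http.Get(base + "/version")
	if err != nil {
		fmt.Println(err)
		return
	}
	body, _ := ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Println(string(body))

	// POST /pins/{hash} asks the cluster to pin a CID (illustrative CID below);
	// DELETE on the same path unpins it.
	hash := "QmWhbV7KX9toZbi4ycj6J9GVbTVvzGx5ERffc6ymYLT5HS"
	req, _ := http.NewRequest("POST", base+"/pins/"+hash, nil)
	if _, err := http.DefaultClient.Do(req); err != nil {
		fmt.Println(err)
	}
}
```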

309
cluster.go Normal file

@ -0,0 +1,309 @@
package ipfscluster
import (
"context"
"encoding/base64"
"errors"
"fmt"
"strings"
host "gx/ipfs/QmPTGbC34bPKaUm9wTxBo7zSCac7pDuG42ZmnXC718CKZZ/go-libp2p-host"
multiaddr "gx/ipfs/QmUAQaWbKxGCUTuoQVvvicbQNZ9APF5pDGWyAZSe93AtKH/go-multiaddr"
swarm "gx/ipfs/QmWfxnAiQ5TnnCgiX9ikVUKFNHRgGhbgKdx5DoKPELD7P4/go-libp2p-swarm"
basichost "gx/ipfs/QmbzCT1CwxVZ2ednptC9RavuJe7Bv8DDi2Ne89qUrA37XM/go-libp2p/p2p/host/basic"
peerstore "gx/ipfs/QmeXj9VAjmYQZxpmVz7VzccbJrpmr8qkCDSjfVNsPTWTYU/go-libp2p-peerstore"
peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer"
crypto "gx/ipfs/QmfWDLQjGjVe4fr5CoztYW2DYYjRysMJrFe1RCsXLPTf46/go-libp2p-crypto"
cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid"
)
// Cluster is the main IPFS cluster component. It provides
// the Go API and orchestrates the components that make up the system.
type Cluster struct {
ctx context.Context
cancel context.CancelFunc
config *ClusterConfig
host host.Host
consensus *ClusterConsensus
api ClusterAPI
ipfs IPFSConnector
state ClusterState
}
// NewCluster builds a ready-to-start IPFS Cluster. It takes a ClusterAPI,
// an IPFSConnector and a ClusterState as parameters, allowing the user
// to provide custom implementations of these components.
func NewCluster(cfg *ClusterConfig, api ClusterAPI, ipfs IPFSConnector, state ClusterState) (*Cluster, error) {
ctx, cancel := context.WithCancel(context.Background())
host, err := makeHost(ctx, cfg)
if err != nil {
return nil, err
}
consensus, err := NewClusterConsensus(cfg, host, state)
if err != nil {
logger.Errorf("Error creating consensus: %s", err)
return nil, err
}
cluster := &Cluster{
ctx: ctx,
cancel: cancel,
config: cfg,
host: host,
consensus: consensus,
api: api,
ipfs: ipfs,
state: state,
}
logger.Info("Starting IPFS Cluster")
go cluster.run()
return cluster, nil
}
// Shutdown stops the IPFS cluster components
func (c *Cluster) Shutdown() error {
logger.Info("Shutting down IPFS Cluster")
if err := c.consensus.Shutdown(); err != nil {
logger.Errorf("Error stopping consensus: %s", err)
return err
}
if err := c.api.Shutdown(); err != nil {
logger.Errorf("Error stopping API: %s", err)
return err
}
if err := c.ipfs.Shutdown(); err != nil {
logger.Errorf("Error stopping IPFS Connector: %s", err)
return err
}
c.cancel()
return nil
}
// Pin makes the cluster Pin a Cid. This implies adding the Cid
// to the shared state of the IPFS Cluster peers. Depending on the cluster
// pinning strategy, the IPFSConnector may then request the IPFS daemon
// to pin the Cid.
//
// Pin returns an error if the operation could not be persisted
// to the global state. Pin does not reflect the success or failure
// of underlying IPFS daemon pinning operations.
func (c *Cluster) Pin(h *cid.Cid) error {
// TODO: Check this hash makes any sense
logger.Info("Pinning:", h)
err := c.consensus.AddPin(h)
if err != nil {
logger.Error(err)
return err
}
return nil
}
// Unpin makes the cluster Unpin a Cid. This implies removing the Cid
// from the shared state of the IPFS Cluster peers. Depending on the cluster
// unpinning strategy, the IPFSConnector may then request the IPFS daemon
// to unpin the Cid.
//
// Unpin returns an error if the operation could not be persisted
// to the global state. Unpin does not reflect the success or failure
// of underlying IPFS daemon unpinning operations.
func (c *Cluster) Unpin(h *cid.Cid) error {
// TODO: Check this hash makes any sense
logger.Info("Unpinning:", h)
err := c.consensus.RmPin(h)
if err != nil {
logger.Error(err)
return err
}
return nil
}
// Version returns the current IPFS Cluster version
func (c *Cluster) Version() string {
return Version
}
// Members returns the IDs of the members of this Cluster
func (c *Cluster) Members() []peer.ID {
return c.host.Peerstore().Peers()
}
// run reads from the RPC channels of the different components and launches
// short-lived go-routines to handle any requests.
func (c *Cluster) run() {
ipfsCh := c.ipfs.RpcChan()
consensusCh := c.consensus.RpcChan()
apiCh := c.api.RpcChan()
for {
select {
case ipfsOp := <-ipfsCh:
go c.handleOp(&ipfsOp)
case consensusOp := <-consensusCh:
go c.handleOp(&consensusOp)
case apiOp := <-apiCh:
go c.handleOp(&apiOp)
case <-c.ctx.Done():
logger.Debug("Cluster is Done()")
return
}
}
}
// handleOp takes care of running the necessary action for a
// clusterRPC request and sending the response.
func (c *Cluster) handleOp(op *ClusterRPC) {
var data interface{} = nil
var err error = nil
switch op.Method {
case PinRPC:
hash, ok := op.Arguments.(cid.Cid)
if !ok {
err = errors.New("Bad PinRPC type")
break
}
err = c.Pin(&hash)
case UnpinRPC:
hash, ok := op.Arguments.(cid.Cid)
if !ok {
err = errors.New("Bad UnpinRPC type")
break
}
err = c.Unpin(&hash)
case PinListRPC:
data, err = c.consensus.ListPins()
case IPFSPinRPC:
hash, ok := op.Arguments.(cid.Cid)
if !ok {
err = errors.New("Bad IPFSPinRPC type")
break
}
err = c.ipfs.Pin(&hash)
case IPFSUnpinRPC:
hash, ok := op.Arguments.(cid.Cid)
if !ok {
err = errors.New("Bad IPFSUnpinRPC type")
break
}
err = c.ipfs.Unpin(&hash)
case VersionRPC:
data = c.Version()
case MemberListRPC:
data = c.Members()
case RollbackRPC:
state, ok := op.Arguments.(ClusterState)
if !ok {
err = errors.New("Bad RollbackRPC type")
break
}
err = c.consensus.Rollback(state)
case LeaderRPC:
// Leader RPC is a RPC that needs to be run
// by the Consensus Leader. Arguments is a wrapped RPC.
rpc, ok := op.Arguments.(*ClusterRPC)
if !ok {
err = errors.New("Bad LeaderRPC type")
}
data, err = c.leaderRPC(rpc)
default:
logger.Error("Unknown operation. Ignoring")
}
resp := RPCResponse{
Data: data,
Error: err,
}
op.ResponseCh <- resp
}
// leaderRPC uses libp2p to contact the cluster leader and ask it to run the wrapped RPC
func (c *Cluster) leaderRPC(rpc *ClusterRPC) (interface{}, error) {
return nil, errors.New("Not implemented yet")
}
// makeHost makes a libp2p-host
func makeHost(ctx context.Context, cfg *ClusterConfig) (host.Host, error) {
ps := peerstore.NewPeerstore()
peerID, err := peer.IDB58Decode(cfg.ID)
if err != nil {
logger.Error("decoding ID: ", err)
return nil, err
}
pkb, err := base64.StdEncoding.DecodeString(cfg.PrivateKey)
if err != nil {
logger.Error("decoding private key base64: ", err)
return nil, err
}
privateKey, err := crypto.UnmarshalPrivateKey(pkb)
if err != nil {
logger.Error("unmarshaling private key", err)
return nil, err
}
publicKey := privateKey.GetPublic()
addr, err := multiaddr.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d",
cfg.ConsensusListenAddr, cfg.ConsensusListenPort))
if err != nil {
return nil, err
}
if err := ps.AddPubKey(peerID, publicKey); err != nil {
return nil, err
}
if err := ps.AddPrivKey(peerID, privateKey); err != nil {
return nil, err
}
for _, cpeer := range cfg.ClusterPeers {
addr, err := multiaddr.NewMultiaddr(cpeer)
if err != nil {
logger.Errorf("parsing cluster peer multiaddress %s: %s", cpeer, err)
return nil, err
}
pid, err := addr.ValueForProtocol(multiaddr.P_IPFS)
if err != nil {
return nil, err
}
strAddr := strings.Split(addr.String(), "/ipfs/")[0]
maddr, err := multiaddr.NewMultiaddr(strAddr)
if err != nil {
return nil, err
}
peerID, err := peer.IDB58Decode(pid)
if err != nil {
return nil, err
}
ps.AddAddrs(
peerID,
[]multiaddr.Multiaddr{maddr},
peerstore.PermanentAddrTTL)
}
network, err := swarm.NewNetwork(
ctx,
[]multiaddr.Multiaddr{addr},
peerID,
ps,
nil,
)
if err != nil {
return nil, err
}
bhost := basichost.New(network)
return bhost, nil
}
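Note that makeHost expects each cluster_peers entry to be a multiaddress ending in the peer's /ipfs/ component, since the address and the peer ID are split apart before being added to the peerstore. An illustrative entry (address, port and ID are placeholders):
```
/ip4/192.168.1.2/tcp/9000/ipfs/<peer ID, base58>
```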

1
component.go Normal file

@ -0,0 +1 @@
package ipfscluster

31
config.go Normal file

@ -0,0 +1,31 @@
package ipfscluster
import (
"encoding/json"
"io/ioutil"
)
type ClusterConfig struct {
IPFSHost string `json:"ipfs_host"`
IPFSPort int `json:"ipfs_port"`
ClusterAPIListenAddr string `json:"cluster_api_listen_addr"`
ClusterAPIListenPort int `json:"cluster_api_listen_port"`
IPFSAPIListenAddr string `json:"ipfs_api_listen_addr"`
IPFSAPIListenPort int `json:"ipfs_api_listen_port"`
ConsensusListenAddr string `json:"consensus_listen_addr"`
ConsensusListenPort int `json:"consensus_listen_port"`
ClusterPeers []string `json:"cluster_peers"`
ID string `json:"id"`
PrivateKey string `json:"private_key"`
RaftFolder string `json:"raft_folder"`
}
func LoadConfig(path string) (*ClusterConfig, error) {
config := &ClusterConfig{}
file, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
if err := json.Unmarshal(file, config); err != nil {
return nil, err
}
return config, nil
}
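For illustration, a cluster.json with these fields might look like the following (every value here is a placeholder, not a default defined by this commit):
```json
{
  "id": "<peer ID, base58>",
  "private_key": "<base64-encoded libp2p private key>",
  "cluster_peers": [],
  "consensus_listen_addr": "0.0.0.0",
  "consensus_listen_port": 9000,
  "cluster_api_listen_addr": "127.0.0.1",
  "cluster_api_listen_port": 9001,
  "ipfs_api_listen_addr": "127.0.0.1",
  "ipfs_api_listen_port": 9002,
  "ipfs_host": "127.0.0.1",
  "ipfs_port": 5001,
  "raft_folder": "./raftdata"
}
```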

232
consensus.go Normal file

@ -0,0 +1,232 @@
package ipfscluster
import (
"context"
"errors"
host "gx/ipfs/QmPTGbC34bPKaUm9wTxBo7zSCac7pDuG42ZmnXC718CKZZ/go-libp2p-host"
consensus "gx/ipfs/QmZ88KbrvZMJpXaNwAGffswcYKz8EbeafzAFGMCA6MEZKt/go-libp2p-consensus"
libp2praft "gx/ipfs/QmdHo2LQKmGQ6rDAWFxnzNuW3z8b6Xmw3wEFsMQaj9Rsqj/go-libp2p-raft"
peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer"
cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid"
)
const (
maxSnapshots = 5
raftSingleMode = true
)
// Type of pin operation
const (
LogOpPin = iota + 1
LogOpUnpin
)
type clusterLogOpType int
// clusterLogOp represents an operation for the OpLogConsensus system.
// It implements the consensus.Op interface.
type clusterLogOp struct {
Cid string
Type clusterLogOpType
ctx context.Context
rpcCh chan ClusterRPC
}
// ApplyTo applies the operation to the ClusterState
func (op clusterLogOp) ApplyTo(cstate consensus.State) (consensus.State, error) {
state, ok := cstate.(ClusterState)
if !ok {
// Should never be here
panic("Received unexpected state type")
}
cidObj, err := cid.Decode(op.Cid)
if err != nil {
// Should never be here
panic("Could not decode a CID we ourselves encoded")
}
var rpcM RPCMethod
var resp RPCResponse
ctx, cancel := context.WithCancel(op.ctx)
defer cancel()
switch op.Type {
case LogOpPin:
err = state.AddPin(cidObj)
rpcM = IPFSPinRPC
case LogOpUnpin:
err = state.RmPin(cidObj)
rpcM = IPFSUnpinRPC
default:
err = errors.New("Unknown clusterLogOp type")
}
if err != nil {
goto ROLLBACK
}
// Do we want to wait? Pins can take a very long time
resp = MakeRPC(ctx, op.rpcCh, RPC(rpcM, *cidObj), true)
if resp.Error != nil {
err = resp.Error
goto ROLLBACK
}
return state, nil
ROLLBACK:
// We failed to apply the operation to the state
// and therefore we need to request that the cluster roll back
// to the previous state. This operation can only be performed
// by the cluster leader.
rllbckRPC := RPC(RollbackRPC, state)
leadrRPC := RPC(LeaderRPC, rllbckRPC)
MakeRPC(ctx, op.rpcCh, leadrRPC, false)
logger.Errorf("an error ocurred when applying Op to state: %s", err)
logger.Error("a rollback was requested")
// Make sure the consensus algorithm nows this update did not work
return nil, errors.New("a rollback was requested. Reason: " + err.Error())
}
// ClusterConsensus handles the work of keeping a shared-state between
// the members of an IPFS Cluster, as well as modifying that state and
// applying any updates in a thread-safe manner.
type ClusterConsensus struct {
ctx context.Context
cancel context.CancelFunc
consensus consensus.OpLogConsensus
actor consensus.Actor
rpcCh chan ClusterRPC
p2pRaft *libp2pRaftWrap
}
// NewClusterConsensus builds a new ClusterConsensus component. The state
// is used to initialize the Consensus system, so any information in it
// is discarded.
func NewClusterConsensus(cfg *ClusterConfig, host host.Host, state ClusterState) (*ClusterConsensus, error) {
logger.Info("Starting Consensus component")
ctx, cancel := context.WithCancel(context.Background())
rpcCh := make(chan ClusterRPC, RPCMaxQueue)
op := clusterLogOp{
ctx: ctx,
rpcCh: rpcCh,
}
con, actor, wrapper, err := makeLibp2pRaft(cfg, host, state, op)
if err != nil {
return nil, err
}
con.SetActor(actor)
cc := &ClusterConsensus{
ctx: ctx,
cancel: cancel,
consensus: con,
actor: actor,
rpcCh: rpcCh,
p2pRaft: wrapper,
}
return cc, nil
}
// Shutdown stops the component so it will not process any
// more updates. The underlying consensus is permanently
// shut down, along with the libp2p transport.
func (cc *ClusterConsensus) Shutdown() error {
logger.Info("Stopping Consensus component")
cc.cancel()
// Take a snapshot so that on restart we resume from the latest
// state instead of replaying the whole log (which would re-trigger
// the pin/unpin operations it contains).
f := cc.p2pRaft.raft.Snapshot()
_ = f.Error()
f = cc.p2pRaft.raft.Shutdown()
err := f.Error()
cc.p2pRaft.transport.Close()
if err != nil {
return err
}
return nil
}
// RpcChan can be used by Cluster to read any
// requests from this component
func (cc *ClusterConsensus) RpcChan() <-chan ClusterRPC {
return cc.rpcCh
}
func (cc *ClusterConsensus) op(c *cid.Cid, t clusterLogOpType) clusterLogOp {
return clusterLogOp{
Cid: c.String(),
Type: t,
}
}
// AddPin submits a Cid to the shared state of the cluster.
func (cc *ClusterConsensus) AddPin(c *cid.Cid) error {
// Create pin operation for the log
op := cc.op(c, LogOpPin)
_, err := cc.consensus.CommitOp(op)
if err != nil {
// This means the op did not make it to the log
return err
}
// Note: the returned state could be nil
// if ApplyTo failed. We deal with this in ApplyTo.
// We must schedule a Rollback in that case.
// Here we only care that the operation was committed
// to the log, not if the resulting state is valid.
logger.Infof("Pin committed to global state: %s", c)
return nil
}
// RmPin removes a Cid from the shared state of the cluster.
func (cc *ClusterConsensus) RmPin(c *cid.Cid) error {
// Create unpin operation for the log
op := cc.op(c, LogOpUnpin)
_, err := cc.consensus.CommitOp(op)
if err != nil {
return err
}
// Note: the returned state could be nil
// if ApplyTo failed. We deal with this in ApplyTo.
// We must schedule a Rollback in that case.
// Here we only care that the operation was committed
// to the log, not if the resulting state is valid.
logger.Infof("Unpin committed to global state: %s", c)
return nil
}
// ListPins returns the list of Cids which are part of the
// shared state of the cluster.
func (cc *ClusterConsensus) ListPins() ([]*cid.Cid, error) {
cstate, err := cc.consensus.GetLogHead()
if err != nil {
return nil, err
}
state := cstate.(ClusterState)
return state.ListPins(), nil
}
// Leader returns the peer ID of the current leader of the
// cluster.
func (cc *ClusterConsensus) Leader() peer.ID {
// FIXME: Hashicorp Raft specific
raftactor := cc.actor.(*libp2praft.Actor)
return raftactor.Leader()
}
func (cc *ClusterConsensus) Rollback(state ClusterState) error {
return cc.consensus.Rollback(state)
}

16
illustrations.md Normal file

@ -0,0 +1,16 @@
# ipfs-cluster illustrations
- old ipfs-cluster design notes https://github.com/ipfs/notes/issues/58
### Design Illustrations ([source](https://ipfs.io/ipfs/QmWhbV7KX9toZbi4ycj6J9GVbTVvzGx5ERffc6ymYLT5HS))
![](https://ipfs.io/ipfs/QmWhbV7KX9toZbi4ycj6J9GVbTVvzGx5ERffc6ymYLT5HS/cluster-notes.001.jpg)
![](https://ipfs.io/ipfs/QmWhbV7KX9toZbi4ycj6J9GVbTVvzGx5ERffc6ymYLT5HS/cluster-notes.002.jpg)
![](https://ipfs.io/ipfs/QmWhbV7KX9toZbi4ycj6J9GVbTVvzGx5ERffc6ymYLT5HS/cluster-notes.003.jpg)
![](https://ipfs.io/ipfs/QmWhbV7KX9toZbi4ycj6J9GVbTVvzGx5ERffc6ymYLT5HS/cluster-notes.004.jpg)
![](https://ipfs.io/ipfs/QmWhbV7KX9toZbi4ycj6J9GVbTVvzGx5ERffc6ymYLT5HS/cluster-notes.005.jpg)

51
ipfs-cluster/main.go Normal file

@ -0,0 +1,51 @@
package main
import (
"fmt"
"os"
"os/signal"
logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log"
ipfscluster "github.com/ipfs/ipfs-cluster"
)
func main() {
logging.SetLogLevel("ipfs-cluster", "debug")
signalChan := make(chan os.Signal, 1)
cleanup := make(chan bool)
signal.Notify(signalChan, os.Interrupt)
clusterCfg, err := ipfscluster.LoadConfig("../cluster.json")
if err != nil {
fmt.Println(err)
return
}
api, err := ipfscluster.NewHTTPClusterAPI(clusterCfg)
if err != nil {
fmt.Println(err)
return
}
proxy, err := ipfscluster.NewIPFSHTTPConnector(clusterCfg)
if err != nil {
fmt.Println(err)
return
}
state := ipfscluster.NewMapState()
cluster, err := ipfscluster.NewCluster(clusterCfg, api, proxy, state)
if err != nil {
fmt.Println(err)
return
}
go func() {
<-signalChan
fmt.Println("caught signal")
cluster.Shutdown()
cleanup <- true
}()
<-cleanup
}

124
ipfs_cluster.go Normal file

@ -0,0 +1,124 @@
// Package ipfscluster implements a wrapper for the IPFS daemon which
// allows orchestrating a number of tasks between several IPFS nodes.
//
// IPFS Cluster uses a consensus algorithm and libP2P to keep a shared
// state between the different members of the cluster. This state is
// primarily used to keep track of pinned items, and ensure that an
// item is pinned in different places.
package ipfscluster
import (
"context"
"errors"
logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log"
"time"
peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer"
cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid"
)
var logger = logging.Logger("ipfs-cluster")
// Current Cluster version.
const Version = "0.0.1"
// RPCMaxQueue can be used to set the size of the ClusterRPC channels.
var RPCMaxQueue = 128
// MakeRPCRetryInterval specifies how long to wait before retrying
// to put a ClusterRPC request in the channel in MakeRPC().
var MakeRPCRetryInterval = time.Second
// ClusterComponent represents a piece of ipfscluster. Cluster components
// usually run their own goroutines (an HTTP server, for example) which
// are stopped via Shutdown(). A ClusterRPC channel, exposed via RpcChan(),
// is used by Cluster to perform operations requested by the
// component.
type ClusterComponent interface {
Shutdown() error
RpcChan() <-chan ClusterRPC
}
// ClusterAPI is a component which offers an API for Cluster. This is
// a base component.
type ClusterAPI interface {
ClusterComponent
}
// IPFSConnector is a component which allows cluster to interact with
// an IPFS daemon.
type IPFSConnector interface {
ClusterComponent
Pin(*cid.Cid) error
Unpin(*cid.Cid) error
}
// Peered represents a component which needs to be aware of the peers
// in the Cluster.
type Peered interface {
AddPeer(p peer.ID)
RmPeer(p peer.ID)
SetPeers(peers []peer.ID)
}
// ClusterState represents the shared state of the cluster and it
// is used by the ClusterConsensus component to keep track of
// objects which are pinned and their location.
// ClusterState is in charge of implementing any advanced pinning
// strategies.
type ClusterState interface {
AddPin(*cid.Cid) error
RmPin(*cid.Cid) error
Exists(*cid.Cid) bool
ListPins() []*cid.Cid
// ShouldPin(peer.ID, *cid.Cid) bool
}
// MakeRPC sends a ClusterRPC object over a channel and waits for an answer on
// ClusterRPC.ResponseCh channel. It can be used by any ClusterComponent to
// simplify making RPC requests to Cluster. The ctx parameter must be a
// cancellable context, and can be used to timeout requests.
// If the message cannot be placed in the ClusterRPC channel, retries will be
// issued every MakeRPCRetryInterval.
func MakeRPC(ctx context.Context, ch chan ClusterRPC, r ClusterRPC, waitForResponse bool) RPCResponse {
logger.Debugf("Sending RPC %d", r.Method)
exitLoop := false
for !exitLoop {
select {
case ch <- r:
exitLoop = true
case <-ctx.Done():
logger.Debug("Cancelling sending RPC")
return RPCResponse{
Data: nil,
Error: errors.New("Operation timed out while sending RPC"),
}
default:
logger.Error("RPC channel is full. Will retry in 1 second.")
time.Sleep(MakeRPCRetryInterval * time.Second)
}
}
if !waitForResponse {
return RPCResponse{}
}
logger.Debugf("Waiting for response")
select {
case resp, ok := <-r.ResponseCh:
if !ok { // Not interested in response
logger.Warning("Response channel closed. Ignoring")
return RPCResponse{
Data: nil,
Error: nil,
}
}
return resp
case <-ctx.Done():
logger.Debug("Cancelling waiting for RPC Response")
return RPCResponse{
Data: nil,
Error: errors.New("Operation timed out while waiting for response"),
}
}
}
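As a usage sketch (within the ipfscluster package), a component holding its rpcCh could request the cluster version like this; the helper name and the timeout are illustrative, not part of this commit:
```go
package ipfscluster

import (
	"context"
	"errors"
	"time"
)

// requestVersion is an illustrative helper showing the MakeRPC pattern.
func requestVersion(rpcCh chan ClusterRPC) (string, error) {
	// The 10-second timeout is an assumption; any cancellable context works.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	resp := MakeRPC(ctx, rpcCh, RPC(VersionRPC, nil), true)
	if resp.Error != nil {
		return "", resp.Error
	}
	v, ok := resp.Data.(string)
	if !ok {
		return "", errors.New("unexpected VersionRPC response type")
	}
	return v, nil
}
```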

242
ipfs_connector.go Normal file

@ -0,0 +1,242 @@
package ipfscluster
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid"
)
// IPFSHTTPConnector implements the IPFSConnector interface
// and provides a component which does two tasks:
//
// On one side, it proxies HTTP requests to the configured IPFS
// daemon. It is able to intercept these requests though, and
// perform extra operations on them.
//
// On the other side, it is used to perform on-demand requests
// against the configured IPFS daemon (such as a pin request).
type IPFSHTTPConnector struct {
ctx context.Context
cancel context.CancelFunc
destHost string
destPort int
listenAddr string
listenPort int
handlers map[string]func(http.ResponseWriter, *http.Request)
rpcCh chan ClusterRPC
}
type ipfsError struct {
Message string
}
// NewIPFSHTTPConnector creates the component and leaves it ready to be started
func NewIPFSHTTPConnector(cfg *ClusterConfig) (*IPFSHTTPConnector, error) {
ctx, cancel := context.WithCancel(context.Background())
ipfs := &IPFSHTTPConnector{
ctx: ctx,
cancel: cancel,
destHost: cfg.IPFSHost,
destPort: cfg.IPFSPort,
listenAddr: cfg.IPFSAPIListenAddr,
listenPort: cfg.IPFSAPIListenPort,
handlers: make(map[string]func(http.ResponseWriter, *http.Request)),
rpcCh: make(chan ClusterRPC, RPCMaxQueue),
}
logger.Infof("Starting IPFS Proxy on %s:%d", ipfs.listenAddr, ipfs.listenPort)
go ipfs.run()
return ipfs, nil
}
// This will run a custom handler if we have one for a URL.Path, or
// otherwise just proxy the requests.
func (ipfs *IPFSHTTPConnector) handle(w http.ResponseWriter, r *http.Request) {
if customHandler, ok := ipfs.handlers[r.URL.Path]; ok {
customHandler(w, r)
} else {
ipfs.defaultHandler(w, r)
}
}
// defaultHandler just proxies the requests
func (ipfs *IPFSHTTPConnector) defaultHandler(w http.ResponseWriter, r *http.Request) {
newURL := *r.URL
newURL.Host = fmt.Sprintf("%s:%d", ipfs.destHost, ipfs.destPort)
newURL.Scheme = "http"
proxyReq, err := http.NewRequest(r.Method, newURL.String(), r.Body)
if err != nil {
logger.Error("error creating proxy request: ", err)
http.Error(w, "error forwarding request", 500)
return
}
resp, err := http.DefaultClient.Do(proxyReq)
if err != nil {
logger.Error("error forwarding request: ", err)
http.Error(w, "error forwaring request", 500)
return
}
// Set response headers
for k, v := range resp.Header {
for _, s := range v {
w.Header().Add(k, s)
}
}
// And body
io.Copy(w, resp.Body)
}
func (ipfs *IPFSHTTPConnector) run() {
// This launches the proxy
go func() {
smux := http.NewServeMux()
smux.HandleFunc("/", ipfs.handle)
// FIXME: use a closable net.Listener so the proxy can be stopped cleanly
err := http.ListenAndServe(
fmt.Sprintf("%s:%d", ipfs.listenAddr, ipfs.listenPort),
smux)
if err != nil {
logger.Error(err)
return
}
}()
go func() {
select {
case <-ipfs.ctx.Done():
return
}
}()
}
// RpcChan can be used by Cluster to read any
// requests from this component.
func (ipfs *IPFSHTTPConnector) RpcChan() <-chan ClusterRPC {
return ipfs.rpcCh
}
// Shutdown stops any listeners and stops the component from taking
// any requests.
func (ipfs *IPFSHTTPConnector) Shutdown() error {
logger.Info("Stopping IPFS Proxy")
return nil
}
// Pin performs a pin request against the configured IPFS
// daemon.
func (ipfs *IPFSHTTPConnector) Pin(hash *cid.Cid) error {
logger.Infof("IPFS Pin request for: %s", hash)
pinType, err := ipfs.pinType(hash)
if err != nil || strings.Contains(pinType, "indirect") {
// Not pinned or indirectly pinned
path := fmt.Sprintf("pin/add?arg=%s", hash)
_, err = ipfs.get(path)
return err
}
logger.Debug("object is already pinned. Doing nothing")
return nil
}
// Unpin performs an unpin request against the configured IPFS
// daemon.
func (ipfs *IPFSHTTPConnector) Unpin(hash *cid.Cid) error {
logger.Info("IPFS Unpin request for:", hash)
pinType, err := ipfs.pinType(hash)
if err == nil && !strings.Contains(pinType, "indirect") {
path := fmt.Sprintf("pin/rm?arg=%s", hash)
_, err := ipfs.get(path)
return err
}
// It is not directly pinned, so we do nothing
logger.Debug("object not directly pinned. Doing nothing")
return nil
}
// pinType returns how a hash is pinned
func (ipfs *IPFSHTTPConnector) pinType(hash *cid.Cid) (string, error) {
lsPath := fmt.Sprintf("pin/ls?arg=%s", hash)
body, err := ipfs.get(lsPath)
if err != nil { // Not pinned
return "", err
}
// What type of pin it is
var resp struct {
Keys map[string]struct {
Type string
}
}
err = json.Unmarshal(body, &resp)
if err != nil {
logger.Error("parsing pin/ls response")
return "", err
}
pinObj, ok := resp.Keys[hash.String()]
if !ok {
return "", errors.New("expected to find the pin in the response")
}
pinType := pinObj.Type
logger.Debug("pinType check: ", pinType)
return pinType, nil
}
// get performs the heavy lifting of a get request against
// the IPFS daemon.
func (ipfs *IPFSHTTPConnector) get(path string) ([]byte, error) {
url := fmt.Sprintf("%s/%s",
ipfs.apiURL(),
path)
resp, err := http.Get(url)
if err != nil {
logger.Error("Error unpinning:", err)
return nil, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
logger.Errorf("Error reading response body: %s", err)
return nil, err
}
var ipfsErr ipfsError
decodeErr := json.Unmarshal(body, &ipfsErr)
if resp.StatusCode != http.StatusOK {
var msg string
if decodeErr == nil {
msg = fmt.Sprintf("IPFS error: %d: %s",
resp.StatusCode, ipfsErr.Message)
} else {
msg = fmt.Sprintf("IPFS error: %d: %s",
resp.StatusCode, body)
}
logger.Error(msg)
return nil, errors.New(msg)
}
return body, nil
}
// apiURL is a short-hand for building the url of the IPFS
// daemon API.
func (ipfs *IPFSHTTPConnector) apiURL() string {
return fmt.Sprintf("http://%s:%d/api/v0",
ipfs.destHost,
ipfs.destPort)
}
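For reference, pinType expects the go-ipfs pin/ls response shape shown in this decoding sketch (the CID and the pin type value are illustrative):
```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Illustrative body of a /api/v0/pin/ls?arg=<hash> response.
	body := []byte(`{"Keys":{"QmWhbV7KX9toZbi4ycj6J9GVbTVvzGx5ERffc6ymYLT5HS":{"Type":"recursive"}}}`)

	var resp struct {
		Keys map[string]struct {
			Type string
		}
	}
	if err := json.Unmarshal(body, &resp); err != nil {
		fmt.Println(err)
		return
	}
	for hash, pin := range resp.Keys {
		// The connector treats "indirect" pins as not pinned by it.
		fmt.Println(hash, pin.Type)
	}
}
```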

42
package.json Normal file

@ -0,0 +1,42 @@
{
"author": "hector",
"bugs": {
"url": "https://github.com/ipfs/ipfs-cluster"
},
"gx": {
"dvcsimport": "github.com/ipfs/ipfs-cluster"
},
"gxDependencies": [
{
"author": "hsanjuan",
"hash": "QmZ88KbrvZMJpXaNwAGffswcYKz8EbeafzAFGMCA6MEZKt",
"name": "go-libp2p-consensus",
"version": "0.0.3"
},
{
"author": "whyrusleeping",
"hash": "QmbzCT1CwxVZ2ednptC9RavuJe7Bv8DDi2Ne89qUrA37XM",
"name": "go-libp2p",
"version": "4.3.0"
},
{
"author": "hsanjuan",
"hash": "QmdHo2LQKmGQ6rDAWFxnzNuW3z8b6Xmw3wEFsMQaj9Rsqj",
"name": "go-libp2p-raft",
"version": "0.1.1"
},
{
"author": "whyrusleeping",
"hash": "QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD",
"name": "go-cid",
"version": "0.7.7"
}
],
"gxVersion": "0.10.0",
"language": "go",
"license": "",
"name": "ipfs-cluster",
"releaseCmd": "git commit -a -m \"gx publish $VERSION\"",
"version": "0.0.0"
}

80
raft.go Normal file

@ -0,0 +1,80 @@
package ipfscluster
import (
"os"
"path/filepath"
host "gx/ipfs/QmPTGbC34bPKaUm9wTxBo7zSCac7pDuG42ZmnXC718CKZZ/go-libp2p-host"
libp2praft "gx/ipfs/QmdHo2LQKmGQ6rDAWFxnzNuW3z8b6Xmw3wEFsMQaj9Rsqj/go-libp2p-raft"
peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer"
hashiraft "github.com/hashicorp/raft"
raftboltdb "github.com/hashicorp/raft-boltdb"
)
// libp2pRaftWrap wraps the stuff that we need to run
// hashicorp raft. We carry it around for convenience
type libp2pRaftWrap struct {
raft *hashiraft.Raft
transport *libp2praft.Libp2pTransport
snapshotStore hashiraft.SnapshotStore
logStore hashiraft.LogStore
stableStore hashiraft.StableStore
peerstore *libp2praft.Peerstore
}
// This function does all the heavy work which is specifically related to
// hashicorp's Raft. Other places should just rely on the Consensus interface.
func makeLibp2pRaft(cfg *ClusterConfig, host host.Host, state ClusterState, op clusterLogOp) (*libp2praft.Consensus, *libp2praft.Actor, *libp2pRaftWrap, error) {
transport, err := libp2praft.NewLibp2pTransportWithHost(host)
if err != nil {
logger.Error("creating libp2p-raft transport: ", err)
return nil, nil, nil, err
}
transport.OpenConns()
pstore := &libp2praft.Peerstore{}
hPeers := host.Peerstore().Peers()
strPeers := make([]string, 0, len(hPeers))
for _, p := range hPeers {
strPeers = append(strPeers, peer.IDB58Encode(p))
}
pstore.SetPeers(strPeers)
cons := libp2praft.NewOpLog(state, op)
raftCfg := hashiraft.DefaultConfig()
raftCfg.EnableSingleNode = raftSingleMode
snapshots, err := hashiraft.NewFileSnapshotStore(cfg.RaftFolder, maxSnapshots, nil)
if err != nil {
logger.Error("creating file snapshot store: ", err)
return nil, nil, nil, err
}
logStore, err := raftboltdb.NewBoltStore(filepath.Join(cfg.RaftFolder, "raft.db"))
if err != nil {
logger.Error("creating bolt store: ", err)
return nil, nil, nil, err
}
r, err := hashiraft.NewRaft(raftCfg, cons, logStore, logStore, snapshots, pstore, transport)
if err != nil {
logger.Error("initializing raft: ", err)
return nil, nil, nil, err
}
return cons, libp2praft.NewActor(r), &libp2pRaftWrap{
raft: r,
transport: transport,
snapshotStore: snapshots,
logStore: logStore,
stableStore: logStore,
peerstore: pstore,
}, nil
}
func cleanRaft(cfg *ClusterConfig) {
os.RemoveAll(cfg.RaftFolder)
}

1
response.go Normal file

@ -0,0 +1 @@
package ipfscluster

40
rpc.go Normal file

@ -0,0 +1,40 @@
package ipfscluster
// ClusterRPC supported methods.
const (
PinRPC = iota
UnpinRPC
PinListRPC
IPFSPinRPC
IPFSUnpinRPC
VersionRPC
MemberListRPC
RollbackRPC
LeaderRPC
)
// RPCMethod identifies which RPC-supported operation we are trying to make
type RPCMethod int
// ClusterRPC is used to let Cluster perform operations as mandated by
// its ClusterComponents. The result is placed on the ResponseCh channel.
type ClusterRPC struct {
Method RPCMethod
ResponseCh chan RPCResponse
Arguments interface{}
}
// RPC builds a ClusterRPC request.
func RPC(m RPCMethod, args interface{}) ClusterRPC {
return ClusterRPC{
Method: m,
Arguments: args,
ResponseCh: make(chan RPCResponse),
}
}
// RPCResponse carries the result of a ClusterRPC-requested operation
type RPCResponse struct {
Data interface{}
Error error
}
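A self-contained sketch of this request/response contract (the import path follows this commit's module path; values are illustrative):
```go
package main

import (
	"fmt"

	ipfscluster "github.com/ipfs/ipfs-cluster"
)

func main() {
	// Build a request with the RPC() helper.
	req := ipfscluster.RPC(ipfscluster.VersionRPC, nil)

	// Answer it the way Cluster.handleOp does: send an RPCResponse
	// back on the request's ResponseCh.
	go func() {
		req.ResponseCh <- ipfscluster.RPCResponse{Data: "0.0.1", Error: nil}
	}()

	// The requesting side (normally via MakeRPC) reads the result.
	resp := <-req.ResponseCh
	fmt.Println(resp.Data)
}
```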

63
state.go Normal file

@ -0,0 +1,63 @@
package ipfscluster
import (
peer "gx/ipfs/QmfMmLGoKzCHDN7cGgk64PJr4iipzidDRME8HABSJqvmhC/go-libp2p-peer"
cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid"
)
type pinMode int
const (
pinEverywhere = -1
)
// MapState is a very simple pin map representation.
// PinMap must be public as it will be serialized.
type MapState struct {
PinMap map[string]pinMode
rpcCh chan ClusterRPC
}
func NewMapState() MapState {
return MapState{
PinMap: make(map[string]pinMode),
rpcCh: make(chan ClusterRPC),
}
}
func (st MapState) AddPin(c *cid.Cid) error {
st.PinMap[c.String()] = pinEverywhere
return nil
}
func (st MapState) RmPin(c *cid.Cid) error {
delete(st.PinMap, c.String())
return nil
}
func (st MapState) Exists(c *cid.Cid) bool {
_, ok := st.PinMap[c.String()]
return ok
}
func (st MapState) ListPins() []*cid.Cid {
keys := make([]*cid.Cid, 0, len(st.PinMap))
for k := range st.PinMap {
c, _ := cid.Decode(k)
keys = append(keys, c)
}
return keys
}
func (st MapState) ShouldPin(p peer.ID, c *cid.Cid) bool {
return true
}
func (st MapState) Shutdown() error {
return nil
}
func (st MapState) RpcChan() <-chan ClusterRPC {
return st.rpcCh
}
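A quick usage sketch of MapState through the ClusterState methods (the CID is the illustrations hash from the README; the gx import path is the one vendored by this commit):
```go
package main

import (
	"fmt"

	cid "gx/ipfs/QmcTcsTvfaeEBRFo1TkFgT8sRmgi1n1LTZpecfVP8fzpGD/go-cid"

	ipfscluster "github.com/ipfs/ipfs-cluster"
)

func main() {
	st := ipfscluster.NewMapState()

	c, err := cid.Decode("QmWhbV7KX9toZbi4ycj6J9GVbTVvzGx5ERffc6ymYLT5HS")
	if err != nil {
		fmt.Println(err)
		return
	}

	st.AddPin(c)
	fmt.Println(st.Exists(c))       // true
	fmt.Println(len(st.ListPins())) // 1

	st.RmPin(c)
	fmt.Println(st.Exists(c)) // false
}
```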