Merge branch 'master' into fix/broadcast-ops

commit 95016492c3
@@ -27,12 +27,12 @@ jobs:
       - travis_wait go test -v -timeout 15m -coverprofile=coverage.txt -covermode=atomic ./...
     after_success:
       - bash <(curl -s https://codecov.io/bash)
-  - name: "Main Tests with crdt consensus"
+  - name: "Main Tests with raft consensus"
     script:
-      - go test -v -failfast -consensus crdt .
+      - travis_wait go test -v -timeout 15m -failfast -consensus raft .
   - name: "Main Tests with stateless tracker"
     script:
-      - go test -v -failfast -tracker stateless .
+      - travis_wait go test -v -timeout 15m -failfast -tracker stateless .
   - name: "Golint and go vet"
    script:
      - go get -u golang.org/x/lint/golint
@@ -122,6 +122,11 @@ type Client interface {
 	// MetricNames returns the list of metric types.
 	MetricNames(ctx context.Context) ([]string, error)
 
+	// RepoGC runs garbage collection on the IPFS daemons of cluster peers
+	// and returns the collected CIDs. If local is true, it garbage collects
+	// only on the contacted peer; otherwise on all peers' IPFS daemons.
+	RepoGC(ctx context.Context, local bool) (*api.GlobalRepoGC, error)
 }
 
 // Config allows to configure the parameters to connect

@@ -379,9 +379,26 @@ func (lc *loadBalancingClient) MetricNames(ctx context.Context) ([]string, error
 	}
 
 	err := lc.retry(0, call)
 
 	return metricNames, err
 }
 
+// RepoGC runs garbage collection on the IPFS daemons of cluster peers
+// and returns the collected CIDs. If local is true, it garbage collects
+// only on the contacted peer; otherwise on all peers' IPFS daemons.
+func (lc *loadBalancingClient) RepoGC(ctx context.Context, local bool) (*api.GlobalRepoGC, error) {
+	var repoGC *api.GlobalRepoGC
+
+	call := func(c Client) error {
+		var err error
+		repoGC, err = c.RepoGC(ctx, local)
+		return err
+	}
+
+	err := lc.retry(0, call)
+	return repoGC, err
+}
+
 // Add imports files to the cluster from the given paths. A path can
 // either be a local filesystem location or a web URL (http:// or https://).
 // In the latter case, the destination will be downloaded with a GET request.
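As a usage sketch (not part of this diff), a caller of the new Client.RepoGC method might look like the following, assuming the REST client's default configuration (the standard localhost API address):

	package main

	import (
		"context"
		"fmt"

		client "github.com/ipfs/ipfs-cluster/api/rest/client"
	)

	func main() {
		c, err := client.NewDefaultClient(&client.Config{})
		if err != nil {
			panic(err)
		}
		// local=false asks every cluster peer to run GC on its IPFS daemon.
		gc, err := c.RepoGC(context.Background(), false)
		if err != nil {
			panic(err)
		}
		for p, r := range gc.PeerMap {
			fmt.Printf("%s: %d keys collected\n", p, len(r.Keys))
		}
	}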
@@ -13,9 +13,9 @@ func TestLBClient(t *testing.T) {
 	// say we want to retry the request for at most 5 times
 	cfgs := make([]*Config, 10)
 
-	// 5 empty clients
+	// 5 clients with an invalid api address
 	for i := 0; i < 5; i++ {
-		maddr, _ := ma.NewMultiaddr("")
+		maddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0")
 		cfgs[i] = &Config{
 			APIAddr:           maddr,
 			DisableKeepAlives: true,

@@ -51,7 +51,7 @@ func testRunManyRequestsConcurrently(t *testing.T, cfgs []*Config, strategy LBSt
 	go func() {
 		defer wg.Done()
 		ctx := context.Background()
-		_, err = c.ID(ctx)
+		_, err := c.ID(ctx)
 		if err != nil && pass {
 			t.Error(err)
 		}
@@ -351,6 +351,26 @@ func (c *defaultClient) MetricNames(ctx context.Context) ([]string, error) {
 	return metricsNames, err
 }
 
+// RepoGC runs garbage collection on the IPFS daemons of cluster peers
+// and returns the collected CIDs. If local is true, it garbage collects
+// only on the contacted peer; otherwise on all peers' IPFS daemons.
+func (c *defaultClient) RepoGC(ctx context.Context, local bool) (*api.GlobalRepoGC, error) {
+	ctx, span := trace.StartSpan(ctx, "client/RepoGC")
+	defer span.End()
+
+	var repoGC api.GlobalRepoGC
+	err := c.do(
+		ctx,
+		"POST",
+		fmt.Sprintf("/ipfs/gc?local=%t", local),
+		nil,
+		nil,
+		&repoGC,
+	)
+
+	return &repoGC, err
+}
+
 // WaitFor is a utility function that allows a caller to wait for a
 // particular status for a CID (as defined by StatusFilterParams).
 // It returns the final status for that CID and an error, if there was one.
@@ -667,3 +667,38 @@ func TestAddMultiFile(t *testing.T) {
 
 	testClients(t, api, testF)
 }
+
+func TestRepoGC(t *testing.T) {
+	ctx := context.Background()
+	api := testAPI(t)
+	defer shutdown(api)
+
+	testF := func(t *testing.T, c Client) {
+		globalGC, err := c.RepoGC(ctx, false)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if globalGC.PeerMap == nil {
+			t.Fatal("expected a non-nil peer map")
+		}
+
+		for _, gc := range globalGC.PeerMap {
+			if gc.Peer == "" {
+				t.Error("bad id")
+			}
+			if gc.Error != "" {
+				t.Error("did not expect any error")
+			}
+			if gc.Keys == nil {
+				t.Error("expected a non-nil array of IPFSRepoGC")
+			} else {
+				if !gc.Keys[0].Key.Equals(test.Cid1) {
+					t.Errorf("expected a different cid, expected: %s, found: %s", test.Cid1, gc.Keys[0].Key)
+				}
+			}
+		}
+	}
+
+	testClients(t, api, testF)
+}
@@ -463,6 +463,12 @@ func (api *API) routes() []route {
 			"/pins/{keyType:ipfs|ipns|ipld}/{path:.*}",
 			api.unpinPathHandler,
 		},
+		{
+			"RepoGC",
+			"POST",
+			"/ipfs/gc",
+			api.repoGCHandler,
+		},
 		{
 			"ConnectionGraph",
 			"GET",

@@ -1091,6 +1097,45 @@ func (api *API) recoverHandler(w http.ResponseWriter, r *http.Request) {
 	}
 }
 
+func (api *API) repoGCHandler(w http.ResponseWriter, r *http.Request) {
+	queryValues := r.URL.Query()
+	local := queryValues.Get("local")
+
+	if local == "true" {
+		var localRepoGC types.RepoGC
+		err := api.rpcClient.CallContext(
+			r.Context(),
+			"",
+			"Cluster",
+			"RepoGCLocal",
+			struct{}{},
+			&localRepoGC,
+		)
+
+		api.sendResponse(w, autoStatus, err, repoGCToGlobal(&localRepoGC))
+		return
+	}
+
+	var repoGC types.GlobalRepoGC
+	err := api.rpcClient.CallContext(
+		r.Context(),
+		"",
+		"Cluster",
+		"RepoGC",
+		struct{}{},
+		&repoGC,
+	)
+	api.sendResponse(w, autoStatus, err, repoGC)
+}
+
+func repoGCToGlobal(r *types.RepoGC) types.GlobalRepoGC {
+	return types.GlobalRepoGC{
+		PeerMap: map[string]*types.RepoGC{
+			peer.IDB58Encode(r.Peer): r,
+		},
+	}
+}
+
 func (api *API) notFoundHandler(w http.ResponseWriter, r *http.Request) {
 	api.sendResponse(w, http.StatusNotFound, errors.New("not found"), nil)
 }
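A hedged sketch (not in the diff) of exercising the new endpoint directly over HTTP, assuming the REST API on its default address and no authentication:

	// POST /ipfs/gc runs the sweep on all peers; ?local=true restricts it
	// to the contacted peer.
	resp, err := http.Post("http://127.0.0.1:9094/ipfs/gc?local=true", "application/json", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var gc api.GlobalRepoGC
	if err := json.NewDecoder(resp.Body).Decode(&gc); err != nil {
		log.Fatal(err)
	}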
@@ -1147,6 +1147,53 @@ func TestNotFoundHandler(t *testing.T) {
 	testBothEndpoints(t, tf)
 }
 
+func TestAPIIPFSGCEndpoint(t *testing.T) {
+	ctx := context.Background()
+	rest := testAPI(t)
+	defer rest.Shutdown(ctx)
+
+	testGlobalRepoGC := func(t *testing.T, gRepoGC *api.GlobalRepoGC) {
+		if gRepoGC.PeerMap == nil {
+			t.Fatal("expected a non-nil peer map")
+		}
+
+		if len(gRepoGC.PeerMap) != 1 {
+			t.Error("expected repo gc information for one peer")
+		}
+
+		for _, repoGC := range gRepoGC.PeerMap {
+			if repoGC.Peer == "" {
+				t.Error("expected a cluster ID")
+			}
+			if repoGC.Error != "" {
+				t.Error("did not expect any error")
+			}
+			if repoGC.Keys == nil {
+				t.Fatal("expected a non-nil array of IPFSRepoGC")
+			}
+			if len(repoGC.Keys) == 0 {
+				t.Fatal("expected at least one key, but found none")
+			}
+			if !repoGC.Keys[0].Key.Equals(test.Cid1) {
+				t.Errorf("expected a different cid, expected: %s, found: %s", test.Cid1, repoGC.Keys[0].Key)
+			}
+		}
+	}
+
+	tf := func(t *testing.T, url urlF) {
+		var resp api.GlobalRepoGC
+		makePost(t, rest, url(rest)+"/ipfs/gc?local=true", []byte{}, &resp)
+		testGlobalRepoGC(t, &resp)
+
+		var resp1 api.GlobalRepoGC
+		makePost(t, rest, url(rest)+"/ipfs/gc", []byte{}, &resp1)
+		testGlobalRepoGC(t, &resp1)
+	}
+
+	testBothEndpoints(t, tf)
+}
+
 func TestCORS(t *testing.T) {
 	ctx := context.Background()
 	rest := testAPI(t)
api/types.go
@@ -291,11 +291,14 @@ type Version struct {
 // then id will be a key of IPFSLinks. In the event of a SwarmPeers error
 // IPFSLinks[id] == [].
 type ConnectGraph struct {
-	ClusterID peer.ID
+	ClusterID    peer.ID           `json:"cluster_id" codec:"id"`
+	IDtoPeername map[string]string `json:"id_to_peername" codec:"ip,omitempty"`
 	// ipfs to ipfs links
 	IPFSLinks map[string][]peer.ID `json:"ipfs_links" codec:"il,omitempty"`
 	// cluster to cluster links
 	ClusterLinks map[string][]peer.ID `json:"cluster_links" codec:"cl,omitempty"`
+	// cluster trust links
+	ClusterTrustLinks map[string]bool `json:"cluster_trust_links" codec:"ctl,omitempty"`
 	// cluster to ipfs links
 	ClustertoIPFS map[string]peer.ID `json:"cluster_to_ipfs" codec:"ci,omitempty"`
 }

@@ -960,3 +963,23 @@ type IPFSRepoStat struct {
 	RepoSize   uint64 `codec:"r,omitempty"`
 	StorageMax uint64 `codec:"s, omitempty"`
 }
+
+// IPFSRepoGC represents the streaming response sent from the repo gc API of IPFS.
+type IPFSRepoGC struct {
+	Key   cid.Cid `json:"key,omitempty" codec:"k,omitempty"`
+	Error string  `json:"error,omitempty" codec:"e,omitempty"`
+}
+
+// RepoGC contains garbage collected CIDs from a cluster peer's IPFS daemon.
+type RepoGC struct {
+	Peer     peer.ID      `json:"peer" codec:"p,omitempty"` // the Cluster peer ID
+	Peername string       `json:"peername" codec:"pn,omitempty"`
+	Keys     []IPFSRepoGC `json:"keys" codec:"k"`
+	Error    string       `json:"error,omitempty" codec:"e,omitempty"`
+}
+
+// GlobalRepoGC contains cluster-wide information about garbage collected CIDs
+// from IPFS.
+type GlobalRepoGC struct {
+	PeerMap map[string]*RepoGC `json:"peer_map" codec:"pm,omitempty"`
+}
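Given the tags above, a single-peer GlobalRepoGC marshals to JSON roughly as follows (a sketch with abbreviated placeholder IDs; the exact encoding of the "key" values depends on go-cid's JSON marshaling):

	{
	  "peer_map": {
	    "QmPeer...": {
	      "peer": "QmPeer...",
	      "peername": "peer0",
	      "keys": [ { "key": "<cid>" } ]
	    }
	  }
	}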
cluster.go
@@ -1982,3 +1982,65 @@ func diffPeers(peers1, peers2 []peer.ID) (added, removed []peer.ID) {
 	}
 	return
 }
+
+// RepoGC performs a garbage collection sweep on all peers' IPFS repos.
+func (c *Cluster) RepoGC(ctx context.Context) (*api.GlobalRepoGC, error) {
+	_, span := trace.StartSpan(ctx, "cluster/RepoGC")
+	defer span.End()
+	ctx = trace.NewContext(c.ctx, span)
+
+	members, err := c.consensus.Peers(ctx)
+	if err != nil {
+		logger.Error(err)
+		return nil, err
+	}
+
+	// combine the `RepoGCLocal` responses of all peers into one
+	globalRepoGC := api.GlobalRepoGC{PeerMap: make(map[string]*api.RepoGC)}
+	for _, member := range members {
+		var repoGC api.RepoGC
+		err = c.rpcClient.CallContext(
+			ctx,
+			member,
+			"Cluster",
+			"RepoGCLocal",
+			struct{}{},
+			&repoGC,
+		)
+		if err == nil {
+			globalRepoGC.PeerMap[peer.IDB58Encode(member)] = &repoGC
+			continue
+		}
+
+		if rpc.IsAuthorizationError(err) {
+			logger.Debug("rpc auth error:", err)
+			continue
+		}
+
+		logger.Errorf("%s: error in broadcast response from %s: %s", c.id, member, err)
+
+		globalRepoGC.PeerMap[peer.IDB58Encode(member)] = &api.RepoGC{
+			Peer:     member,
+			Peername: peer.IDB58Encode(member),
+			Keys:     []api.IPFSRepoGC{},
+			Error:    err.Error(),
+		}
+	}
+
+	return &globalRepoGC, nil
+}
+
+// RepoGCLocal performs garbage collection only on the local IPFS daemon.
+func (c *Cluster) RepoGCLocal(ctx context.Context) (*api.RepoGC, error) {
+	_, span := trace.StartSpan(ctx, "cluster/RepoGCLocal")
+	defer span.End()
+	ctx = trace.NewContext(c.ctx, span)
+
+	resp, err := c.ipfs.RepoGC(ctx)
+	if err != nil {
+		return nil, err
+	}
+	resp.Peer = c.id
+	resp.Peername = c.config.Peername
+	return resp, nil
+}
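Note the failure-handling choice in the broadcast above: a peer whose RPC fails still gets an entry in PeerMap, carrying the error string and an empty key list, so a single unreachable peer does not abort the cluster-wide sweep; only authorization errors are skipped silently.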
@@ -109,6 +109,16 @@ func (ipfs *mockConnector) RepoStat(ctx context.Context) (*api.IPFSRepoStat, err
 	return &api.IPFSRepoStat{RepoSize: 100, StorageMax: 1000}, nil
 }
 
+func (ipfs *mockConnector) RepoGC(ctx context.Context) (*api.RepoGC, error) {
+	return &api.RepoGC{
+		Keys: []api.IPFSRepoGC{
+			{
+				Key: test.Cid1,
+			},
+		},
+	}, nil
+}
+
 func (ipfs *mockConnector) Resolve(ctx context.Context, path string) (cid.Cid, error) {
 	_, err := gopath.ParsePath(path)
 	if err != nil {

@@ -864,7 +874,7 @@ func TestClusterUnpinPath(t *testing.T) {
 	// Unpin after pin should succeed
 	pin, err := cl.PinPath(ctx, test.PathIPFS2, api.PinOptions{})
 	if err != nil {
-		t.Fatal("pin with should have worked:", err)
+		t.Fatal("pin with path should have worked:", err)
 	}
 	if !pin.Cid.Equals(test.CidResolved) {
 		t.Error("expected a different cid, found", pin.Cid.String())
@@ -936,3 +946,62 @@ func TestClusterRecoverAllLocal(t *testing.T) {
 		t.Errorf("the pin should have been recovered, got = %v", recov[0].Status)
 	}
 }
+
+func TestClusterRepoGC(t *testing.T) {
+	ctx := context.Background()
+	cl, _, _, _ := testingCluster(t)
+	defer cleanState()
+	defer cl.Shutdown(ctx)
+
+	gRepoGC, err := cl.RepoGC(ctx)
+	if err != nil {
+		t.Fatal("gc should have worked:", err)
+	}
+
+	if gRepoGC.PeerMap == nil {
+		t.Fatal("expected a non-nil peer map")
+	}
+
+	if len(gRepoGC.PeerMap) != 1 {
+		t.Error("expected repo gc information for one peer")
+	}
+	for _, repoGC := range gRepoGC.PeerMap {
+		testRepoGC(t, repoGC)
+	}
+}
+
+func TestClusterRepoGCLocal(t *testing.T) {
+	ctx := context.Background()
+	cl, _, _, _ := testingCluster(t)
+	defer cleanState()
+	defer cl.Shutdown(ctx)
+
+	repoGC, err := cl.RepoGCLocal(ctx)
+	if err != nil {
+		t.Fatal("gc should have worked:", err)
+	}
+
+	testRepoGC(t, repoGC)
+}
+
+func testRepoGC(t *testing.T, repoGC *api.RepoGC) {
+	if repoGC.Peer == "" {
+		t.Error("expected a cluster ID")
+	}
+	if repoGC.Error != "" {
+		t.Error("did not expect any error")
+	}
+
+	if repoGC.Keys == nil {
+		t.Fatal("expected a non-nil array of IPFSRepoGC")
+	}
+
+	if len(repoGC.Keys) == 0 {
+		t.Fatal("expected at least one key, but found none")
+	}
+
+	if !repoGC.Keys[0].Key.Equals(test.Cid1) {
+		t.Errorf("expected a different cid, expected: %s, found: %s", test.Cid1, repoGC.Keys[0].Key)
+	}
+}
@@ -20,8 +20,20 @@ import (
 	secio "github.com/libp2p/go-libp2p-secio"
 	libp2ptls "github.com/libp2p/go-libp2p-tls"
 	routedhost "github.com/libp2p/go-libp2p/p2p/host/routed"
+	identify "github.com/libp2p/go-libp2p/p2p/protocol/identify"
 )
 
+func init() {
+	// Cluster peers should advertise their public IPs as soon as they
+	// learn about them. The default for this is 4, which prevents
+	// clusters with fewer than 4 peers from advertising an external
+	// address they know of, so other peers cannot remember it as soon
+	// as possible. This mostly affects dockerized setups. It may
+	// announce non-dialable NATed addresses too eagerly, but those
+	// should progressively be cleaned up.
+	identify.ActivationThresh = 1
+}
+
 // NewClusterHost creates a fully-featured libp2p Host with the options from
 // the provided cluster configuration. Using that host, it creates pubsub
 // and DHT instances for shared use by all cluster components. The returned
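For context: identify.ActivationThresh is the number of observers that must report the same external address before libp2p's identify service treats that address as confirmed and starts advertising it; lowering it from the default of 4 to 1 lets even a two-peer cluster announce its public address immediately.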
@@ -91,6 +91,8 @@ func textFormatObject(resp interface{}) {
 		for _, item := range resp.([]*api.Metric) {
 			textFormatObject(item)
 		}
+	case *api.GlobalRepoGC:
+		textFormatPrintGlobalRepoGC(resp.(*api.GlobalRepoGC))
 	case []string:
 		for _, item := range resp.([]string) {
 			textFormatObject(item)

@@ -230,6 +232,38 @@ func textFormatPrintMetric(obj *api.Metric) {
 	fmt.Printf("%s | %s | Expires in: %s\n", peer.IDB58Encode(obj.Peer), obj.Name, humanize.Time(time.Unix(0, obj.Expire)))
 }
 
+func textFormatPrintGlobalRepoGC(obj *api.GlobalRepoGC) {
+	peers := make(sort.StringSlice, 0, len(obj.PeerMap))
+	for peer := range obj.PeerMap {
+		peers = append(peers, peer)
+	}
+	peers.Sort()
+
+	for _, peer := range peers {
+		item := obj.PeerMap[peer]
+		// If the peer name is set, use it instead of the peer ID.
+		if len(item.Peername) > 0 {
+			peer = item.Peername
+		}
+		if item.Error != "" {
+			fmt.Printf("%-15s | ERROR: %s\n", peer, item.Error)
+		} else {
+			fmt.Printf("%-15s\n", peer)
+		}
+
+		fmt.Printf(" > CIDs:\n")
+		for _, key := range item.Keys {
+			if key.Error != "" {
+				// key.Key will be empty
+				fmt.Printf(" - ERROR: %s\n", key.Error)
+				continue
+			}
+
+			fmt.Printf(" - %s\n", key.Key)
+		}
+	}
+}
+
 func textFormatPrintError(obj *api.Error) {
 	fmt.Printf("An error occurred:\n")
 	fmt.Printf(" Code: %d\n", obj.Code)
@@ -6,8 +6,8 @@ import (
 	"io"
 	"sort"
 
+	dot "github.com/kishansagathiya/go-dot"
 	peer "github.com/libp2p/go-libp2p-peer"
-	dot "github.com/zenground0/go-dot"
 
 	"github.com/ipfs/ipfs-cluster/api"
 )

@@ -31,8 +31,11 @@ import (
 type nodeType int
 
 const (
-	tCluster nodeType = iota // The cluster node type
-	tIpfs                    // The IPFS node type
+	tSelfCluster    nodeType = iota // cluster self node
+	tCluster                        // cluster node
+	tTrustedCluster                 // trusted cluster node
+	tIPFS                           // IPFS node
+	tIPFSMissing                    // Missing IPFS node
 )
 
 var errUnfinishedWrite = errors.New("could not complete write of line to output")

@@ -62,6 +65,9 @@ func makeDot(cg *api.ConnectGraph, w io.Writer, allIpfs bool) error {
 	dW := dotWriter{
 		w:                w,
 		dotGraph:         dot.NewGraph("cluster"),
+		self:             peer.IDB58Encode(cg.ClusterID),
+		trustMap:         cg.ClusterTrustLinks,
+		idToPeername:     cg.IDtoPeername,
 		ipfsEdges:        ipfsEdges,
 		clusterEdges:     cg.ClusterLinks,
 		clusterIpfsEdges: cg.ClustertoIPFS,
@@ -78,86 +84,176 @@ type dotWriter struct {
 	w        io.Writer
 	dotGraph dot.Graph
 
+	self             string
+	idToPeername     map[string]string
+	trustMap         map[string]bool
 	ipfsEdges        map[string][]peer.ID
 	clusterEdges     map[string][]peer.ID
 	clusterIpfsEdges map[string]peer.ID
 }
 
+func (dW *dotWriter) addSubGraph(sGraph dot.Graph, rank string) {
+	sGraph.IsSubGraph = true
+	sGraph.Rank = rank
+	dW.dotGraph.AddSubGraph(&sGraph)
+}
+
 // writes nodes to dot file output and creates and stores an ordering over nodes
-func (dW *dotWriter) addNode(id string, nT nodeType) error {
-	var node dot.VertexDescription
-	pid, _ := peer.IDB58Decode(id)
-	node.Label = pid.ShortString()
+func (dW *dotWriter) addNode(graph *dot.Graph, id string, nT nodeType) error {
+	node := dot.NewVertexDescription("")
+	node.Group = id
+	node.ColorScheme = "x11"
+	node.FontName = "Arial"
+	node.Style = "filled"
+	node.FontColor = "black"
 	switch nT {
-	case tCluster:
+	case tSelfCluster:
 		node.ID = fmt.Sprintf("C%d", len(dW.clusterNodes))
-		node.Color = "blue2"
+		node.Shape = "box3d"
+		node.Label = label(dW.idToPeername[id], shorten(id))
+		node.Color = "orange"
+		node.Peripheries = 2
 		dW.clusterNodes[id] = &node
-	case tIpfs:
+	case tTrustedCluster:
+		node.ID = fmt.Sprintf("T%d", len(dW.clusterNodes))
+		node.Shape = "box3d"
+		node.Label = label(dW.idToPeername[id], shorten(id))
+		node.Color = "orange"
+		dW.clusterNodes[id] = &node
+	case tCluster:
+		node.Shape = "box3d"
+		node.Label = label(dW.idToPeername[id], shorten(id))
+		node.ID = fmt.Sprintf("C%d", len(dW.clusterNodes))
+		node.Color = "darkorange3"
+		dW.clusterNodes[id] = &node
+	case tIPFS:
 		node.ID = fmt.Sprintf("I%d", len(dW.ipfsNodes))
-		node.Color = "goldenrod"
+		node.Shape = "cylinder"
+		node.Label = label("IPFS", shorten(id))
+		node.Color = "turquoise3"
+		dW.ipfsNodes[id] = &node
+	case tIPFSMissing:
+		node.ID = fmt.Sprintf("I%d", len(dW.ipfsNodes))
+		node.Shape = "cylinder"
+		node.Label = label("IPFS", "Errored")
+		node.Color = "firebrick1"
 		dW.ipfsNodes[id] = &node
 	default:
 		return errUnknownNodeType
 	}
-	dW.dotGraph.AddVertex(&node)
+
+	graph.AddVertex(&node)
 	return nil
 }
 
+func shorten(id string) string {
+	return id[:2] + "*" + id[len(id)-6:]
+}
+
+func label(peername, id string) string {
+	return fmt.Sprintf("< <B> %s </B> <BR/> <B> %s </B> >", peername, id)
+}
+
 func (dW *dotWriter) print() error {
-	dW.dotGraph.AddComment("The nodes of the connectivity graph")
 	dW.dotGraph.AddComment("The cluster-service peers")
 	// Write cluster nodes, use sorted order for consistent labels
-	for _, k := range sortedKeys(dW.clusterEdges) {
-		err := dW.addNode(k, tCluster)
+	sGraphCluster := dot.NewGraph("")
+	sGraphCluster.IsSubGraph = true
+	sortedClusterEdges := sortedKeys(dW.clusterEdges)
+	for _, k := range sortedClusterEdges {
+		var err error
+		if k == dW.self {
+			err = dW.addNode(&sGraphCluster, k, tSelfCluster)
+		} else if dW.trustMap[k] {
+			err = dW.addNode(&sGraphCluster, k, tTrustedCluster)
+		} else {
+			err = dW.addNode(&sGraphCluster, k, tCluster)
+		}
 		if err != nil {
 			return err
 		}
 	}
+	dW.addSubGraph(sGraphCluster, "min")
 	dW.dotGraph.AddNewLine()
 
 	dW.dotGraph.AddComment("The ipfs peers")
+	sGraphIPFS := dot.NewGraph("")
+	sGraphIPFS.IsSubGraph = true
 	// Write ipfs nodes, use sorted order for consistent labels
 	for _, k := range sortedKeys(dW.ipfsEdges) {
-		err := dW.addNode(k, tIpfs)
+		err := dW.addNode(&sGraphIPFS, k, tIPFS)
 		if err != nil {
 			return err
 		}
 	}
+
+	for _, k := range sortedClusterEdges {
+		if _, ok := dW.clusterIpfsEdges[k]; !ok {
+			err := dW.addNode(&sGraphIPFS, k, tIPFSMissing)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	dW.addSubGraph(sGraphIPFS, "max")
 	dW.dotGraph.AddNewLine()
 
-	dW.dotGraph.AddComment("Edges representing active connections in the cluster")
+	dW.dotGraph.AddComment("The connections among cluster-service peers")
 	// Write cluster edges
-	for k, v := range dW.clusterEdges {
+	for _, k := range sortedClusterEdges {
+		v := dW.clusterEdges[k]
 		for _, id := range v {
 			toNode := dW.clusterNodes[k]
 			fromNode := dW.clusterNodes[peer.IDB58Encode(id)]
-			dW.dotGraph.AddEdge(toNode, fromNode, true)
+			dW.dotGraph.AddEdge(toNode, fromNode, true, "")
 		}
 	}
 	dW.dotGraph.AddNewLine()
 
 	dW.dotGraph.AddComment("The connections between cluster peers and their ipfs daemons")
 	// Write cluster to ipfs edges
-	for k, id := range dW.clusterIpfsEdges {
+	for _, k := range sortedClusterEdges {
+		var fromNode *dot.VertexDescription
 		toNode := dW.clusterNodes[k]
-		fromNode := dW.ipfsNodes[peer.IDB58Encode(id)]
-		dW.dotGraph.AddEdge(toNode, fromNode, true)
+		ipfsID, ok := dW.clusterIpfsEdges[k]
+		if !ok {
+			fromNode, ok2 := dW.ipfsNodes[k]
+			if !ok2 {
+				logger.Error("expected a node at this id")
+				continue
+			}
+			dW.dotGraph.AddEdge(toNode, fromNode, true, "dotted")
+			continue
+		}
+
+		fromNode, ok = dW.ipfsNodes[peer.IDB58Encode(ipfsID)]
+		if !ok {
+			logger.Error("expected a node at this id")
+			continue
+		}
+		dW.dotGraph.AddEdge(toNode, fromNode, true, "")
 	}
 	dW.dotGraph.AddNewLine()
 
 	dW.dotGraph.AddComment("The swarm peer connections among ipfs daemons in the cluster")
 	// Write ipfs edges
-	for k, v := range dW.ipfsEdges {
-		for _, id := range v {
+	for _, k := range sortedKeys(dW.ipfsEdges) {
+		v := dW.ipfsEdges[k]
 		toNode := dW.ipfsNodes[k]
-			fromNode := dW.ipfsNodes[peer.IDB58Encode(id)]
-			dW.dotGraph.AddEdge(toNode, fromNode, true)
+		for _, id := range v {
+			idStr := peer.IDB58Encode(id)
+			fromNode, ok := dW.ipfsNodes[idStr]
+			if !ok {
+				logger.Error("expected a node here")
+				continue
+			}
+			dW.dotGraph.AddEdge(toNode, fromNode, true, "")
 		}
 	}
-	return dW.dotGraph.WriteDot(dW.w)
+	return dW.dotGraph.Write(dW.w)
 }
 
 func sortedKeys(dict map[string][]peer.ID) []string {
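Summarizing the rework: cluster peers and IPFS daemons now land in two subgraphs pinned to rank "min" and rank "max" so they render as separate rows; the local peer and trusted peers get their own styles, and a cluster peer whose IPFS daemon could not be queried is drawn against a red "Errored" placeholder node connected by a dotted edge.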
@@ -3,7 +3,6 @@ package main
 import (
 	"bytes"
 	"fmt"
 	"sort"
 	"strings"
 	"testing"
 

@@ -13,36 +12,36 @@ import (
 )
 
 func verifyOutput(t *testing.T, outStr string, trueStr string) {
 	// Because values are printed in the order of map keys we cannot enforce
 	// an exact ordering. Instead we split both strings and compare line by
 	// line.
 	outLines := strings.Split(outStr, "\n")
 	trueLines := strings.Split(trueStr, "\n")
 	sort.Strings(outLines)
 	sort.Strings(trueLines)
 	if len(outLines) != len(trueLines) {
-		fmt.Printf("expected: %s\n actual: %s", trueStr, outStr)
+		fmt.Printf("expected:\n-%s-\n\n\nactual:\n-%s-", trueStr, outStr)
 		t.Fatal("Number of output lines does not match expectation")
 	}
 	for i := range outLines {
 		if outLines[i] != trueLines[i] {
-			t.Errorf("Difference in sorted outputs: %s vs %s", outLines[i], trueLines[i])
+			t.Errorf("Difference in sorted outputs (%d): %s vs %s", i, outLines[i], trueLines[i])
 		}
 	}
 }
 
 var simpleIpfs = `digraph cluster {
 
-/* The nodes of the connectivity graph */
 /* The cluster-service peers */
-C0 [label="<peer.ID Qm*eqhEhD>" color="blue2"]
-C1 [label="<peer.ID Qm*cgHDQJ>" color="blue2"]
-C2 [label="<peer.ID Qm*6MQmJu>" color="blue2"]
+subgraph {
+rank="min"
+C0 [label=< <B> </B> <BR/> <B> Qm*eqhEhD </B> > group="QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD" color="orange" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="box3d" peripheries="2" ]
+C1 [label=< <B> </B> <BR/> <B> Qm*cgHDQJ </B> > group="QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ" color="darkorange3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="box3d" ]
+C2 [label=< <B> </B> <BR/> <B> Qm*6MQmJu </B> > group="QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu" color="darkorange3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="box3d" ]
+}
 
 /* The ipfs peers */
-I0 [label="<peer.ID Qm*N5LSsq>" color="goldenrod"]
-I1 [label="<peer.ID Qm*R3DZDV>" color="goldenrod"]
-I2 [label="<peer.ID Qm*wbBsuL>" color="goldenrod"]
+subgraph {
+rank="max"
+I0 [label=< <B> IPFS </B> <BR/> <B> Qm*N5LSsq </B> > group="QmPFKAGZbUjdzt8BBx8VTWCe9UeUQVcoqHFehSPzN5LSsq" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="cylinder" ]
+I1 [label=< <B> IPFS </B> <BR/> <B> Qm*R3DZDV </B> > group="QmXbiVZd93SLiu9TAm21F2y9JwGiFLydbEVkPBaMR3DZDV" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="cylinder" ]
+I2 [label=< <B> IPFS </B> <BR/> <B> Qm*wbBsuL </B> > group="QmbU7273zH6jxwDe2nqRmEm2rp5PpqP2xeQr2xCmwbBsuL" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="cylinder" ]
+}
 
-/* Edges representing active connections in the cluster */
 /* The connections among cluster-service peers */
@@ -63,10 +62,9 @@ I0 -> I1
 I0 -> I2
 I1 -> I0
 I1 -> I2
-I2 -> I0
 I2 -> I1
-
-}`
+I2 -> I0
+}`
 
 var (
 	pid1, _ = peer.IDB58Decode("QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD")
@@ -127,29 +125,34 @@ func TestSimpleIpfsGraphs(t *testing.T) {
 }
 
 var allIpfs = `digraph cluster {
 
-/* The nodes of the connectivity graph */
 /* The cluster-service peers */
-C0 [label="<peer.ID Qm*eqhEhD>" color="blue2"]
-C1 [label="<peer.ID Qm*cgHDQJ>" color="blue2"]
-C2 [label="<peer.ID Qm*6MQmJu>" color="blue2"]
+subgraph {
+rank="min"
+C0 [label=< <B> </B> <BR/> <B> Qm*eqhEhD </B> > group="QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD" color="orange" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="box3d" peripheries="2" ]
+C1 [label=< <B> </B> <BR/> <B> Qm*cgHDQJ </B> > group="QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ" color="darkorange3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="box3d" ]
+C2 [label=< <B> </B> <BR/> <B> Qm*6MQmJu </B> > group="QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu" color="darkorange3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="box3d" ]
+}
 
 /* The ipfs peers */
-I0 [label="<peer.ID Qm*N5LSsq>" color="goldenrod"]
-I1 [label="<peer.ID Qm*S8xccb>" color="goldenrod"]
-I2 [label="<peer.ID Qm*aaanM8>" color="goldenrod"]
-I3 [label="<peer.ID Qm*R3DZDV>" color="goldenrod"]
-I4 [label="<peer.ID Qm*wbBsuL>" color="goldenrod"]
-I5 [label="<peer.ID Qm*tWZdeD>" color="goldenrod"]
+subgraph {
+rank="max"
+I0 [label=< <B> IPFS </B> <BR/> <B> Qm*N5LSsq </B> > group="QmPFKAGZbUjdzt8BBx8VTWCe9UeUQVcoqHFehSPzN5LSsq" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="cylinder" ]
+I1 [label=< <B> IPFS </B> <BR/> <B> Qm*S8xccb </B> > group="QmQsdAdCHs4PRLi5tcoLfasYppryqQENxgAy4b2aS8xccb" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="cylinder" ]
+I2 [label=< <B> IPFS </B> <BR/> <B> Qm*aaanM8 </B> > group="QmVV2enwXqqQf5esx4v36UeaFQvFehSPzNfi8aaaaaanM8" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="cylinder" ]
+I3 [label=< <B> IPFS </B> <BR/> <B> Qm*R3DZDV </B> > group="QmXbiVZd93SLiu9TAm21F2y9JwGiFLydbEVkPBaMR3DZDV" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="cylinder" ]
+I4 [label=< <B> IPFS </B> <BR/> <B> Qm*wbBsuL </B> > group="QmbU7273zH6jxwDe2nqRmEm2rp5PpqP2xeQr2xCmwbBsuL" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="cylinder" ]
+I5 [label=< <B> IPFS </B> <BR/> <B> Qm*tWZdeD </B> > group="QmfCHNQ2vbUmAuJZhE2hEpgiJq4sL1XScWEKnUrVtWZdeD" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="cylinder" ]
+}
 
-/* Edges representing active connections in the cluster */
 /* The connections among cluster-service peers */
-C2 -> C0
-C2 -> C1
 C0 -> C1
 C0 -> C2
 C1 -> C0
 C1 -> C2
+C2 -> C0
+C2 -> C1
 
 /* The connections between cluster peers and their ipfs daemons */
 C0 -> I3

@@ -157,23 +160,22 @@ C1 -> I0
 C2 -> I4
 
 /* The swarm peer connections among ipfs daemons in the cluster */
-I0 -> I1
-I0 -> I2
 I0 -> I3
 I0 -> I4
+I0 -> I1
+I0 -> I2
 I0 -> I5
 I3 -> I0
-I3 -> I4
 I3 -> I1
 I3 -> I2
+I3 -> I4
 I3 -> I5
-I4 -> I3
 I4 -> I0
 I4 -> I1
 I4 -> I2
+I4 -> I3
 I4 -> I5
-
-}`
+}`
 
 func TestIpfsAllGraphs(t *testing.T) {
 	cg := api.ConnectGraph{
@@ -977,6 +977,32 @@ but usually are:
 			},
 		},
 	},
+	{
+		Name:        "ipfs",
+		Usage:       "Manage IPFS daemon",
+		Description: "Manage IPFS daemon",
+		Subcommands: []cli.Command{
+			{
+				Name:  "gc",
+				Usage: "run garbage collection on IPFS repos of cluster peers",
+				Description: `
+This command will instruct current Cluster peers to run "repo gc" on their
+respective IPFS daemons.
+
+When the --local flag is passed, it will garbage collect only on the local
+IPFS daemon, otherwise on all IPFS daemons.
+`,
+				Flags: []cli.Flag{
+					localFlag(),
+				},
+				Action: func(c *cli.Context) error {
+					resp, cerr := globalClient.RepoGC(ctx, c.Bool("local"))
+					formatResponse(c, resp, cerr)
+					return nil
+				},
+			},
+		},
+	},
 	{
 		Name:  "commands",
 		Usage: "List all commands",
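With this subcommand in place, the sweep is reachable from the command line: `ipfs-cluster-ctl ipfs gc` garbage collects on every peer, and `ipfs-cluster-ctl ipfs gc --local` only on the peer the client talks to.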
@@ -34,33 +34,33 @@ var testingClusterCfg = []byte(`{
 	"state_sync_interval": "1m0s",
 	"ipfs_sync_interval": "2m10s",
 	"replication_factor": -1,
-	"monitor_ping_interval": "350ms",
-	"peer_watch_interval": "200ms",
+	"monitor_ping_interval": "1s",
+	"peer_watch_interval": "1s",
 	"disable_repinning": false,
 	"mdns_interval": "0s"
 }`)
 
 var testingRaftCfg = []byte(`{
 	"data_folder": "raftFolderFromTests",
-	"wait_for_leader_timeout": "10s",
+	"wait_for_leader_timeout": "5s",
 	"commit_retries": 2,
 	"commit_retry_delay": "50ms",
 	"backups_rotate": 2,
 	"network_timeout": "5s",
-	"heartbeat_timeout": "200ms",
-	"election_timeout": "200ms",
-	"commit_timeout": "150ms",
+	"heartbeat_timeout": "700ms",
+	"election_timeout": "1s",
+	"commit_timeout": "250ms",
 	"max_append_entries": 256,
 	"trailing_logs": 10240,
 	"snapshot_interval": "2m0s",
 	"snapshot_threshold": 8192,
-	"leader_lease_timeout": "200ms"
+	"leader_lease_timeout": "500ms"
 }`)
 
 var testingCrdtCfg = []byte(`{
 	"cluster_name": "crdt-test",
 	"trusted_peers": ["*"],
-	"rebroadcast_interval": "150ms"
+	"rebroadcast_interval": "250ms"
 }`)
 
 var testingBadgerCfg = []byte(`{

@@ -114,12 +114,12 @@ var testingTrackerCfg = []byte(`
 `)
 
 var testingMonCfg = []byte(`{
-	"check_interval": "550ms",
+	"check_interval": "800ms",
 	"failure_threshold": 6
 }`)
 
 var testingDiskInfCfg = []byte(`{
-	"metric_ttl": "350ms",
+	"metric_ttl": "900ms",
 	"metric_type": "freespace"
 }`)
@@ -10,15 +10,17 @@ import (
 )
 
 // ConnectGraph returns a description of which cluster peers and ipfs
-// daemons are connected to each other
+// daemons are connected to each other.
 func (c *Cluster) ConnectGraph() (api.ConnectGraph, error) {
 	ctx, span := trace.StartSpan(c.ctx, "cluster/ConnectGraph")
 	defer span.End()
 
 	cg := api.ConnectGraph{
 		ClusterID:         c.host.ID(),
+		IDtoPeername:      make(map[string]string),
 		IPFSLinks:         make(map[string][]peer.ID),
 		ClusterLinks:      make(map[string][]peer.ID),
+		ClusterTrustLinks: make(map[string]bool),
 		ClustertoIPFS:     make(map[string]peer.ID),
 	}
 	members, err := c.consensus.Peers(ctx)

@@ -26,6 +28,11 @@ func (c *Cluster) ConnectGraph() (api.ConnectGraph, error) {
 		return cg, err
 	}
 
+	for _, member := range members {
+		// one of the entries is for itself, but that shouldn't hurt
+		cg.ClusterTrustLinks[peer.IDB58Encode(member)] = c.consensus.IsTrustedPeer(ctx, member)
+	}
+
 	peers := make([][]*api.ID, len(members), len(members))
 
 	ctxs, cancels := rpcutil.CtxsWithCancel(ctx, len(members))

@@ -49,7 +56,7 @@ func (c *Cluster) ConnectGraph() (api.ConnectGraph, error) {
 	}
 
 	selfConnection, pID := c.recordClusterLinks(&cg, p, peers[i])
 	cg.IDtoPeername[p] = pID.Peername
 	// IPFS connections
 	if !selfConnection {
 		logger.Warningf("cluster peer %s not its own peer. No ipfs info ", p)
go.mod
@@ -20,8 +20,8 @@ require (
 	github.com/imdario/mergo v0.3.7
 	github.com/ipfs/go-block-format v0.0.2
 	github.com/ipfs/go-cid v0.0.3
-	github.com/ipfs/go-datastore v0.1.0
-	github.com/ipfs/go-ds-badger v0.0.5
+	github.com/ipfs/go-datastore v0.1.1
+	github.com/ipfs/go-ds-badger v0.0.7
 	github.com/ipfs/go-ds-crdt v0.1.6
 	github.com/ipfs/go-fs-lock v0.0.1
 	github.com/ipfs/go-ipfs-api v0.0.2

@@ -39,10 +39,11 @@ require (
 	github.com/ipfs/go-path v0.0.7
 	github.com/ipfs/go-unixfs v0.2.2
 	github.com/kelseyhightower/envconfig v1.4.0
+	github.com/kishansagathiya/go-dot v0.1.0
 	github.com/lanzafame/go-libp2p-ocgorpc v0.1.1
-	github.com/libp2p/go-libp2p v0.4.0
+	github.com/libp2p/go-libp2p v0.4.1
 	github.com/libp2p/go-libp2p-autonat-svc v0.1.0
-	github.com/libp2p/go-libp2p-circuit v0.1.3
+	github.com/libp2p/go-libp2p-circuit v0.1.4
 	github.com/libp2p/go-libp2p-connmgr v0.1.1
 	github.com/libp2p/go-libp2p-consensus v0.0.1
 	github.com/libp2p/go-libp2p-core v0.2.4

@@ -53,17 +54,17 @@ require (
 	github.com/libp2p/go-libp2p-http v0.1.4
 	github.com/libp2p/go-libp2p-kad-dht v0.2.1
 	github.com/libp2p/go-libp2p-peer v0.2.0
-	github.com/libp2p/go-libp2p-peerstore v0.1.3
+	github.com/libp2p/go-libp2p-peerstore v0.1.4
 	github.com/libp2p/go-libp2p-pnet v0.1.0
 	github.com/libp2p/go-libp2p-protocol v0.1.0
 	github.com/libp2p/go-libp2p-pubsub v0.1.1
 	github.com/libp2p/go-libp2p-quic-transport v0.2.0
 	github.com/libp2p/go-libp2p-raft v0.1.4
-	github.com/libp2p/go-libp2p-secio v0.2.0
+	github.com/libp2p/go-libp2p-secio v0.2.1
 	github.com/libp2p/go-libp2p-tls v0.1.2
 	github.com/libp2p/go-ws-transport v0.1.2
 	github.com/multiformats/go-multiaddr v0.1.1
-	github.com/multiformats/go-multiaddr-dns v0.1.1
+	github.com/multiformats/go-multiaddr-dns v0.2.0
 	github.com/multiformats/go-multiaddr-net v0.1.1
 	github.com/multiformats/go-multicodec v0.1.6
 	github.com/multiformats/go-multihash v0.0.8

@@ -72,10 +73,8 @@ require (
 	github.com/rs/cors v1.7.0
 	github.com/ugorji/go/codec v1.1.7
 	github.com/urfave/cli v1.22.1
-	github.com/whyrusleeping/go-logging v0.0.1 // indirect
-	github.com/zenground0/go-dot v0.0.0-20180912213407-94a425d4984e
 	go.opencensus.io v0.22.1
-	golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550
+	golang.org/x/crypto v0.0.0-20191105034135-c7e5f84aec59
 	gonum.org/v1/gonum v0.0.0-20190926113837-94b2bbd8ac13
 	gonum.org/v1/plot v0.0.0-20190615073203-9aa86143727f
 )
go.sum
@@ -237,15 +237,20 @@ github.com/ipfs/go-datastore v0.0.5 h1:q3OfiOZV5rlsK1H5V8benjeUApRfMGs4Mrhmr6Nri
 github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
 github.com/ipfs/go-datastore v0.1.0 h1:TOxI04l8CmO4zGtesENhzm4PwkFwJXY3rKiYaaMf9fI=
 github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
+github.com/ipfs/go-datastore v0.1.1 h1:F4k0TkTAZGLFzBOrVKDAvch6JZtuN4NHkfdcEZL50aI=
+github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw=
 github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=
 github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps=
 github.com/ipfs/go-ds-badger v0.0.2 h1:7ToQt7QByBhOTuZF2USMv+PGlMcBC7FW7FdgQ4FCsoo=
 github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8=
 github.com/ipfs/go-ds-badger v0.0.5 h1:dxKuqw5T1Jm8OuV+lchA76H9QZFyPKZeLuT6bN42hJQ=
 github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaHzfGTzuE3s=
+github.com/ipfs/go-ds-badger v0.0.7 h1:NMyh88Q50HG6/S2YD58DLkq0c0/ZQPMbSojONH+PRf4=
+github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk=
 github.com/ipfs/go-ds-crdt v0.1.6 h1:Qk8MRnJL1uQQf5yFANvyS4w38ryq870Uodk/ZLAzADw=
 github.com/ipfs/go-ds-crdt v0.1.6/go.mod h1:tEKGkB4o6s6TIn+kagavYhCG5u61ERF3y4Cx4NmQlsU=
 github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc=
+github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8=
 github.com/ipfs/go-fs-lock v0.0.1 h1:XHX8uW4jQBYWHj59XXcjg7BHlHxV9ZOYs6Y43yb7/l0=
 github.com/ipfs/go-fs-lock v0.0.1/go.mod h1:DNBekbboPKcxs1aukPSaOtFA3QfSdi5C855v0i9XJ8Y=
 github.com/ipfs/go-ipfs-api v0.0.2 h1:Yg+c8oAMVrN1m0OUGfrWBF+rhfqb7uJ123gU2g1giec=

@@ -351,6 +356,8 @@ github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E
 github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0=
 github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8=
 github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=
+github.com/kishansagathiya/go-dot v0.1.0 h1:XPj/333a6Qn4VPFqF+e2EiyABL7yRObJ7RTAbriKA1s=
+github.com/kishansagathiya/go-dot v0.1.0/go.mod h1:U1dCUFzZ+KnBgkaCWPj2JFUQygVepVudkINK9QRsxMs=
 github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
 github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
 github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=

@@ -393,10 +400,14 @@ github.com/libp2p/go-libp2p v0.3.1 h1:opd8/1Sm9zFG37LzNQsIzMTMeBabhlcX5VlvLrNZPV
 github.com/libp2p/go-libp2p v0.3.1/go.mod h1:e6bwxbdYH1HqWTz8faTChKGR0BjPc8p+6SyP8GTTR7Y=
 github.com/libp2p/go-libp2p v0.4.0 h1:nV2q3fdhL80OWtPyBrsoWKcw32qC4TbbR+iGjEOMRaU=
 github.com/libp2p/go-libp2p v0.4.0/go.mod h1:9EsEIf9p2UDuwtPd0DwJsAl0qXVxgAnuDGRvHbfATfI=
+github.com/libp2p/go-libp2p v0.4.1 h1:QuJnMLC/O9OaASl5lkhMOllY2Gq2er7XRSN3WIxoxt8=
+github.com/libp2p/go-libp2p v0.4.1/go.mod h1:VMXJkVzSMNd6Ia8eSQGO0EQ1mqIsLuulJbkJYnfLvYQ=
 github.com/libp2p/go-libp2p-autonat v0.0.6 h1:OCStANLLpeyQeWFUuqZJ7aS9+Bx0/uoVb1PtLA9fGTQ=
 github.com/libp2p/go-libp2p-autonat v0.0.6/go.mod h1:uZneLdOkZHro35xIhpbtTzLlgYturpu4J5+0cZK3MqE=
 github.com/libp2p/go-libp2p-autonat v0.1.0 h1:aCWAu43Ri4nU0ZPO7NyLzUvvfqd0nE3dX0R/ZGYVgOU=
 github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8=
+github.com/libp2p/go-libp2p-autonat v0.1.1 h1:WLBZcIRsjZlWdAZj9CiBSvU2wQXoUOiS1Zk1tM7DTJI=
+github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE=
 github.com/libp2p/go-libp2p-autonat-svc v0.1.0 h1:28IM7iWMDclZeVkpiFQaWVANwXwE7zLlpbnS7yXxrfs=
 github.com/libp2p/go-libp2p-autonat-svc v0.1.0/go.mod h1:fqi8Obl/z3R4PFVLm8xFtZ6PBL9MlV/xumymRFkKq5A=
 github.com/libp2p/go-libp2p-blankhost v0.0.1 h1:/mZuuiwntNR8RywnCFlGHLKrKLYne+qciBpQXWqp5fk=

@@ -415,6 +426,8 @@ github.com/libp2p/go-libp2p-circuit v0.1.1 h1:eopfG9fAg6rEHWQO1TSrLosXDgYbbbu/RT
 github.com/libp2p/go-libp2p-circuit v0.1.1/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8=
 github.com/libp2p/go-libp2p-circuit v0.1.3 h1:WsMYYaA0PwdpgJSQu12EzPYf5ypkLSTgcOsWr7DYrgI=
 github.com/libp2p/go-libp2p-circuit v0.1.3/go.mod h1:Xqh2TjSy8DD5iV2cCOMzdynd6h8OTBGoV1AWbWor3qM=
+github.com/libp2p/go-libp2p-circuit v0.1.4 h1:Phzbmrg3BkVzbqd4ZZ149JxCuUWu2wZcXf/Kr6hZJj8=
+github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3xkAcs3gnksxY7osU=
 github.com/libp2p/go-libp2p-connmgr v0.1.1 h1:BIul1BPoN1vPAByMh6CeD33NpGjD+PkavmUjTS7uai8=
 github.com/libp2p/go-libp2p-connmgr v0.1.1/go.mod h1:wZxh8veAmU5qdrfJ0ZBLcU8oJe9L82ciVP/fl1VHjXk=
 github.com/libp2p/go-libp2p-consensus v0.0.1 h1:jcVbHRZLwTXU9iT/mPi+Lx4/OrIzq3bU1TbZNhYFCV8=

@@ -502,6 +515,8 @@ github.com/libp2p/go-libp2p-peerstore v0.1.0 h1:MKh7pRNPHSh1fLPj8u/M/s/napdmeNpo
 github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY=
 github.com/libp2p/go-libp2p-peerstore v0.1.3 h1:wMgajt1uM2tMiqf4M+4qWKVyyFc8SfA+84VV9glZq1M=
 github.com/libp2p/go-libp2p-peerstore v0.1.3/go.mod h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI=
+github.com/libp2p/go-libp2p-peerstore v0.1.4 h1:d23fvq5oYMJ/lkkbO4oTwBp/JP+I/1m5gZJobNXCE/k=
+github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod h1:+4BDbDiiKf4PzpANZDAT+knVdLxvqh7hXOujessqdzs=
 github.com/libp2p/go-libp2p-pnet v0.1.0 h1:kRUES28dktfnHNIRW4Ro78F7rKBHBiw5MJpl0ikrLIA=
 github.com/libp2p/go-libp2p-pnet v0.1.0/go.mod h1:ZkyZw3d0ZFOex71halXRihWf9WH/j3OevcJdTmD0lyE=
 github.com/libp2p/go-libp2p-protocol v0.0.1 h1:+zkEmZ2yFDi5adpVE3t9dqh/N9TbpFWywowzeEzBbLM=

@@ -530,6 +545,8 @@ github.com/libp2p/go-libp2p-secio v0.1.0 h1:NNP5KLxuP97sE5Bu3iuwOWyT/dKEGMN5zSLM
 github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8=
 github.com/libp2p/go-libp2p-secio v0.2.0 h1:ywzZBsWEEz2KNTn5RtzauEDq5RFEefPsttXYwAWqHng=
 github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g=
+github.com/libp2p/go-libp2p-secio v0.2.1 h1:eNWbJTdyPA7NxhP7J3c5lT97DC5d+u+IldkgCYFTPVA=
+github.com/libp2p/go-libp2p-secio v0.2.1/go.mod h1:cWtZpILJqkqrSkiYcDBh5lA3wbT2Q+hz3rJQq3iftD8=
 github.com/libp2p/go-libp2p-swarm v0.0.6 h1:gE0P/v2h+KEXtAi9YTw2UBOSODJ4m9VuuJ+ktc2LVUo=
 github.com/libp2p/go-libp2p-swarm v0.0.6/go.mod h1:s5GZvzg9xXe8sbeESuFpjt8CJPTCa8mhEusweJqyFy8=
 github.com/libp2p/go-libp2p-swarm v0.1.0 h1:HrFk2p0awrGEgch9JXK/qp/hfjqQfgNxpLWnCiWPg5s=

@@ -686,8 +703,8 @@ github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/94
 github.com/multiformats/go-multiaddr-dns v0.0.3 h1:P19q/k9jwmtgh+qXFkKfgFM7rCg/9l5AVqh7VNxSXhs=
 github.com/multiformats/go-multiaddr-dns v0.0.3/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q=
 github.com/multiformats/go-multiaddr-dns v0.1.0/go.mod h1:01k2RAqtoXIuPa3DCavAE9/6jc6nM0H3EgZyfUhN2oY=
-github.com/multiformats/go-multiaddr-dns v0.1.1 h1:A1V9mkfbThDsMnhOFLFu5tg4kpsjLWuvlRbBXeQRpo8=
-github.com/multiformats/go-multiaddr-dns v0.1.1/go.mod h1:01k2RAqtoXIuPa3DCavAE9/6jc6nM0H3EgZyfUhN2oY=
+github.com/multiformats/go-multiaddr-dns v0.2.0 h1:YWJoIDwLePniH7OU5hBnDZV6SWuvJqJ0YtN6pLeH9zA=
+github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0=
 github.com/multiformats/go-multiaddr-fmt v0.0.1 h1:5YjeOIzbX8OTKVaN72aOzGIYW7PnrZrnkDyOfAWRSMA=
 github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q=
 github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=

@@ -836,8 +853,6 @@ github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdz
 github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc=
 github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc h1:9lDbC6Rz4bwmou+oE6Dt4Cb2BGMur5eR/GYptkKUVHo=
 github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM=
-github.com/whyrusleeping/go-logging v0.0.1 h1:fwpzlmT0kRC/Fmd0MdmGgJG/CXIZ6gFq46FQZjprUcc=
-github.com/whyrusleeping/go-logging v0.0.1/go.mod h1:lDPYj54zutzG1XYfHAhcc7oNXEburHQBn+Iqd4yS4vE=
 github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f h1:M/lL30eFZTKnomXY6huvM6G0+gVquFNf6mxghaWlFUg=
 github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f/go.mod h1:cZNvX9cFybI01GriPRMXDtczuvUhgbcYr9iCGaNlRv8=
 github.com/whyrusleeping/mafmt v1.2.8 h1:TCghSl5kkwEE0j+sU/gudyhVMRlpBin8fMBBHg59EbA=

@@ -855,8 +870,6 @@ github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:
 github.com/x-cray/logrus-prefixed-formatter v0.5.2 h1:00txxvfBM9muc0jiLIEAkAcIMJzfthRT6usrui8uGmg=
 github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE=
 github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
-github.com/zenground0/go-dot v0.0.0-20180912213407-94a425d4984e h1:GN1PUQ/MNDdtiZZhCAnZ4PwTcslUM8qWVz8q2bLkDeM=
-github.com/zenground0/go-dot v0.0.0-20180912213407-94a425d4984e/go.mod h1:T00FaxHq4SlnicuZFole4yRAgcjWtqbMcUXBfXAYvaI=
 go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
 go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg=
 go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=

@@ -888,6 +901,8 @@ golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392 h1:ACG4HJsFiNMf47Y4PeRoeb
 golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191105034135-c7e5f84aec59 h1:PyXRxSVbvzDGuqYXjHndV7xDzJ7w2K8KD9Ef8GB7KOE=
+golang.org/x/crypto v0.0.0-20191105034135-c7e5f84aec59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -90,6 +90,8 @@ type IPFSConnector interface {
 	// RepoStat returns the current repository size and max limit as
 	// provided by "repo stat".
 	RepoStat(context.Context) (*api.IPFSRepoStat, error)
+	// RepoGC performs a garbage collection sweep on the IPFS repo.
+	RepoGC(context.Context) (*api.RepoGC, error)
 	// Resolve returns a cid given a path.
 	Resolve(context.Context, string) (cid.Cid, error)
 	// BlockPut directly adds a block of data to the IPFS repo.
@@ -56,8 +56,9 @@ var (
 	customLogLvlFacilities = logFacilities{}
 
 	ptracker  = "map"
-	consensus = "raft"
+	consensus = "crdt"
 
+	ttlDelayTime = 2 * time.Second // set on Main to diskInf.MetricTTL
 	testsFolder = "clusterTestsFolder"
 
 	// When testing with fixed ports...

@@ -124,6 +125,10 @@ func TestMain(m *testing.M) {
 		}
 	}
 
+	diskInfCfg := &disk.Config{}
+	diskInfCfg.LoadJSON(testingDiskInfCfg)
+	ttlDelayTime = diskInfCfg.MetricTTL * 2
+
 	os.Exit(m.Run())
 }

@@ -298,14 +303,15 @@ func createHosts(t *testing.T, clusterSecret []byte, nClusters int) ([]host.Hos
 	dhts := make([]*dht.IpfsDHT, nClusters, nClusters)
 
 	tcpaddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0")
-	quicAddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/udp/0/quic")
+	// Disable quic as it is proving a bit unstable
+	//quicAddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/udp/0/quic")
 	for i := range hosts {
 		priv, _, err := crypto.GenerateKeyPair(crypto.RSA, 2048)
 		if err != nil {
 			t.Fatal(err)
 		}
 
-		h, p, d := createHost(t, priv, clusterSecret, []ma.Multiaddr{quicAddr, tcpaddr})
+		h, p, d := createHost(t, priv, clusterSecret, []ma.Multiaddr{tcpaddr})
 		hosts[i] = h
 		dhts[i] = d
 		pubsubs[i] = p

@@ -382,6 +388,7 @@ func createClusters(t *testing.T) ([]*Cluster, []*test.IpfsMock) {
 	clusters[0] = createCluster(t, hosts[0], dhts[0], cfgs[0], stores[0], cons[0], apis[0], ipfss[0], trackers[0], mons[0], allocs[0], infs[0], tracers[0])
 	<-clusters[0].Ready()
+	bootstrapAddr := clusterAddr(clusters[0])
 
 	// Start the rest and join
 	for i := 1; i < nClusters; i++ {
 		clusters[i] = createCluster(t, hosts[i], dhts[i], cfgs[i], stores[i], cons[i], apis[i], ipfss[i], trackers[i], mons[i], allocs[i], infs[i], tracers[i])

@@ -481,9 +488,7 @@ func pinDelay() {
 }
 
 func ttlDelay() {
-	diskInfCfg := &disk.Config{}
-	diskInfCfg.LoadJSON(testingDiskInfCfg)
-	time.Sleep(diskInfCfg.MetricTTL * 3)
+	time.Sleep(ttlDelayTime)
 }
 
 // Like waitForLeader but letting metrics expire before waiting, and

@@ -1916,6 +1921,10 @@ func validateClusterGraph(t *testing.T, graph api.ConnectGraph, clusterIDs map[s
 		}
 	}
 
+	if len(graph.ClusterTrustLinks) != peerNum {
+		t.Errorf("Unexpected number of trust links in graph")
+	}
+
 	// Check that the mocked ipfs swarm is recorded
 	if len(graph.IPFSLinks) != 1 {
 		t.Error("Expected exactly one ipfs peer for all cluster nodes, the mocked peer")

@@ -2086,6 +2095,30 @@ func TestClustersDisabledRepinning(t *testing.T) {
 	}
 }
 
+func TestRepoGC(t *testing.T) {
+	clusters, mock := createClusters(t)
+	defer shutdownClusters(t, clusters, mock)
+	f := func(t *testing.T, c *Cluster) {
+		gRepoGC, err := c.RepoGC(context.Background())
+		if err != nil {
+			t.Fatal("gc should have worked:", err)
+		}
+
+		if gRepoGC.PeerMap == nil {
+			t.Fatal("expected a non-nil peer map")
+		}
+
+		if len(gRepoGC.PeerMap) != nClusters {
+			t.Errorf("expected repo gc information for %d peers", nClusters)
+		}
+		for _, repoGC := range gRepoGC.PeerMap {
+			testRepoGC(t, repoGC)
+		}
+	}
+
+	runF(t, clusters, f)
+}
+
 func TestClustersFollowerMode(t *testing.T) {
 	ctx := context.Background()
 	clusters, mock := createClusters(t)
@@ -23,6 +23,7 @@ const (
	DefaultIPFSRequestTimeout = 5 * time.Minute
	DefaultPinTimeout         = 24 * time.Hour
	DefaultUnpinTimeout       = 3 * time.Hour
	DefaultRepoGCTimeout      = 24 * time.Hour
	DefaultUnpinDisable       = false
)

@@ -48,6 +49,8 @@ type Config struct {
	// Unpin Operation timeout
	UnpinTimeout time.Duration

	// RepoGC Operation timeout
	RepoGCTimeout time.Duration

	// Disables the unpin operation and returns an error.
	UnpinDisable bool

@@ -61,6 +64,7 @@ type jsonConfig struct {
	IPFSRequestTimeout string `json:"ipfs_request_timeout"`
	PinTimeout         string `json:"pin_timeout"`
	UnpinTimeout       string `json:"unpin_timeout"`
	RepoGCTimeout      string `json:"repogc_timeout"`
	UnpinDisable       bool   `json:"unpin_disable,omitempty"`
}

@@ -77,6 +81,7 @@ func (cfg *Config) Default() error {
	cfg.IPFSRequestTimeout = DefaultIPFSRequestTimeout
	cfg.PinTimeout = DefaultPinTimeout
	cfg.UnpinTimeout = DefaultUnpinTimeout
	cfg.RepoGCTimeout = DefaultRepoGCTimeout
	cfg.UnpinDisable = DefaultUnpinDisable

	return nil

@@ -121,6 +126,11 @@ func (cfg *Config) Validate() error {
	if cfg.UnpinTimeout < 0 {
		err = errors.New("ipfshttp.unpin_timeout invalid")
	}

	if cfg.RepoGCTimeout < 0 {
		err = errors.New("ipfshttp.repogc_timeout invalid")
	}

	return err
}

@@ -154,6 +164,7 @@ func (cfg *Config) applyJSONConfig(jcfg *jsonConfig) error {
		&config.DurationOpt{Duration: jcfg.IPFSRequestTimeout, Dst: &cfg.IPFSRequestTimeout, Name: "ipfs_request_timeout"},
		&config.DurationOpt{Duration: jcfg.PinTimeout, Dst: &cfg.PinTimeout, Name: "pin_timeout"},
		&config.DurationOpt{Duration: jcfg.UnpinTimeout, Dst: &cfg.UnpinTimeout, Name: "unpin_timeout"},
		&config.DurationOpt{Duration: jcfg.RepoGCTimeout, Dst: &cfg.RepoGCTimeout, Name: "repogc_timeout"},
	)
	if err != nil {
		return err

@@ -189,6 +200,7 @@ func (cfg *Config) toJSONConfig() (jcfg *jsonConfig, err error) {
	jcfg.IPFSRequestTimeout = cfg.IPFSRequestTimeout.String()
	jcfg.PinTimeout = cfg.PinTimeout.String()
	jcfg.UnpinTimeout = cfg.UnpinTimeout.String()
	jcfg.RepoGCTimeout = cfg.RepoGCTimeout.String()
	jcfg.UnpinDisable = cfg.UnpinDisable

	return
@@ -13,7 +13,8 @@ var cfgJSON = []byte(`
	"connect_swarms_delay": "7s",
	"ipfs_request_timeout": "5m0s",
	"pin_timeout": "24h",
	"unpin_timeout": "3h"
	"unpin_timeout": "3h",
	"repogc_timeout": "24h"
}
`)
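A minimal sketch of the kind of check this fixture supports, assuming the component's usual LoadJSON method (the standard pattern for ipfs-cluster config components) and the Config fields added above:

func TestLoadJSON(t *testing.T) {
	cfg := &Config{}
	if err := cfg.LoadJSON(cfgJSON); err != nil {
		t.Fatal(err)
	}
	// The new field should round-trip from the JSON fixture.
	if cfg.RepoGCTimeout != 24*time.Hour {
		t.Error("expected repogc_timeout to parse as 24h")
	}
}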
@@ -93,6 +93,11 @@ type ipfsResolveResp struct {
	Path string
}

type ipfsRepoGCResp struct {
	Key   cid.Cid
	Error string
}

type ipfsRefsResp struct {
	Ref string
	Err string

@@ -757,6 +762,47 @@ func (ipfs *Connector) RepoStat(ctx context.Context) (*api.IPFSRepoStat, error)
	return &stats, nil
}

// RepoGC performs a garbage collection sweep on the cluster peer's IPFS repo.
func (ipfs *Connector) RepoGC(ctx context.Context) (*api.RepoGC, error) {
	ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/RepoGC")
	defer span.End()

	ctx, cancel := context.WithTimeout(ctx, ipfs.config.RepoGCTimeout)
	defer cancel()

	res, err := ipfs.doPostCtx(ctx, ipfs.client, ipfs.apiURL(), "repo/gc?stream-errors=true", "", nil)
	if err != nil {
		logger.Error(err)
		return nil, err
	}
	defer res.Body.Close()

	dec := json.NewDecoder(res.Body)
	repoGC := &api.RepoGC{
		Keys: []api.IPFSRepoGC{},
	}
	for {
		resp := ipfsRepoGCResp{}

		if err := dec.Decode(&resp); err != nil {
			// If we cancelled the request we should tell the user
			// (in case dec.Decode() exited cleanly with an EOF).
			select {
			case <-ctx.Done():
				return repoGC, ctx.Err()
			default:
				if err == io.EOF {
					return repoGC, nil // clean exit
				}
				logger.Error(err)
				return repoGC, err // error decoding
			}
		}

		repoGC.Keys = append(repoGC.Keys, api.IPFSRepoGC{Key: resp.Key, Error: resp.Error})
	}
}

// Resolve accepts ipfs or ipns path and resolves it into a cid
func (ipfs *Connector) Resolve(ctx context.Context, path string) (cid.Cid, error) {
	ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/Resolve")
@@ -10,6 +10,7 @@ import (
	logging "github.com/ipfs/go-log"
	ma "github.com/multiformats/go-multiaddr"

	merkledag "github.com/ipfs/go-merkledag"
	"github.com/ipfs/ipfs-cluster/api"
	"github.com/ipfs/ipfs-cluster/test"
)

@@ -409,3 +410,39 @@ func TestConfigKey(t *testing.T) {
		t.Error("should not work with a bad path")
	}
}

func TestRepoGC(t *testing.T) {
	ctx := context.Background()
	ipfs, mock := testIPFSConnector(t)
	defer mock.Close()
	defer ipfs.Shutdown(ctx)

	res, err := ipfs.RepoGC(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if res.Error != "" {
		t.Errorf("expected error to be empty: %s", res.Error)
	}

	if res.Keys == nil {
		t.Fatal("expected a non-nil array of IPFSRepoGC")
	}

	if len(res.Keys) < 5 {
		t.Fatal("expected at least five keys")
	}

	if !res.Keys[0].Key.Equals(test.Cid1) {
		t.Errorf("expected different cid, expected: %s, found: %s\n", test.Cid1, res.Keys[0].Key)
	}

	if !res.Keys[3].Key.Equals(test.Cid4) {
		t.Errorf("expected different cid, expected: %s, found: %s\n", test.Cid4, res.Keys[3].Key)
	}

	if res.Keys[4].Error != merkledag.ErrLinkNotFound.Error() {
		t.Errorf("expected different error, expected: %s, found: %s\n", merkledag.ErrLinkNotFound, res.Keys[4].Error)
	}
}
@@ -111,7 +111,7 @@ func TestClustersPeerAdd(t *testing.T) {
		if err != nil {
			t.Fatal(err)
		}
	pinDelay()
	ttlDelay()

	f := func(t *testing.T, c *Cluster) {
		ids := c.Peers(ctx)

@@ -420,7 +420,7 @@ func TestClustersPeerRemoveReallocsPins(t *testing.T) {
	// We choose to remove the leader, to make things even more interesting
	chosenID, err := clusters[0].consensus.Leader(ctx)
	if err != nil {
		// choose a random peer
		// choose a random peer - crdt
		i := rand.Intn(nClusters)
		chosenID = clusters[i].host.ID()
	}

@@ -527,7 +527,7 @@ func TestClustersPeerJoin(t *testing.T) {
	h := test.Cid1
	clusters[0].Pin(ctx, h, api.PinOptions{})
	pinDelay()
	ttlDelay()

	for _, p := range clusters {
		t.Log(p.id.String())

@@ -574,7 +574,7 @@ func TestClustersPeerJoinAllAtOnce(t *testing.T) {
	h := test.Cid1
	clusters[0].Pin(ctx, h, api.PinOptions{})
	pinDelay()
	ttlDelay()

	f2 := func(t *testing.T, c *Cluster) {
		peers := c.Peers(ctx)
|
||||
|
|
|
@@ -342,7 +342,7 @@ func (pm *Manager) SetPriority(pid peer.ID, prio int) error {
	return pm.host.Peerstore().Put(pid, PriorityTag, prio)
}

// HandlePeerFound implements the Notifee interface for discovery.
// HandlePeerFound implements the Notifee interface for discovery (mdns).
func (pm *Manager) HandlePeerFound(p peer.AddrInfo) {
	addrs, err := peer.AddrInfoToP2pAddrs(&p)
	if err != nil {
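For context, HandlePeerFound fires once the Manager is registered as the notifee of go-libp2p's mdns discovery service. A rough sketch of that wiring under the discovery API of this go-libp2p generation; the identifiers h, interval, tag and pm are assumptions, not part of this change:

// Illustrative registration only: after this, pm.HandlePeerFound is
// invoked for every peer found on the local network via mdns.
service, err := discovery.NewMdnsService(ctx, h, interval, tag)
if err != nil {
	return err
}
service.RegisterNotifee(pm)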
rpc_api.go
@@ -418,6 +418,26 @@ func (rpcapi *ClusterRPCAPI) BlockAllocate(ctx context.Context, in *api.Pin, out
	return nil
}

// RepoGC performs garbage collection sweep on all peers' repos.
func (rpcapi *ClusterRPCAPI) RepoGC(ctx context.Context, in struct{}, out *api.GlobalRepoGC) error {
	res, err := rpcapi.c.RepoGC(ctx)
	if err != nil {
		return err
	}
	*out = *res
	return nil
}

// RepoGCLocal performs garbage collection sweep only on the local peer's IPFS daemon.
func (rpcapi *ClusterRPCAPI) RepoGCLocal(ctx context.Context, in struct{}, out *api.RepoGC) error {
	res, err := rpcapi.c.RepoGCLocal(ctx)
	if err != nil {
		return err
	}
	*out = *res
	return nil
}

// SendInformerMetric runs Cluster.sendInformerMetric().
func (rpcapi *ClusterRPCAPI) SendInformerMetric(ctx context.Context, in struct{}, out *api.Metric) error {
	m, err := rpcapi.c.sendInformerMetric(ctx)
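These endpoints are dispatched over go-libp2p-gorpc. A hedged sketch of invoking the local variant on a specific peer; rpcClient (a gorpc client) and pid (the target peer's ID) are assumed to exist, and the service/method names mirror the policy table below:

// Illustrative single-peer call against the RepoGCLocal endpoint.
var out api.RepoGC
if err := rpcClient.CallContext(ctx, pid, "Cluster", "RepoGCLocal", struct{}{}, &out); err != nil {
	return err
}
fmt.Printf("peer %s collected %d keys\n", out.Peer, len(out.Keys))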
|
||||
|
|
|
@@ -22,6 +22,8 @@ var DefaultRPCPolicy = map[string]RPCEndpointType{
	"Cluster.RecoverAll":         RPCClosed,
	"Cluster.RecoverAllLocal":    RPCTrusted,
	"Cluster.RecoverLocal":       RPCTrusted,
	"Cluster.RepoGC":             RPCClosed,
	"Cluster.RepoGCLocal":        RPCTrusted,
	"Cluster.SendInformerMetric": RPCClosed,
	"Cluster.Status":             RPCClosed,
	"Cluster.StatusAll":          RPCClosed,
@@ -55,7 +55,7 @@ func MultiCancel(cancels []context.CancelFunc) {
	}
}

// The copy functions below are used in calls to Cluste.multiRPC()
// The copy functions below are used in calls to Cluster.multiRPC()

// CopyPIDsToIfaces converts a peer.ID slice to an empty interface
// slice using pointers to each element of the original slice.

@@ -114,6 +114,18 @@ func CopyPinInfoSliceToIfaces(in [][]*api.PinInfo) []interface{} {
	return ifaces
}

// CopyRepoGCSliceToIfaces converts an api.RepoGC slice to
// an empty interface slice using pointers to each element of
// the original slice. Useful to handle gorpc.MultiCall() replies.
func CopyRepoGCSliceToIfaces(in []*api.RepoGC) []interface{} {
	ifaces := make([]interface{}, len(in), len(in))
	for i := range in {
		in[i] = &api.RepoGC{}
		ifaces[i] = in[i]
	}
	return ifaces
}

// CopyEmptyStructToIfaces converts an empty struct slice to an empty interface
// slice using pointers to each element of the original slice.
// Useful to handle gorpc.MultiCall() replies.
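As context, these copy helpers pre-allocate reply slots for gorpc's multi-peer calls. A hedged fan-out sketch; rpcClient, ctxs and peers are assumed to be set up by the caller:

// Illustrative fan-out of RepoGCLocal to many peers. The helper both
// allocates each *api.RepoGC reply and exposes it as an interface{}.
replies := make([]*api.RepoGC, len(peers))
ifaces := CopyRepoGCSliceToIfaces(replies)
errs := rpcClient.MultiCall(ctxs, peers, "Cluster", "RepoGCLocal", struct{}{}, ifaces)
for i, e := range errs {
	if e != nil {
		fmt.Printf("peer %s: %s\n", peers[i], e)
		continue
	}
	// replies[i] now holds that peer's collected keys.
}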
|
||||
|
|
|
@@ -105,6 +105,11 @@ type mockBlockPutResp struct {
	Key string
}

type mockRepoGCResp struct {
	Key   cid.Cid `json:",omitempty"`
	Error string  `json:",omitempty"`
}

// NewIpfsMock returns a new mock.
func NewIpfsMock(t *testing.T) *IpfsMock {
	store := inmem.New()

@@ -379,6 +384,33 @@ func (m *IpfsMock) handler(w http.ResponseWriter, r *http.Request) {
			goto ERROR
		}
		w.Write(data)
	case "repo/gc":
		// It assumes `/repo/gc` with parameter `stream-errors=true`
		enc := json.NewEncoder(w)
		resp := []mockRepoGCResp{
			{
				Key: Cid1,
			},
			{
				Key: Cid2,
			},
			{
				Key: Cid3,
			},
			{
				Key: Cid4,
			},
			{
				Error: "no link by that name",
			},
		}

		for _, r := range resp {
			if err := enc.Encode(&r); err != nil {
				goto ERROR
			}
		}

	case "repo/stat":
		sizeOnly := r.URL.Query().Get("size-only")
		list, err := m.pinMap.List(ctx)
@@ -321,6 +321,30 @@ func (mock *mockCluster) BlockAllocate(ctx context.Context, in *api.Pin, out *[]
	return nil
}

func (mock *mockCluster) RepoGC(ctx context.Context, in struct{}, out *api.GlobalRepoGC) error {
	localrepoGC := &api.RepoGC{}
	_ = mock.RepoGCLocal(ctx, struct{}{}, localrepoGC)
	*out = api.GlobalRepoGC{
		PeerMap: map[string]*api.RepoGC{
			peer.IDB58Encode(PeerID1): localrepoGC,
		},
	}
	return nil
}

func (mock *mockCluster) RepoGCLocal(ctx context.Context, in struct{}, out *api.RepoGC) error {
	*out = api.RepoGC{
		Peer: PeerID1,
		Keys: []api.IPFSRepoGC{
			{
				Key: Cid1,
			},
		},
	}

	return nil
}

func (mock *mockCluster) SendInformerMetric(ctx context.Context, in struct{}, out *api.Metric) error {
	return nil
}
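A rough sketch of how such a mock is typically exposed to tests over an in-process gorpc server; the libp2p host h and the protocolID are assumed to be provided by the test harness:

// Illustrative only: serve the mock under the "Cluster" service name so
// test clients can call Cluster.RepoGC / Cluster.RepoGCLocal against it.
server := gorpc.NewServer(h, protocolID)
if err := server.RegisterName("Cluster", &mockCluster{}); err != nil {
	t.Fatal(err)
}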