Add ability to run Garbage Collector on all peers

- cluster method, ipfs connector method, rpc and rest apis,
command, etc for repo gc
    - Remove extra space from policy generator
    - Added special timeout for `/repo/gc` call to IPFS
    - Added `RepoGCLocal` cluster rpc method, which will be used to run gc
    on local IPFS daemon
    - Added peer name to the repo gc struct
    - Sorted by peer IDs while formatting (only affects CLI
    results)
    - Special timeout setting where timeout gets checked from last update
    - Added `local` argument, which would run gc only on contacted peer
This commit is contained in:
Kishan Mohanbhai Sagathiya 2019-10-22 11:10:32 +05:30
parent 04a5881b07
commit 492b5612e7
22 changed files with 606 additions and 7 deletions

View File

@ -119,6 +119,11 @@ type Client interface {
// Metrics returns a map with the latest metrics of matching name
// for the current cluster peers.
Metrics(ctx context.Context, name string) ([]*api.Metric, error)
// RepoGC runs garbage collection on IPFS daemons of cluster peers and
// returns collected CIDs. If local is true, it would garbage collect
// only on contacted peer, otherwise on all peers' IPFS daemons.
RepoGC(ctx context.Context, local bool) (*api.GlobalRepoGC, error)
}
// Config allows to configure the parameters to connect

View File

@ -369,6 +369,22 @@ func (lc *loadBalancingClient) Metrics(ctx context.Context, name string) ([]*api
return metrics, err
}
// RepoGC runs garbage collection on IPFS daemons of cluster peers and
// returns collected CIDs. If local is true, it would garbage collect
// only on contacted peer, otherwise on all peers' IPFS daemons.
// RepoGC runs garbage collection on IPFS daemons of cluster peers and
// returns collected CIDs. If local is true, it would garbage collect
// only on contacted peer, otherwise on all peers' IPFS daemons.
func (lc *loadBalancingClient) RepoGC(ctx context.Context, local bool) (*api.GlobalRepoGC, error) {
	var result *api.GlobalRepoGC
	// Retry the call across the load-balanced clients until one succeeds.
	err := lc.retry(0, func(c Client) error {
		gc, e := c.RepoGC(ctx, local)
		result = gc
		return e
	})
	return result, err
}
// Add imports files to the cluster from the given paths. A path can
// either be a local filesystem location or an web url (http:// or https://).
// In the latter case, the destination will be downloaded with a GET request.

View File

@ -334,6 +334,26 @@ func (c *defaultClient) Metrics(ctx context.Context, name string) ([]*api.Metric
return metrics, err
}
// RepoGC runs garbage collection on IPFS daemons of cluster peers and
// returns collected CIDs. If local is true, it would garbage collect
// only on contacted peer, otherwise on all peers' IPFS daemons.
// RepoGC runs garbage collection on IPFS daemons of cluster peers and
// returns collected CIDs. If local is true, it would garbage collect
// only on contacted peer, otherwise on all peers' IPFS daemons.
func (c *defaultClient) RepoGC(ctx context.Context, local bool) (*api.GlobalRepoGC, error) {
	ctx, span := trace.StartSpan(ctx, "client/RepoGC")
	defer span.End()

	endpoint := fmt.Sprintf("/ipfs/gc?local=%t", local)
	repoGC := api.GlobalRepoGC{}
	err := c.do(ctx, "POST", endpoint, nil, nil, &repoGC)
	return &repoGC, err
}
// WaitFor is a utility function that allows for a caller to wait for a
// paticular status for a CID (as defined by StatusFilterParams).
// It returns the final status for that CID and an error, if there was.

View File

@ -648,3 +648,38 @@ func TestAddMultiFile(t *testing.T) {
testClients(t, api, testF)
}
// TestRepoGC checks that a cluster-wide repo GC through the client
// returns per-peer results carrying the expected collected CID.
func TestRepoGC(t *testing.T) {
	ctx := context.Background()
	api := testAPI(t)
	defer shutdown(api)

	testF := func(t *testing.T, c Client) {
		res, err := c.RepoGC(ctx, false)
		if err != nil {
			t.Fatal(err)
		}

		if res.PeerMap == nil {
			t.Fatal("expected a non-nil peer map")
		}

		for _, entry := range res.PeerMap {
			if entry.Peer == "" {
				t.Error("bad id")
			}
			if entry.Error != "" {
				t.Error("did not expect any error")
			}
			switch {
			case entry.Keys == nil:
				t.Error("expected a non-nil array of IPFSRepoGC")
			case !entry.Keys[0].Key.Equals(test.Cid1):
				t.Errorf("expected a different cid, expected: %s, found: %s", test.Cid1, entry.Keys[0].Key)
			}
		}
	}

	testClients(t, api, testF)
}

View File

@ -456,6 +456,12 @@ func (api *API) routes() []route {
"/pins/{keyType:ipfs|ipns|ipld}/{path:.*}",
api.unpinPathHandler,
},
{
"RepoGC",
"POST",
"/ipfs/gc",
api.repoGCHandler,
},
{
"ConnectionGraph",
"GET",
@ -1065,6 +1071,45 @@ func (api *API) recoverHandler(w http.ResponseWriter, r *http.Request) {
}
}
// repoGCHandler serves POST /ipfs/gc. With "local=true" it triggers GC
// only on this peer via Cluster.RepoGCLocal; otherwise it broadcasts
// via Cluster.RepoGC.
func (api *API) repoGCHandler(w http.ResponseWriter, r *http.Request) {
	if r.URL.Query().Get("local") == "true" {
		var local types.RepoGC
		err := api.rpcClient.CallContext(
			r.Context(),
			"",
			"Cluster",
			"RepoGCLocal",
			struct{}{},
			&local,
		)
		// Wrap the single-peer result so the response shape matches
		// the global endpoint.
		api.sendResponse(w, autoStatus, err, repoGCToGlobal(&local))
		return
	}

	var global types.GlobalRepoGC
	err := api.rpcClient.CallContext(
		r.Context(),
		"",
		"Cluster",
		"RepoGC",
		struct{}{},
		&global,
	)
	api.sendResponse(w, autoStatus, err, global)
}
// repoGCToGlobal wraps a single peer's RepoGC result into a
// GlobalRepoGC keyed by that peer's ID.
func repoGCToGlobal(r *types.RepoGC) types.GlobalRepoGC {
	pm := make(map[string]*types.RepoGC, 1)
	pm[peer.IDB58Encode(r.Peer)] = r
	return types.GlobalRepoGC{PeerMap: pm}
}
// notFoundHandler replies with a 404 error response for unknown routes.
func (api *API) notFoundHandler(w http.ResponseWriter, r *http.Request) {
	err := errors.New("not found")
	api.sendResponse(w, http.StatusNotFound, err, nil)
}

View File

@ -1124,6 +1124,53 @@ func TestNotFoundHandler(t *testing.T) {
testBothEndpoints(t, tf)
}
// TestAPIIPFSGCEndpoint exercises POST /ipfs/gc both with and without
// the local flag and validates the returned GlobalRepoGC.
func TestAPIIPFSGCEndpoint(t *testing.T) {
	ctx := context.Background()
	rest := testAPI(t)
	defer rest.Shutdown(ctx)

	checkRepoGC := func(t *testing.T, gRepoGC *api.GlobalRepoGC) {
		if gRepoGC.PeerMap == nil {
			t.Fatal("expected a non-nil peer map")
		}
		if len(gRepoGC.PeerMap) != 1 {
			t.Error("expected repo gc information for one peer")
		}

		for _, gc := range gRepoGC.PeerMap {
			if gc.Peer == "" {
				t.Error("expected a cluster ID")
			}
			if gc.Error != "" {
				t.Error("did not expect any error")
			}
			if gc.Keys == nil {
				t.Fatal("expected a non-nil array of IPFSRepoGC")
			}
			if len(gc.Keys) == 0 {
				t.Fatal("expected at least one key, but found none")
			}
			if !gc.Keys[0].Key.Equals(test.Cid1) {
				t.Errorf("expected a different cid, expected: %s, found: %s", test.Cid1, gc.Keys[0].Key)
			}
		}
	}

	tf := func(t *testing.T, url urlF) {
		var localResp api.GlobalRepoGC
		makePost(t, rest, url(rest)+"/ipfs/gc?local=true", []byte{}, &localResp)
		checkRepoGC(t, &localResp)

		var globalResp api.GlobalRepoGC
		makePost(t, rest, url(rest)+"/ipfs/gc", []byte{}, &globalResp)
		checkRepoGC(t, &globalResp)
	}

	testBothEndpoints(t, tf)
}
func TestCORS(t *testing.T) {
ctx := context.Background()
rest := testAPI(t)

View File

@ -897,3 +897,23 @@ type IPFSRepoStat struct {
RepoSize uint64 `codec:"r,omitempty"`
StorageMax uint64 `codec:"s, omitempty"`
}
// IPFSRepoGC represents the streaming response sent from repo gc API of IPFS.
// Exactly one of Key or Error is expected to be set per entry.
type IPFSRepoGC struct {
	Key   cid.Cid `json:"key,omitempty" codec:"k,omitempty"` // garbage-collected CID; unset when Error is set
	Error string  `json:"error,omitempty" codec:"e,omitempty"`
}

// RepoGC contains garbage collected CIDs from a cluster peer's IPFS daemon.
type RepoGC struct {
	Peer     peer.ID      `json:"peer" codec:"p,omitempty"` // the Cluster peer ID
	Peername string       `json:"peername" codec:"pn,omitempty"`
	Keys     []IPFSRepoGC `json:"keys" codec:"k"` // per-CID GC results streamed by IPFS
	Error    string       `json:"error,omitempty" codec:"e,omitempty"`
}

// GlobalRepoGC contains cluster-wide information about garbage collected CIDs
// from IPFS.
type GlobalRepoGC struct {
	// PeerMap maps a peer ID (in string form) to that peer's RepoGC result.
	PeerMap map[string]*RepoGC `json:"peer_map" codec:"pm,omitempty"`
}

View File

@ -1883,3 +1883,73 @@ func diffPeers(peers1, peers2 []peer.ID) (added, removed []peer.ID) {
}
return
}
// RepoGC performs a garbage collection sweep on every cluster peer's
// IPFS repo by broadcasting RepoGCLocal, and merges the per-peer
// results (or errors) into a single GlobalRepoGC.
func (c *Cluster) RepoGC(ctx context.Context) (*api.GlobalRepoGC, error) {
	_, span := trace.StartSpan(ctx, "cluster/RepoGC")
	defer span.End()
	ctx = trace.NewContext(c.ctx, span)

	members, err := c.consensus.Peers(ctx)
	if err != nil {
		logger.Error(err)
		return nil, err
	}

	lenMembers := len(members)

	ctxs, cancels := rpcutil.CtxsWithCancel(ctx, lenMembers)
	defer rpcutil.MultiCancel(cancels)

	repoGCsResp := make([]*api.RepoGC, lenMembers)

	errs := c.rpcClient.MultiCall(
		ctxs,
		members,
		"Cluster",
		"RepoGCLocal",
		struct{}{},
		rpcutil.CopyRepoGCSliceToIfaces(repoGCsResp),
	)

	// clubbing `RepoGCLocal` responses of all peers into one
	globalRepoGC := api.GlobalRepoGC{PeerMap: make(map[string]*api.RepoGC)}
	for i, resp := range repoGCsResp {
		e := errs[i]
		if e == nil {
			globalRepoGC.PeerMap[peer.IDB58Encode(members[i])] = resp
			continue
		}

		if rpc.IsAuthorizationError(e) {
			logger.Debug("rpc auth error:", e)
			continue
		}

		logger.Errorf("%s: error in broadcast response from %s: %s ", c.id, members[i], e)

		// Record the per-peer failure. Note: the error stored is the
		// broadcast error `e` — not `err` from Peers() above, which is
		// guaranteed nil here and would panic on .Error().
		globalRepoGC.PeerMap[peer.IDB58Encode(members[i])] = &api.RepoGC{
			Peer:     members[i],
			Peername: peer.IDB58Encode(members[i]),
			Keys:     []api.IPFSRepoGC{},
			Error:    e.Error(),
		}
	}

	return &globalRepoGC, nil
}
// RepoGCLocal performs garbage collection only on the local IPFS daemon.
func (c *Cluster) RepoGCLocal(ctx context.Context) (*api.RepoGC, error) {
	_, span := trace.StartSpan(ctx, "cluster/RepoGCLocal")
	defer span.End()
	ctx = trace.NewContext(c.ctx, span)

	localGC, err := c.ipfs.RepoGC(ctx)
	if err != nil {
		return nil, err
	}

	// Tag the result with this peer's identity.
	localGC.Peer = c.id
	localGC.Peername = c.config.Peername
	return localGC, nil
}

View File

@ -109,6 +109,16 @@ func (ipfs *mockConnector) RepoStat(ctx context.Context) (*api.IPFSRepoStat, err
return &api.IPFSRepoStat{RepoSize: 100, StorageMax: 1000}, nil
}
// RepoGC mocks a garbage collection returning a single fixed CID.
func (ipfs *mockConnector) RepoGC(ctx context.Context) (*api.RepoGC, error) {
	keys := []api.IPFSRepoGC{{Key: test.Cid1}}
	return &api.RepoGC{Keys: keys}, nil
}
func (ipfs *mockConnector) Resolve(ctx context.Context, path string) (cid.Cid, error) {
_, err := gopath.ParsePath(path)
if err != nil {
@ -849,7 +859,7 @@ func TestClusterUnpinPath(t *testing.T) {
// Unpin after pin should succeed
pin, err := cl.PinPath(ctx, test.PathIPFS2, api.PinOptions{})
if err != nil {
t.Fatal("pin with should have worked:", err)
t.Fatal("pin with path should have worked:", err)
}
if !pin.Cid.Equals(test.CidResolved) {
t.Error("expected a different cid, found", pin.Cid.String())
@ -921,3 +931,62 @@ func TestClusterRecoverAllLocal(t *testing.T) {
t.Errorf("the pin should have been recovered, got = %v", recov[0].Status)
}
}
// TestClusterRepoGC verifies a single-peer cluster-wide GC sweep.
func TestClusterRepoGC(t *testing.T) {
	ctx := context.Background()
	cl, _, _, _ := testingCluster(t)
	defer cleanState()
	defer cl.Shutdown(ctx)

	res, err := cl.RepoGC(ctx)
	if err != nil {
		t.Fatal("gc should have worked:", err)
	}

	if res.PeerMap == nil {
		t.Fatal("expected a non-nil peer map")
	}
	if len(res.PeerMap) != 1 {
		t.Error("expected repo gc information for one peer")
	}

	for _, gc := range res.PeerMap {
		testRepoGC(t, gc)
	}
}
// TestClusterRepoGCLocal verifies GC on the local IPFS daemon only.
func TestClusterRepoGCLocal(t *testing.T) {
	ctx := context.Background()
	cl, _, _, _ := testingCluster(t)
	defer cleanState()
	defer cl.Shutdown(ctx)

	res, err := cl.RepoGCLocal(ctx)
	if err != nil {
		t.Fatal("gc should have worked:", err)
	}
	testRepoGC(t, res)
}
// testRepoGC asserts that a RepoGC result carries a peer ID, no errors,
// and the expected garbage-collected key.
func testRepoGC(t *testing.T, repoGC *api.RepoGC) {
	if repoGC.Peer == "" {
		t.Error("expected a cluster ID")
	}
	if repoGC.Error != "" {
		t.Error("did not expect any error")
	}

	switch {
	case repoGC.Keys == nil:
		t.Fatal("expected a non-nil array of IPFSRepoGC")
	case len(repoGC.Keys) == 0:
		t.Fatal("expected at least one key, but found none")
	}

	if !repoGC.Keys[0].Key.Equals(test.Cid1) {
		t.Errorf("expected a different cid, expected: %s, found: %s", test.Cid1, repoGC.Keys[0].Key)
	}
}

View File

@ -86,6 +86,8 @@ func textFormatObject(resp interface{}) {
for _, item := range resp.([]*api.Metric) {
textFormatObject(item)
}
case *api.GlobalRepoGC:
textFormatPrintGlobalRepoGC(resp.(*api.GlobalRepoGC))
default:
checkErr("", errors.New("unsupported type returned"))
}
@ -215,6 +217,38 @@ func textFormatPrintMetric(obj *api.Metric) {
fmt.Printf("%s: %s | Expire: %s\n", peer.IDB58Encode(obj.Peer), obj.Value, date)
}
// textFormatPrintGlobalRepoGC prints one section per peer, sorted by
// peer ID so CLI output is deterministic across runs.
func textFormatPrintGlobalRepoGC(obj *api.GlobalRepoGC) {
	// Collect and sort peer IDs. The loop variable is named `p` (not
	// `peer`) to avoid shadowing the imported `peer` package.
	peers := make([]string, 0, len(obj.PeerMap))
	for p := range obj.PeerMap {
		peers = append(peers, p)
	}
	sort.Strings(peers)

	for _, p := range peers {
		item := obj.PeerMap[p]

		// If peer name is set, use it instead of peer ID.
		label := p
		if len(item.Peername) > 0 {
			label = item.Peername
		}

		if item.Error != "" {
			fmt.Printf("%-15s | ERROR: %s\n", label, item.Error)
		} else {
			fmt.Printf("%-15s\n", label)
		}

		fmt.Printf(" > CIDs:\n")
		for _, key := range item.Keys {
			if key.Error != "" {
				// key.Key will be empty
				fmt.Printf(" - ERROR: %s\n", key.Error)
				continue
			}
			fmt.Printf(" - %s\n", key.Key)
		}
	}
}
func textFormatPrintError(obj *api.Error) {
fmt.Printf("An error occurred:\n")
fmt.Printf(" Code: %d\n", obj.Code)

View File

@ -952,6 +952,32 @@ but usually are:
},
},
},
{
Name: "ipfs",
Usage: "Manage IPFS daemon",
Description: "Manage IPFS daemon",
Subcommands: []cli.Command{
{
Name: "gc",
Usage: "run garbage collection on IPFS repos of cluster peers",
Description: `
This command will instruct current Cluster peers to run "repo gc" on their
respective IPFS daemons.
When --local flag is passed, it will garbage collect only on the local IPFS
deamon, otherwise on all IPFS daemons.
`,
Flags: []cli.Flag{
localFlag(),
},
Action: func(c *cli.Context) error {
resp, cerr := globalClient.RepoGC(ctx, c.Bool("local"))
formatResponse(c, resp, cerr)
return nil
},
},
},
},
{
Name: "commands",
Usage: "List all commands",

View File

@ -90,6 +90,8 @@ type IPFSConnector interface {
// RepoStat returns the current repository size and max limit as
// provided by "repo stat".
RepoStat(context.Context) (*api.IPFSRepoStat, error)
// RepoGC performs garbage collection sweep on the IPFS repo.
RepoGC(context.Context) (*api.RepoGC, error)
// Resolve returns a cid given a path.
Resolve(context.Context, string) (cid.Cid, error)
// BlockPut directly adds a block of data to the IPFS repo.

View File

@ -2028,6 +2028,30 @@ func TestClustersDisabledRepinning(t *testing.T) {
}
}
// TestRepoGC checks that every cluster peer reports GC results for all
// nClusters members.
func TestRepoGC(t *testing.T) {
	clusters, mock := createClusters(t)
	defer shutdownClusters(t, clusters, mock)

	f := func(t *testing.T, c *Cluster) {
		res, err := c.RepoGC(context.Background())
		if err != nil {
			t.Fatal("gc should have worked:", err)
		}

		if res.PeerMap == nil {
			t.Fatal("expected a non-nil peer map")
		}
		if len(res.PeerMap) != nClusters {
			t.Errorf("expected repo gc information for %d peer", nClusters)
		}

		for _, gc := range res.PeerMap {
			testRepoGC(t, gc)
		}
	}
	runF(t, clusters, f)
}
func TestClustersFollowerMode(t *testing.T) {
ctx := context.Background()
clusters, mock := createClusters(t)

View File

@ -23,6 +23,7 @@ const (
DefaultIPFSRequestTimeout = 5 * time.Minute
DefaultPinTimeout = 24 * time.Hour
DefaultUnpinTimeout = 3 * time.Hour
DefaultRepoGCTimeout = 24 * time.Hour
DefaultUnpinDisable = false
)
@ -48,6 +49,8 @@ type Config struct {
// Unpin Operation timeout
UnpinTimeout time.Duration
// RepoGC Operation timeout
RepoGCTimeout time.Duration
// Disables the unpin operation and returns an error.
UnpinDisable bool
@ -61,6 +64,7 @@ type jsonConfig struct {
IPFSRequestTimeout string `json:"ipfs_request_timeout"`
PinTimeout string `json:"pin_timeout"`
UnpinTimeout string `json:"unpin_timeout"`
RepoGCTimeout string `json:"repogc_timeout"`
UnpinDisable bool `json:"unpin_disable,omitempty"`
}
@ -77,6 +81,7 @@ func (cfg *Config) Default() error {
cfg.IPFSRequestTimeout = DefaultIPFSRequestTimeout
cfg.PinTimeout = DefaultPinTimeout
cfg.UnpinTimeout = DefaultUnpinTimeout
cfg.RepoGCTimeout = DefaultRepoGCTimeout
cfg.UnpinDisable = DefaultUnpinDisable
return nil
@ -121,6 +126,11 @@ func (cfg *Config) Validate() error {
if cfg.UnpinTimeout < 0 {
err = errors.New("ipfshttp.unpin_timeout invalid")
}
if cfg.RepoGCTimeout < 0 {
err = errors.New("ipfshttp.repogc_timeout invalid")
}
return err
}
@ -154,6 +164,7 @@ func (cfg *Config) applyJSONConfig(jcfg *jsonConfig) error {
&config.DurationOpt{Duration: jcfg.IPFSRequestTimeout, Dst: &cfg.IPFSRequestTimeout, Name: "ipfs_request_timeout"},
&config.DurationOpt{Duration: jcfg.PinTimeout, Dst: &cfg.PinTimeout, Name: "pin_timeout"},
&config.DurationOpt{Duration: jcfg.UnpinTimeout, Dst: &cfg.UnpinTimeout, Name: "unpin_timeout"},
&config.DurationOpt{Duration: jcfg.RepoGCTimeout, Dst: &cfg.RepoGCTimeout, Name: "repogc_timeout"},
)
if err != nil {
return err
@ -189,6 +200,7 @@ func (cfg *Config) toJSONConfig() (jcfg *jsonConfig, err error) {
jcfg.IPFSRequestTimeout = cfg.IPFSRequestTimeout.String()
jcfg.PinTimeout = cfg.PinTimeout.String()
jcfg.UnpinTimeout = cfg.UnpinTimeout.String()
jcfg.RepoGCTimeout = cfg.RepoGCTimeout.String()
jcfg.UnpinDisable = cfg.UnpinDisable
return

View File

@ -9,11 +9,12 @@ import (
var cfgJSON = []byte(`
{
"node_multiaddress": "/ip4/127.0.0.1/tcp/5001",
"connect_swarms_delay": "7s",
"ipfs_request_timeout": "5m0s",
"pin_timeout": "24h",
"unpin_timeout": "3h"
"node_multiaddress": "/ip4/127.0.0.1/tcp/5001",
"connect_swarms_delay": "7s",
"ipfs_request_timeout": "5m0s",
"pin_timeout": "24h",
"unpin_timeout": "3h",
"repogc_timeout": "24h"
}
`)

View File

@ -93,6 +93,11 @@ type ipfsResolveResp struct {
Path string
}
// ipfsRepoGCResp models one object of the ndjson stream returned by the
// IPFS /repo/gc endpoint (with stream-errors=true): either a collected
// Key or an Error per entry.
type ipfsRepoGCResp struct {
	Key   cid.Cid
	Error string
}
type ipfsRefsResp struct {
Ref string
Err string
@ -757,6 +762,47 @@ func (ipfs *Connector) RepoStat(ctx context.Context) (*api.IPFSRepoStat, error)
return &stats, nil
}
// RepoGC performs a garbage collection sweep on the cluster peer's IPFS repo.
// It streams the ndjson response from the IPFS /repo/gc endpoint and
// accumulates every reported key (or per-key error) into an api.RepoGC.
// The call is bounded by the configured RepoGCTimeout.
func (ipfs *Connector) RepoGC(ctx context.Context) (*api.RepoGC, error) {
	ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/RepoGC")
	defer span.End()

	// GC can take very long; use the dedicated (large) timeout rather
	// than the general IPFS request timeout.
	ctx, cancel := context.WithTimeout(ctx, ipfs.config.RepoGCTimeout)
	defer cancel()

	// stream-errors=true makes IPFS report per-key errors inline in the
	// stream instead of aborting the whole request.
	res, err := ipfs.doPostCtx(ctx, ipfs.client, ipfs.apiURL(), "repo/gc?stream-errors=true", "", nil)
	if err != nil {
		logger.Error(err)
		return nil, err
	}
	defer res.Body.Close()

	dec := json.NewDecoder(res.Body)
	repoGC := &api.RepoGC{
		Keys: []api.IPFSRepoGC{},
	}
	for {
		resp := ipfsRepoGCResp{}

		if err := dec.Decode(&resp); err != nil {
			// If we cancelled the request we should tell the user
			// (in case dec.Decode() exited cleanly with an EOF).
			select {
			case <-ctx.Done():
				return repoGC, ctx.Err()
			default:
				if err == io.EOF {
					return repoGC, nil // clean exit
				}
				logger.Error(err)
				return repoGC, err // error decoding
			}
		}

		// Each decoded object is either a collected key or a per-key error.
		repoGC.Keys = append(repoGC.Keys, api.IPFSRepoGC{Key: resp.Key, Error: resp.Error})
	}
}
// Resolve accepts ipfs or ipns path and resolves it into a cid
func (ipfs *Connector) Resolve(ctx context.Context, path string) (cid.Cid, error) {
ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/Resolve")

View File

@ -10,6 +10,7 @@ import (
logging "github.com/ipfs/go-log"
ma "github.com/multiformats/go-multiaddr"
merkledag "github.com/ipfs/go-merkledag"
"github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/test"
)
@ -409,3 +410,39 @@ func TestConfigKey(t *testing.T) {
t.Error("should not work with a bad path")
}
}
// TestRepoGC runs GC against the IPFS mock and checks the streamed keys
// and the trailing per-key error entry.
func TestRepoGC(t *testing.T) {
	ctx := context.Background()
	ipfs, mock := testIPFSConnector(t)
	defer mock.Close()
	defer ipfs.Shutdown(ctx)

	res, err := ipfs.RepoGC(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if res.Error != "" {
		t.Errorf("expected error to be empty: %s", res.Error)
	}

	switch {
	case res.Keys == nil:
		t.Fatal("expected a non-nil array of IPFSRepoGC")
	case len(res.Keys) < 5:
		t.Fatal("expected at least five keys")
	}

	if !res.Keys[0].Key.Equals(test.Cid1) {
		t.Errorf("expected different cid, expected: %s, found: %s\n", test.Cid1, res.Keys[0].Key)
	}
	if !res.Keys[3].Key.Equals(test.Cid4) {
		t.Errorf("expected different cid, expected: %s, found: %s\n", test.Cid4, res.Keys[3].Key)
	}
	if res.Keys[4].Error != merkledag.ErrLinkNotFound.Error() {
		t.Errorf("expected different error, expected: %s, found: %s\n", merkledag.ErrLinkNotFound, res.Keys[4].Error)
	}
}

View File

@ -418,6 +418,26 @@ func (rpcapi *ClusterRPCAPI) BlockAllocate(ctx context.Context, in *api.Pin, out
return nil
}
// RepoGC performs garbage collection sweep on all peers' repos.
func (rpcapi *ClusterRPCAPI) RepoGC(ctx context.Context, in struct{}, out *api.GlobalRepoGC) error {
	gc, err := rpcapi.c.RepoGC(ctx)
	if err != nil {
		return err
	}
	*out = *gc
	return nil
}
// RepoGCLocal performs garbage collection sweep only on the local peer's IPFS daemon.
func (rpcapi *ClusterRPCAPI) RepoGCLocal(ctx context.Context, in struct{}, out *api.RepoGC) error {
	gc, err := rpcapi.c.RepoGCLocal(ctx)
	if err != nil {
		return err
	}
	*out = *gc
	return nil
}
// SendInformerMetric runs Cluster.sendInformerMetric().
func (rpcapi *ClusterRPCAPI) SendInformerMetric(ctx context.Context, in struct{}, out *api.Metric) error {
m, err := rpcapi.c.sendInformerMetric(ctx)

View File

@ -22,6 +22,8 @@ var DefaultRPCPolicy = map[string]RPCEndpointType{
"Cluster.RecoverAll": RPCClosed,
"Cluster.RecoverAllLocal": RPCTrusted,
"Cluster.RecoverLocal": RPCTrusted,
"Cluster.RepoGC": RPCClosed,
"Cluster.RepoGCLocal": RPCTrusted,
"Cluster.SendInformerMetric": RPCClosed,
"Cluster.Status": RPCClosed,
"Cluster.StatusAll": RPCClosed,

View File

@ -55,7 +55,7 @@ func MultiCancel(cancels []context.CancelFunc) {
}
}
// The copy functions below are used in calls to Cluste.multiRPC()
// The copy functions below are used in calls to Cluster.multiRPC()
// CopyPIDsToIfaces converts a peer.ID slice to an empty interface
// slice using pointers to each elements of the original slice.
@ -114,6 +114,18 @@ func CopyPinInfoSliceToIfaces(in [][]*api.PinInfo) []interface{} {
return ifaces
}
// CopyRepoGCSliceToIfaces converts an api.RepoGC slice to
// an empty interface slice using pointers to each elements of
// the original slice. Useful to handle gorpc.MultiCall() replies.
// CopyRepoGCSliceToIfaces converts an api.RepoGC slice to
// an empty interface slice using pointers to each elements of
// the original slice. Useful to handle gorpc.MultiCall() replies.
// Each slot of `in` is allocated so RPC replies can be written into it.
func CopyRepoGCSliceToIfaces(in []*api.RepoGC) []interface{} {
	// cap == len by default; the explicit capacity argument was redundant.
	ifaces := make([]interface{}, len(in))
	for i := range in {
		in[i] = &api.RepoGC{}
		ifaces[i] = in[i]
	}
	return ifaces
}
// CopyEmptyStructToIfaces converts an empty struct slice to an empty interface
// slice using pointers to each elements of the original slice.
// Useful to handle gorpc.MultiCall() replies.

View File

@ -105,6 +105,11 @@ type mockBlockPutResp struct {
Key string
}
// mockRepoGCResp mirrors one ndjson object streamed by the real IPFS
// /repo/gc endpoint: either a collected Key or an Error per entry.
type mockRepoGCResp struct {
	Key   cid.Cid `json:",omitempty"`
	Error string  `json:",omitempty"`
}
// NewIpfsMock returns a new mock.
func NewIpfsMock(t *testing.T) *IpfsMock {
store := inmem.New()
@ -379,6 +384,33 @@ func (m *IpfsMock) handler(w http.ResponseWriter, r *http.Request) {
goto ERROR
}
w.Write(data)
case "repo/gc":
// It assumes `/repo/gc` with parameter `stream-errors=true`
enc := json.NewEncoder(w)
resp := []mockRepoGCResp{
{
Key: Cid1,
},
{
Key: Cid2,
},
{
Key: Cid3,
},
{
Key: Cid4,
},
{
Error: "no link by that name",
},
}
for _, r := range resp {
if err := enc.Encode(&r); err != nil {
goto ERROR
}
}
case "repo/stat":
sizeOnly := r.URL.Query().Get("size-only")
list, err := m.pinMap.List(ctx)

View File

@ -321,6 +321,30 @@ func (mock *mockCluster) BlockAllocate(ctx context.Context, in *api.Pin, out *[]
return nil
}
// RepoGC mocks a cluster-wide GC by wrapping the local mock result
// under a single-peer map.
func (mock *mockCluster) RepoGC(ctx context.Context, in struct{}, out *api.GlobalRepoGC) error {
	local := &api.RepoGC{}
	_ = mock.RepoGCLocal(ctx, struct{}{}, local)

	pm := make(map[string]*api.RepoGC, 1)
	pm[peer.IDB58Encode(PeerID1)] = local
	*out = api.GlobalRepoGC{PeerMap: pm}
	return nil
}
// RepoGCLocal mocks a local GC returning one fixed collected CID.
func (mock *mockCluster) RepoGCLocal(ctx context.Context, in struct{}, out *api.RepoGC) error {
	keys := []api.IPFSRepoGC{{Key: Cid1}}
	*out = api.RepoGC{Peer: PeerID1, Keys: keys}
	return nil
}
func (mock *mockCluster) SendInformerMetric(ctx context.Context, in struct{}, out *api.Metric) error {
return nil
}