Remove *Serial types. Use pointers for all types.

This takes advantage of the latest features in go-cid, peer.ID and
go-multiaddr and makes the Go types serializable by default.
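
As a minimal, hypothetical sketch (not code from this commit) of what "serializable by default" means in practice: a struct can embed cid.Cid directly and still round-trip through encoding/json, because go-cid provides the marshaling methods itself, so no parallel *Serial struct with string fields is required.

package main

import (
	"encoding/json"
	"fmt"

	cid "github.com/ipfs/go-cid"
)

// example is an illustrative type only; it is not part of the ipfs-cluster API.
type example struct {
	Cid  cid.Cid `json:"cid"`
	Name string  `json:"name"`
}

func main() {
	c, err := cid.Decode("QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG")
	if err != nil {
		panic(err)
	}

	// cid.Cid implements json.Marshaler/Unmarshaler, so the struct marshals
	// without any manual stringifying.
	out, _ := json.Marshal(example{Cid: c, Name: "demo"})
	fmt.Println(string(out))

	var back example
	if err := json.Unmarshal(out, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.Cid.Equals(c)) // true
}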

This means we no longer need to copy between Pin <-> PinSerial, or ID <->
IDSerial etc. We can now efficiently binary-encode these types using short
field keys and without parsing/stringifying (in many cases it is just a cast).
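
To illustrate what goes away, a hedged before/after sketch using hypothetical local stand-ins for the old and new api.NodeWithMeta (the real change is visible in the PutBlock hunk further down):

package sketch

import cid "github.com/ipfs/go-cid"

// nodeWithMetaOld mirrors how the type used to carry the CID: as a string.
type nodeWithMetaOld struct {
	Cid  string
	Data []byte
}

// nodeWithMetaNew carries the cid.Cid directly; there is no *Serial mirror.
type nodeWithMetaNew struct {
	Cid  cid.Cid
	Data []byte
}

// Before: every consumer had to re-parse the string before using it.
func codecNameOld(n *nodeWithMetaOld) (string, error) {
	c, err := cid.Decode(n.Cid)
	if err != nil {
		return "", err
	}
	return cid.CodecToStr[c.Type()], nil
}

// After: the Cid is used as-is; no Decode/String round-trip and no error path.
func codecNameNew(n *nodeWithMetaNew) string {
	return cid.CodecToStr[n.Cid.Type()]
}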

We still get the same JSON output as before (with minor modifications for
Cids).
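
Assuming the Cid fields now rely on go-cid's own JSON marshaler (rather than a string field in a *Serial struct), the "minor modifications" would look roughly like this, illustrated with a made-up pin object:

// before (PinSerial):  {"cid":"QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG","name":"demo"}
// after  (Pin):        {"cid":{"/":"QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG"},"name":"demo"}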

This should greatly improve Cluster performance and memory usage when dealing
with large collections of items.

License: MIT
Signed-off-by: Hector Sanjuan <hector@protocol.ai>
Hector Sanjuan 2019-02-27 17:04:35 +00:00
parent 041f5e1e93
commit 6447ea51d2
74 changed files with 1474 additions and 2104 deletions

View File

@ -55,7 +55,6 @@ func TestAddPeerDown(t *testing.T) {
defer shutdownClusters(t, clusters, mock)
sth := test.NewShardingTestHelper()
defer sth.Clean(t)
err := clusters[0].Shutdown(ctx)
if err != nil {
t.Fatal(err)
@ -84,7 +83,6 @@ func TestAddPeerDown(t *testing.T) {
if c.id == clusters[0].id {
return
}
pin := c.StatusLocal(ctx, ci)
if pin.Error != "" {
t.Error(pin.Error)

View File

@ -94,7 +94,7 @@ func (a *Adder) FromMultipart(ctx context.Context, r *multipart.Reader) (cid.Cid
// FromFiles adds content from a files.Directory. The adder will no longer
// be usable after calling this method.
func (a *Adder) FromFiles(ctx context.Context, f files.Directory) (cid.Cid, error) {
logger.Debugf("adding from files")
logger.Error("adding from files")
a.setContext(ctx)
if a.ctx.Err() != nil { // don't allow running twice

View File

@ -57,7 +57,7 @@ func (dgs *DAGService) Add(ctx context.Context, node ipld.Node) error {
return err
}
nodeSerial := &api.NodeWithMeta{
Cid: node.Cid().String(),
Cid: node.Cid(),
Data: node.RawData(),
CumSize: size,
}
@ -78,7 +78,7 @@ func (dgs *DAGService) Finalize(ctx context.Context, root cid.Cid) (cid.Cid, err
"",
"Cluster",
"Pin",
rootPin.ToSerial(),
rootPin,
&struct{}{},
)
}

View File

@ -7,6 +7,8 @@ import (
"sync"
"testing"
peer "github.com/libp2p/go-libp2p-peer"
adder "github.com/ipfs/ipfs-cluster/adder"
"github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/test"
@ -19,21 +21,23 @@ type testRPC struct {
pins sync.Map
}
func (rpcs *testRPC) IPFSBlockPut(ctx context.Context, in api.NodeWithMeta, out *struct{}) error {
rpcs.blocks.Store(in.Cid, in)
func (rpcs *testRPC) IPFSBlockPut(ctx context.Context, in *api.NodeWithMeta, out *struct{}) error {
rpcs.blocks.Store(in.Cid.String(), in)
return nil
}
func (rpcs *testRPC) Pin(ctx context.Context, in api.PinSerial, out *struct{}) error {
rpcs.pins.Store(in.Cid, in)
func (rpcs *testRPC) Pin(ctx context.Context, in *api.Pin, out *struct{}) error {
rpcs.pins.Store(in.Cid.String(), in)
return nil
}
func (rpcs *testRPC) BlockAllocate(ctx context.Context, in api.PinSerial, out *[]string) error {
func (rpcs *testRPC) BlockAllocate(ctx context.Context, in *api.Pin, out *[]peer.ID) error {
if in.ReplicationFactorMin > 1 {
return errors.New("we can only replicate to 1 peer")
}
*out = []string{""}
// it does not matter since we use host == nil for RPC, so it uses the
// local one in all cases.
*out = []peer.ID{test.TestPeerID1}
return nil
}

View File

@ -126,8 +126,8 @@ func makeDAG(ctx context.Context, dagObj map[string]cid.Cid) ([]ipld.Node, error
func putDAG(ctx context.Context, rpcC *rpc.Client, nodes []ipld.Node, dests []peer.ID) error {
for _, n := range nodes {
//logger.Debugf("The dag cbor Node Links: %+v", n.Links())
b := api.NodeWithMeta{
Cid: n.Cid().String(), // Tests depend on this.
b := &api.NodeWithMeta{
Cid: n.Cid(), // Tests depend on this.
Data: n.RawData(),
Format: "cbor",
}

View File

@ -76,7 +76,7 @@ func (dgs *DAGService) Add(ctx context.Context, node ipld.Node) error {
return err
}
nodeSerial := &api.NodeWithMeta{
Cid: node.Cid().String(),
Cid: node.Cid(),
Data: node.RawData(),
CumSize: size,
}
@ -122,7 +122,7 @@ func (dgs *DAGService) Finalize(ctx context.Context, dataRoot cid.Cid) (cid.Cid,
clusterDAGPin.MaxDepth = 0 // pin direct
clusterDAGPin.Name = fmt.Sprintf("%s-clusterDAG", dgs.pinOpts.Name)
clusterDAGPin.Type = api.ClusterDAGType
clusterDAGPin.Reference = dataRoot
clusterDAGPin.Reference = &dataRoot
err = adder.Pin(ctx, dgs.rpcClient, clusterDAGPin)
if err != nil {
return dataRoot, err
@ -131,7 +131,7 @@ func (dgs *DAGService) Finalize(ctx context.Context, dataRoot cid.Cid) (cid.Cid,
// Pin the META pin
metaPin := api.PinWithOpts(dataRoot, dgs.pinOpts)
metaPin.Type = api.MetaType
metaPin.Reference = clusterDAG
metaPin.Reference = &clusterDAG
metaPin.MaxDepth = 0 // irrelevant. Meta-pins are not pinned
err = adder.Pin(ctx, dgs.rpcClient, metaPin)
if err != nil {
@ -180,14 +180,9 @@ func (dgs *DAGService) ingestBlock(ctx context.Context, n *api.NodeWithMeta) err
logger.Debugf("ingesting block %s in shard %d (%s)", n.Cid, len(dgs.shards), dgs.pinOpts.Name)
c, err := cid.Decode(n.Cid)
if err != nil {
return err
}
// add the block to it if it fits and return
if shard.Size()+n.Size() < shard.Limit() {
shard.AddLink(ctx, c, n.Size())
shard.AddLink(ctx, n.Cid, n.Size())
return adder.PutBlock(ctx, dgs.rpcClient, n, shard.Allocations())
}
@ -207,7 +202,7 @@ func (dgs *DAGService) ingestBlock(ctx context.Context, n *api.NodeWithMeta) err
return errors.New("block doesn't fit in empty shard: shard size too small?")
}
_, err = dgs.flushCurrentShard(ctx)
_, err := dgs.flushCurrentShard(ctx)
if err != nil {
return err
}

View File

@ -10,6 +10,7 @@ import (
adder "github.com/ipfs/ipfs-cluster/adder"
"github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/test"
peer "github.com/libp2p/go-libp2p-peer"
cid "github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log"
@ -26,30 +27,32 @@ type testRPC struct {
pins sync.Map
}
func (rpcs *testRPC) IPFSBlockPut(ctx context.Context, in api.NodeWithMeta, out *struct{}) error {
rpcs.blocks.Store(in.Cid, in.Data)
func (rpcs *testRPC) IPFSBlockPut(ctx context.Context, in *api.NodeWithMeta, out *struct{}) error {
rpcs.blocks.Store(in.Cid.String(), in.Data)
return nil
}
func (rpcs *testRPC) Pin(ctx context.Context, in api.PinSerial, out *struct{}) error {
rpcs.pins.Store(in.Cid, in)
func (rpcs *testRPC) Pin(ctx context.Context, in *api.Pin, out *struct{}) error {
rpcs.pins.Store(in.Cid.String(), in)
return nil
}
func (rpcs *testRPC) BlockAllocate(ctx context.Context, in api.PinSerial, out *[]string) error {
func (rpcs *testRPC) BlockAllocate(ctx context.Context, in *api.Pin, out *[]peer.ID) error {
if in.ReplicationFactorMin > 1 {
return errors.New("we can only replicate to 1 peer")
}
*out = []string{""}
// it does not matter since we use host == nil for RPC, so it uses the
// local one in all cases
*out = []peer.ID{test.TestPeerID1}
return nil
}
func (rpcs *testRPC) PinGet(ctx context.Context, c cid.Cid) (api.Pin, error) {
func (rpcs *testRPC) PinGet(ctx context.Context, c cid.Cid) (*api.Pin, error) {
pI, ok := rpcs.pins.Load(c.String())
if !ok {
return api.Pin{}, errors.New("not found")
return nil, errors.New("not found")
}
return pI.(api.PinSerial).ToPin(), nil
return pI.(*api.Pin), nil
}
func (rpcs *testRPC) BlockGet(ctx context.Context, c cid.Cid) ([]byte, error) {
@ -110,7 +113,7 @@ func TestFromMultipart(t *testing.T) {
// Print all pins
// rpcObj.pins.Range(func(k, v interface{}) bool {
// p := v.(api.PinSerial)
// p := v.(*api.Pin)
// j, _ := config.DefaultJSONMarshal(p)
// fmt.Printf("%s", j)
// return true

View File

@ -92,7 +92,7 @@ func (sh *shard) Flush(ctx context.Context, shardN int, prev cid.Cid) (cid.Cid,
// this sets allocations as priority allocation
pin.Allocations = sh.allocations
pin.Type = api.ShardType
pin.Reference = prev
pin.Reference = &prev
pin.MaxDepth = 1
pin.ShardSize = sh.Size() // use current size, not the limit
if len(nodes) > len(sh.dagNode)+1 { // using an indirect graph

View File

@ -2,6 +2,7 @@ package sharding
import (
"context"
"errors"
"fmt"
"testing"
@ -13,7 +14,7 @@ import (
// MockPinStore is used in VerifyShards
type MockPinStore interface {
// Gets a pin
PinGet(context.Context, cid.Cid) (api.Pin, error)
PinGet(context.Context, cid.Cid) (*api.Pin, error)
}
// MockBlockStore is used in VerifyShards
@ -36,7 +37,11 @@ func VerifyShards(t *testing.T, rootCid cid.Cid, pins MockPinStore, ipfs MockBlo
return nil, fmt.Errorf("bad MetaPin type")
}
clusterPin, err := pins.PinGet(ctx, metaPin.Reference)
if metaPin.Reference == nil {
return nil, errors.New("MetaPin.Reference is unset")
}
clusterPin, err := pins.PinGet(ctx, *metaPin.Reference)
if err != nil {
return nil, fmt.Errorf("cluster pin was not pinned: %s", err)
}

View File

@ -16,17 +16,12 @@ import (
// PutBlock sends a NodeWithMeta to the given destinations.
func PutBlock(ctx context.Context, rpc *rpc.Client, n *api.NodeWithMeta, dests []peer.ID) error {
c, err := cid.Decode(n.Cid)
if err != nil {
return err
}
format, ok := cid.CodecToStr[c.Type()]
format, ok := cid.CodecToStr[n.Cid.Type()]
if !ok {
format = ""
logger.Warning("unsupported cid type, treating as v0")
}
if c.Prefix().Version == 0 {
if n.Cid.Prefix().Version == 0 {
format = "v0"
}
n.Format = format
@ -40,7 +35,7 @@ func PutBlock(ctx context.Context, rpc *rpc.Client, n *api.NodeWithMeta, dests [
dests,
"Cluster",
"IPFSBlockPut",
*n,
n,
rpcutil.RPCDiscardReplies(len(dests)),
)
return rpcutil.CheckErrs(errs)
@ -49,20 +44,20 @@ func PutBlock(ctx context.Context, rpc *rpc.Client, n *api.NodeWithMeta, dests [
// BlockAllocate helps allocating blocks to peers.
func BlockAllocate(ctx context.Context, rpc *rpc.Client, pinOpts api.PinOptions) ([]peer.ID, error) {
// Find where to allocate this file
var allocsStr []string
var allocsStr []peer.ID
err := rpc.CallContext(
ctx,
"",
"Cluster",
"BlockAllocate",
api.PinWithOpts(cid.Undef, pinOpts).ToSerial(),
api.PinWithOpts(cid.Undef, pinOpts),
&allocsStr,
)
return api.StringsToPeers(allocsStr), err
return allocsStr, err
}
// Pin helps sending local RPC pin requests.
func Pin(ctx context.Context, rpc *rpc.Client, pin api.Pin) error {
func Pin(ctx context.Context, rpc *rpc.Client, pin *api.Pin) error {
if pin.ReplicationFactorMin < 0 {
pin.Allocations = []peer.ID{}
}
@ -72,7 +67,7 @@ func Pin(ctx context.Context, rpc *rpc.Client, pin api.Pin) error {
"", // use ourself to pin
"Cluster",
"Pin",
pin.ToSerial(),
pin,
&struct{}{},
)
}

View File

@ -7,6 +7,7 @@ import (
cid "github.com/ipfs/go-cid"
peer "github.com/libp2p/go-libp2p-peer"
"go.opencensus.io/trace"
"github.com/ipfs/ipfs-cluster/api"
@ -59,13 +60,16 @@ func (c *Cluster) allocate(ctx context.Context, hash cid.Cid, rplMin, rplMax int
}
// Figure out who is holding the CID
currentPin, _ := c.PinGet(ctx, hash)
currentAllocs := currentPin.Allocations
var currentAllocs []peer.ID
currentPin, err := c.PinGet(ctx, hash)
if err == nil {
currentAllocs = currentPin.Allocations
}
metrics := c.monitor.LatestMetrics(ctx, c.informer.Name())
currentMetrics := make(map[peer.ID]api.Metric)
candidatesMetrics := make(map[peer.ID]api.Metric)
priorityMetrics := make(map[peer.ID]api.Metric)
currentMetrics := make(map[peer.ID]*api.Metric)
candidatesMetrics := make(map[peer.ID]*api.Metric)
priorityMetrics := make(map[peer.ID]*api.Metric)
// Divide metrics between current and candidates.
// All metrics in metrics are valid (at least the
@ -123,9 +127,9 @@ func (c *Cluster) obtainAllocations(
ctx context.Context,
hash cid.Cid,
rplMin, rplMax int,
currentValidMetrics map[peer.ID]api.Metric,
candidatesMetrics map[peer.ID]api.Metric,
priorityMetrics map[peer.ID]api.Metric,
currentValidMetrics map[peer.ID]*api.Metric,
candidatesMetrics map[peer.ID]*api.Metric,
priorityMetrics map[peer.ID]*api.Metric,
) ([]peer.ID, error) {
ctx, span := trace.StartSpan(ctx, "cluster/obtainAllocations")
defer span.End()

View File

@ -39,7 +39,7 @@ func (alloc AscendAllocator) Shutdown(_ context.Context) error { return nil }
func (alloc AscendAllocator) Allocate(
ctx context.Context,
c cid.Cid,
current, candidates, priority map[peer.ID]api.Metric,
current, candidates, priority map[peer.ID]*api.Metric,
) ([]peer.ID, error) {
// sort our metrics
first := util.SortNumeric(priority, false)

View File

@ -12,8 +12,8 @@ import (
)
type testcase struct {
candidates map[peer.ID]api.Metric
current map[peer.ID]api.Metric
candidates map[peer.ID]*api.Metric
current map[peer.ID]*api.Metric
expected []peer.ID
}
@ -29,7 +29,7 @@ var inAMinute = time.Now().Add(time.Minute).UnixNano()
var testCases = []testcase{
{ // regular sort
candidates: map[peer.ID]api.Metric{
candidates: map[peer.ID]*api.Metric{
peer0: {
Name: "some-metric",
Value: "5",
@ -55,11 +55,11 @@ var testCases = []testcase{
Valid: true,
},
},
current: map[peer.ID]api.Metric{},
current: map[peer.ID]*api.Metric{},
expected: []peer.ID{peer1, peer3, peer2, peer0},
},
{ // filter invalid
candidates: map[peer.ID]api.Metric{
candidates: map[peer.ID]*api.Metric{
peer0: {
Name: "some-metric",
Value: "1",
@ -73,11 +73,11 @@ var testCases = []testcase{
Valid: true,
},
},
current: map[peer.ID]api.Metric{},
current: map[peer.ID]*api.Metric{},
expected: []peer.ID{peer1},
},
{ // filter bad value
candidates: map[peer.ID]api.Metric{
candidates: map[peer.ID]*api.Metric{
peer0: {
Name: "some-metric",
Value: "abc",
@ -91,7 +91,7 @@ var testCases = []testcase{
Valid: true,
},
},
current: map[peer.ID]api.Metric{},
current: map[peer.ID]*api.Metric{},
expected: []peer.ID{peer1},
},
}

View File

@ -36,7 +36,7 @@ func (alloc DescendAllocator) Shutdown(_ context.Context) error { return nil }
// carry a numeric value such as "used disk". We do not pay attention to
// the metrics of the currently allocated peers and we just sort the
// candidates based on their metric values (largest to smallest).
func (alloc DescendAllocator) Allocate(ctx context.Context, c cid.Cid, current, candidates, priority map[peer.ID]api.Metric) ([]peer.ID, error) {
func (alloc DescendAllocator) Allocate(ctx context.Context, c cid.Cid, current, candidates, priority map[peer.ID]*api.Metric) ([]peer.ID, error) {
// sort our metrics
first := util.SortNumeric(priority, true)
last := util.SortNumeric(candidates, true)

View File

@ -12,8 +12,8 @@ import (
)
type testcase struct {
candidates map[peer.ID]api.Metric
current map[peer.ID]api.Metric
candidates map[peer.ID]*api.Metric
current map[peer.ID]*api.Metric
expected []peer.ID
}
@ -29,7 +29,7 @@ var inAMinute = time.Now().Add(time.Minute).UnixNano()
var testCases = []testcase{
{ // regular sort
candidates: map[peer.ID]api.Metric{
candidates: map[peer.ID]*api.Metric{
peer0: {
Name: "some-metric",
Value: "5",
@ -55,11 +55,11 @@ var testCases = []testcase{
Valid: true,
},
},
current: map[peer.ID]api.Metric{},
current: map[peer.ID]*api.Metric{},
expected: []peer.ID{peer1, peer3, peer2, peer0},
},
{ // filter invalid
candidates: map[peer.ID]api.Metric{
candidates: map[peer.ID]*api.Metric{
peer0: {
Name: "some-metric",
Value: "1",
@ -73,11 +73,11 @@ var testCases = []testcase{
Valid: true,
},
},
current: map[peer.ID]api.Metric{},
current: map[peer.ID]*api.Metric{},
expected: []peer.ID{peer1},
},
{ // filter bad value
candidates: map[peer.ID]api.Metric{
candidates: map[peer.ID]*api.Metric{
peer0: {
Name: "some-metric",
Value: "abc",
@ -91,7 +91,7 @@ var testCases = []testcase{
Valid: true,
},
},
current: map[peer.ID]api.Metric{},
current: map[peer.ID]*api.Metric{},
expected: []peer.ID{peer1},
},
}

View File

@ -16,7 +16,7 @@ import (
// SortNumeric returns a list of peers sorted by their metric values. If reverse
// is false (true), peers will be sorted from smallest to largest (largest to
// smallest) metric
func SortNumeric(candidates map[peer.ID]api.Metric, reverse bool) []peer.ID {
func SortNumeric(candidates map[peer.ID]*api.Metric, reverse bool) []peer.ID {
vMap := make(map[peer.ID]uint64)
peers := make([]peer.ID, 0, len(candidates))
for k, v := range candidates {

View File

@ -300,11 +300,15 @@ func (proxy *Server) pinOpHandler(op string, w http.ResponseWriter, r *http.Requ
return
}
var rpcArg interface{} = api.PinCid(c)
if op == "Unpin" {
rpcArg = c
}
err = proxy.rpcClient.Call(
"",
"Cluster",
op,
api.PinCid(c).ToSerial(),
rpcArg,
&struct{}{},
)
if err != nil {
@ -342,23 +346,23 @@ func (proxy *Server) pinLsHandler(w http.ResponseWriter, r *http.Request) {
ipfsErrorResponder(w, err.Error())
return
}
var pin api.PinSerial
var pin api.Pin
err = proxy.rpcClient.Call(
"",
"Cluster",
"PinGet",
api.PinCid(c).ToSerial(),
c,
&pin,
)
if err != nil {
ipfsErrorResponder(w, fmt.Sprintf("Error: path '%s' is not pinned", arg))
return
}
pinLs.Keys[pin.Cid] = ipfsPinType{
pinLs.Keys[pin.Cid.String()] = ipfsPinType{
Type: "recursive",
}
} else {
pins := make([]api.PinSerial, 0)
pins := make([]*api.Pin, 0)
err := proxy.rpcClient.Call(
"",
"Cluster",
@ -372,7 +376,7 @@ func (proxy *Server) pinLsHandler(w http.ResponseWriter, r *http.Request) {
}
for _, pin := range pins {
pinLs.Keys[pin.Cid] = ipfsPinType{
pinLs.Keys[pin.Cid.String()] = ipfsPinType{
Type: "recursive",
}
}
@ -450,7 +454,7 @@ func (proxy *Server) addHandler(w http.ResponseWriter, r *http.Request) {
"",
"Cluster",
"Unpin",
api.PinCid(root).ToSerial(),
root,
&struct{}{},
)
if err != nil {
@ -478,10 +482,10 @@ func (proxy *Server) repoStatHandler(w http.ResponseWriter, r *http.Request) {
ctxs, cancels := rpcutil.CtxsWithCancel(proxy.ctx, len(peers))
defer rpcutil.MultiCancel(cancels)
repoStats := make([]api.IPFSRepoStat, len(peers), len(peers))
repoStats := make([]*api.IPFSRepoStat, len(peers), len(peers))
repoStatsIfaces := make([]interface{}, len(repoStats), len(repoStats))
for i := range repoStats {
repoStatsIfaces[i] = &repoStats[i]
repoStatsIfaces[i] = repoStats[i]
}
errs := proxy.rpcClient.MultiCall(

View File

@ -329,8 +329,6 @@ func TestIPFSProxyPinLs(t *testing.T) {
t.Fatal(err)
}
fmt.Println(string(resBytes))
_, ok := resp.Keys[test.TestCid1]
if len(resp.Keys) != 1 || !ok {
t.Error("wrong response")

View File

@ -47,12 +47,12 @@ var logger = logging.Logger(loggingFacility)
// metrics and tracing of requests through the API.
type Client interface {
// ID returns information about the cluster Peer.
ID(context.Context) (api.ID, error)
ID(context.Context) (*api.ID, error)
// Peers requests ID information for all cluster peers.
Peers(context.Context) ([]api.ID, error)
Peers(context.Context) ([]*api.ID, error)
// PeerAdd adds a new peer to the cluster.
PeerAdd(ctx context.Context, pid peer.ID) (api.ID, error)
PeerAdd(ctx context.Context, pid peer.ID) (*api.ID, error)
// PeerRm removes a current peer from the cluster
PeerRm(ctx context.Context, pid peer.ID) error
@ -68,58 +68,57 @@ type Client interface {
Unpin(ctx context.Context, ci cid.Cid) error
// PinPath resolves given path into a cid and performs the pin operation.
PinPath(ctx context.Context, path string, opts api.PinOptions) (api.Pin, error)
PinPath(ctx context.Context, path string, opts api.PinOptions) (*api.Pin, error)
// UnpinPath resolves given path into a cid and performs the unpin operation.
// It returns api.Pin of the given cid before it is unpinned.
UnpinPath(ctx context.Context, path string) (api.Pin, error)
UnpinPath(ctx context.Context, path string) (*api.Pin, error)
// Allocations returns the consensus state listing all tracked items
// and the peers that should be pinning them.
Allocations(ctx context.Context, filter api.PinType) ([]api.Pin, error)
Allocations(ctx context.Context, filter api.PinType) ([]*api.Pin, error)
// Allocation returns the current allocations for a given Cid.
Allocation(ctx context.Context, ci cid.Cid) (api.Pin, error)
Allocation(ctx context.Context, ci cid.Cid) (*api.Pin, error)
// Status returns the current ipfs state for a given Cid. If local is true,
// the information affects only the current peer, otherwise the information
// is fetched from all cluster peers.
Status(ctx context.Context, ci cid.Cid, local bool) (api.GlobalPinInfo, error)
Status(ctx context.Context, ci cid.Cid, local bool) (*api.GlobalPinInfo, error)
// StatusAll gathers Status() for all tracked items.
StatusAll(ctx context.Context, filter api.TrackerStatus, local bool) ([]api.GlobalPinInfo, error)
StatusAll(ctx context.Context, filter api.TrackerStatus, local bool) ([]*api.GlobalPinInfo, error)
// Sync makes sure the state of a Cid corresponds to the state reported
// by the ipfs daemon, and returns it. If local is true, this operation
// only happens on the current peer, otherwise it happens on every
// cluster peer.
Sync(ctx context.Context, ci cid.Cid, local bool) (api.GlobalPinInfo, error)
Sync(ctx context.Context, ci cid.Cid, local bool) (*api.GlobalPinInfo, error)
// SyncAll triggers Sync() operations for all tracked items. It only
// returns informations for items that were de-synced or have an error
// state. If local is true, the operation is limited to the current
// peer. Otherwise it happens on every cluster peer.
SyncAll(ctx context.Context, local bool) ([]api.GlobalPinInfo, error)
SyncAll(ctx context.Context, local bool) ([]*api.GlobalPinInfo, error)
// Recover retriggers pin or unpin ipfs operations for a Cid in error
// state. If local is true, the operation is limited to the current
// peer, otherwise it happens on every cluster peer.
Recover(ctx context.Context, ci cid.Cid, local bool) (api.GlobalPinInfo, error)
Recover(ctx context.Context, ci cid.Cid, local bool) (*api.GlobalPinInfo, error)
// RecoverAll triggers Recover() operations on all tracked items. If
// local is true, the operation is limited to the current peer.
// Otherwise, it happens everywhere.
RecoverAll(ctx context.Context, local bool) ([]api.GlobalPinInfo, error)
RecoverAll(ctx context.Context, local bool) ([]*api.GlobalPinInfo, error)
// Version returns the ipfs-cluster peer's version.
Version(context.Context) (api.Version, error)
Version(context.Context) (*api.Version, error)
// IPFS returns an instance of go-ipfs-api's Shell, pointing to a
// Cluster's IPFS proxy endpoint.
IPFS(context.Context) *shell.Shell
// GetConnectGraph returns an ipfs-cluster connection graph. The
// serialized version, strings instead of pids, is returned
GetConnectGraph(context.Context) (api.ConnectGraphSerial, error)
// GetConnectGraph returns an ipfs-cluster connection graph.
GetConnectGraph(context.Context) (*api.ConnectGraph, error)
// Metrics returns a map with the latest metrics of matching name
// for the current cluster peers.
Metrics(ctx context.Context, name string) ([]api.Metric, error)
Metrics(ctx context.Context, name string) ([]*api.Metric, error)
}
// Config allows to configure the parameters to connect

View File

@ -24,27 +24,23 @@ import (
)
// ID returns information about the cluster Peer.
func (c *defaultClient) ID(ctx context.Context) (api.ID, error) {
func (c *defaultClient) ID(ctx context.Context) (*api.ID, error) {
ctx, span := trace.StartSpan(ctx, "client/ID")
defer span.End()
var id api.IDSerial
var id api.ID
err := c.do(ctx, "GET", "/id", nil, nil, &id)
return id.ToID(), err
return &id, err
}
// Peers requests ID information for all cluster peers.
func (c *defaultClient) Peers(ctx context.Context) ([]api.ID, error) {
func (c *defaultClient) Peers(ctx context.Context) ([]*api.ID, error) {
ctx, span := trace.StartSpan(ctx, "client/Peers")
defer span.End()
var ids []api.IDSerial
var ids []*api.ID
err := c.do(ctx, "GET", "/peers", nil, nil, &ids)
result := make([]api.ID, len(ids))
for i, id := range ids {
result[i] = id.ToID()
}
return result, err
return ids, err
}
type peerAddBody struct {
@ -52,7 +48,7 @@ type peerAddBody struct {
}
// PeerAdd adds a new peer to the cluster.
func (c *defaultClient) PeerAdd(ctx context.Context, pid peer.ID) (api.ID, error) {
func (c *defaultClient) PeerAdd(ctx context.Context, pid peer.ID) (*api.ID, error) {
ctx, span := trace.StartSpan(ctx, "client/PeerAdd")
defer span.End()
@ -63,9 +59,9 @@ func (c *defaultClient) PeerAdd(ctx context.Context, pid peer.ID) (api.ID, error
enc := json.NewEncoder(&buf)
enc.Encode(body)
var id api.IDSerial
var id api.ID
err := c.do(ctx, "POST", "/peers", nil, &buf, &id)
return id.ToID(), err
return &id, err
}
// PeerRm removes a current peer from the cluster
@ -105,14 +101,14 @@ func (c *defaultClient) Unpin(ctx context.Context, ci cid.Cid) error {
}
// PinPath allows to pin an element by the given IPFS path.
func (c *defaultClient) PinPath(ctx context.Context, path string, opts api.PinOptions) (api.Pin, error) {
func (c *defaultClient) PinPath(ctx context.Context, path string, opts api.PinOptions) (*api.Pin, error) {
ctx, span := trace.StartSpan(ctx, "client/PinPath")
defer span.End()
var pin api.PinSerial
var pin api.Pin
ipfspath, err := gopath.ParsePath(path)
if err != nil {
return api.Pin{}, err
return nil, err
}
err = c.do(
@ -128,32 +124,32 @@ func (c *defaultClient) PinPath(ctx context.Context, path string, opts api.PinOp
&pin,
)
return pin.ToPin(), err
return &pin, err
}
// UnpinPath allows to unpin an item by providing its IPFS path.
// It returns the unpinned api.Pin information of the resolved Cid.
func (c *defaultClient) UnpinPath(ctx context.Context, p string) (api.Pin, error) {
func (c *defaultClient) UnpinPath(ctx context.Context, p string) (*api.Pin, error) {
ctx, span := trace.StartSpan(ctx, "client/UnpinPath")
defer span.End()
var pin api.PinSerial
var pin api.Pin
ipfspath, err := gopath.ParsePath(p)
if err != nil {
return api.Pin{}, err
return nil, err
}
err = c.do(ctx, "DELETE", fmt.Sprintf("/pins%s", ipfspath.String()), nil, nil, &pin)
return pin.ToPin(), err
return &pin, err
}
// Allocations returns the consensus state listing all tracked items and
// the peers that should be pinning them.
func (c *defaultClient) Allocations(ctx context.Context, filter api.PinType) ([]api.Pin, error) {
func (c *defaultClient) Allocations(ctx context.Context, filter api.PinType) ([]*api.Pin, error) {
ctx, span := trace.StartSpan(ctx, "client/Allocations")
defer span.End()
var pins []api.PinSerial
var pins []*api.Pin
types := []api.PinType{
api.DataType,
@ -176,33 +172,36 @@ func (c *defaultClient) Allocations(ctx context.Context, filter api.PinType) ([]
f := url.QueryEscape(strings.Join(strFilter, ","))
err := c.do(ctx, "GET", fmt.Sprintf("/allocations?filter=%s", f), nil, nil, &pins)
result := make([]api.Pin, len(pins))
for i, p := range pins {
result[i] = p.ToPin()
}
return result, err
return pins, err
}
// Allocation returns the current allocations for a given Cid.
func (c *defaultClient) Allocation(ctx context.Context, ci cid.Cid) (api.Pin, error) {
func (c *defaultClient) Allocation(ctx context.Context, ci cid.Cid) (*api.Pin, error) {
ctx, span := trace.StartSpan(ctx, "client/Allocation")
defer span.End()
var pin api.PinSerial
var pin api.Pin
err := c.do(ctx, "GET", fmt.Sprintf("/allocations/%s", ci.String()), nil, nil, &pin)
return pin.ToPin(), err
return &pin, err
}
// Status returns the current ipfs state for a given Cid. If local is true,
// the information affects only the current peer, otherwise the information
// is fetched from all cluster peers.
func (c *defaultClient) Status(ctx context.Context, ci cid.Cid, local bool) (api.GlobalPinInfo, error) {
func (c *defaultClient) Status(ctx context.Context, ci cid.Cid, local bool) (*api.GlobalPinInfo, error) {
ctx, span := trace.StartSpan(ctx, "client/Status")
defer span.End()
var gpi api.GlobalPinInfoSerial
err := c.do(ctx, "GET", fmt.Sprintf("/pins/%s?local=%t", ci.String(), local), nil, nil, &gpi)
return gpi.ToGlobalPinInfo(), err
var gpi api.GlobalPinInfo
err := c.do(
ctx,
"GET",
fmt.Sprintf("/pins/%s?local=%t", ci.String(), local),
nil,
nil,
&gpi,
)
return &gpi, err
}
// StatusAll gathers Status() for all tracked items. If a filter is
@ -210,11 +209,11 @@ func (c *defaultClient) Status(ctx context.Context, ci cid.Cid, local bool) (api
// will be returned. A filter can be built by merging TrackerStatuses with
// a bitwise OR operation (st1 | st2 | ...). A "0" filter value (or
// api.TrackerStatusUndefined), means all.
func (c *defaultClient) StatusAll(ctx context.Context, filter api.TrackerStatus, local bool) ([]api.GlobalPinInfo, error) {
func (c *defaultClient) StatusAll(ctx context.Context, filter api.TrackerStatus, local bool) ([]*api.GlobalPinInfo, error) {
ctx, span := trace.StartSpan(ctx, "client/StatusAll")
defer span.End()
var gpis []api.GlobalPinInfoSerial
var gpis []*api.GlobalPinInfo
filterStr := ""
if filter != api.TrackerStatusUndefined { // undefined filter means "all"
@ -224,102 +223,104 @@ func (c *defaultClient) StatusAll(ctx context.Context, filter api.TrackerStatus,
}
}
err := c.do(ctx, "GET", fmt.Sprintf("/pins?local=%t&filter=%s", local, url.QueryEscape(filterStr)), nil, nil, &gpis)
result := make([]api.GlobalPinInfo, len(gpis))
for i, p := range gpis {
result[i] = p.ToGlobalPinInfo()
}
return result, err
err := c.do(
ctx,
"GET",
fmt.Sprintf("/pins?local=%t&filter=%s", local, url.QueryEscape(filterStr)),
nil,
nil,
&gpis,
)
return gpis, err
}
// Sync makes sure the state of a Cid corresponds to the state reported by
// the ipfs daemon, and returns it. If local is true, this operation only
// happens on the current peer, otherwise it happens on every cluster peer.
func (c *defaultClient) Sync(ctx context.Context, ci cid.Cid, local bool) (api.GlobalPinInfo, error) {
func (c *defaultClient) Sync(ctx context.Context, ci cid.Cid, local bool) (*api.GlobalPinInfo, error) {
ctx, span := trace.StartSpan(ctx, "client/Sync")
defer span.End()
var gpi api.GlobalPinInfoSerial
err := c.do(ctx, "POST", fmt.Sprintf("/pins/%s/sync?local=%t", ci.String(), local), nil, nil, &gpi)
return gpi.ToGlobalPinInfo(), err
var gpi api.GlobalPinInfo
err := c.do(
ctx,
"POST",
fmt.Sprintf("/pins/%s/sync?local=%t", ci.String(), local),
nil,
nil,
&gpi,
)
return &gpi, err
}
// SyncAll triggers Sync() operations for all tracked items. It only returns
// informations for items that were de-synced or have an error state. If
// local is true, the operation is limited to the current peer. Otherwise
// it happens on every cluster peer.
func (c *defaultClient) SyncAll(ctx context.Context, local bool) ([]api.GlobalPinInfo, error) {
func (c *defaultClient) SyncAll(ctx context.Context, local bool) ([]*api.GlobalPinInfo, error) {
ctx, span := trace.StartSpan(ctx, "client/SyncAll")
defer span.End()
var gpis []api.GlobalPinInfoSerial
var gpis []*api.GlobalPinInfo
err := c.do(ctx, "POST", fmt.Sprintf("/pins/sync?local=%t", local), nil, nil, &gpis)
result := make([]api.GlobalPinInfo, len(gpis))
for i, p := range gpis {
result[i] = p.ToGlobalPinInfo()
}
return result, err
return gpis, err
}
// Recover retriggers pin or unpin ipfs operations for a Cid in error state.
// If local is true, the operation is limited to the current peer, otherwise
// it happens on every cluster peer.
func (c *defaultClient) Recover(ctx context.Context, ci cid.Cid, local bool) (api.GlobalPinInfo, error) {
func (c *defaultClient) Recover(ctx context.Context, ci cid.Cid, local bool) (*api.GlobalPinInfo, error) {
ctx, span := trace.StartSpan(ctx, "client/Recover")
defer span.End()
var gpi api.GlobalPinInfoSerial
var gpi api.GlobalPinInfo
err := c.do(ctx, "POST", fmt.Sprintf("/pins/%s/recover?local=%t", ci.String(), local), nil, nil, &gpi)
return gpi.ToGlobalPinInfo(), err
return &gpi, err
}
// RecoverAll triggers Recover() operations on all tracked items. If local is
// true, the operation is limited to the current peer. Otherwise, it happens
// everywhere.
func (c *defaultClient) RecoverAll(ctx context.Context, local bool) ([]api.GlobalPinInfo, error) {
func (c *defaultClient) RecoverAll(ctx context.Context, local bool) ([]*api.GlobalPinInfo, error) {
ctx, span := trace.StartSpan(ctx, "client/RecoverAll")
defer span.End()
var gpis []api.GlobalPinInfoSerial
var gpis []*api.GlobalPinInfo
err := c.do(ctx, "POST", fmt.Sprintf("/pins/recover?local=%t", local), nil, nil, &gpis)
result := make([]api.GlobalPinInfo, len(gpis))
for i, p := range gpis {
result[i] = p.ToGlobalPinInfo()
}
return result, err
return gpis, err
}
// Version returns the ipfs-cluster peer's version.
func (c *defaultClient) Version(ctx context.Context) (api.Version, error) {
func (c *defaultClient) Version(ctx context.Context) (*api.Version, error) {
ctx, span := trace.StartSpan(ctx, "client/Version")
defer span.End()
var ver api.Version
err := c.do(ctx, "GET", "/version", nil, nil, &ver)
return ver, err
return &ver, err
}
// GetConnectGraph returns an ipfs-cluster connection graph.
// The serialized version, strings instead of pids, is returned
func (c *defaultClient) GetConnectGraph(ctx context.Context) (api.ConnectGraphSerial, error) {
func (c *defaultClient) GetConnectGraph(ctx context.Context) (*api.ConnectGraph, error) {
ctx, span := trace.StartSpan(ctx, "client/GetConnectGraph")
defer span.End()
var graphS api.ConnectGraphSerial
err := c.do(ctx, "GET", "/health/graph", nil, nil, &graphS)
return graphS, err
var graph api.ConnectGraph
err := c.do(ctx, "GET", "/health/graph", nil, nil, &graph)
return &graph, err
}
// Metrics returns a map with the latest valid metrics of the given name
// for the current cluster peers.
func (c *defaultClient) Metrics(ctx context.Context, name string) ([]api.Metric, error) {
func (c *defaultClient) Metrics(ctx context.Context, name string) ([]*api.Metric, error) {
ctx, span := trace.StartSpan(ctx, "client/Metrics")
defer span.End()
if name == "" {
return nil, errors.New("bad metric name")
}
var metrics []api.Metric
var metrics []*api.Metric
err := c.do(ctx, "GET", fmt.Sprintf("/monitor/metrics/%s", name), nil, nil, &metrics)
return metrics, err
}
@ -332,7 +333,7 @@ func (c *defaultClient) Metrics(ctx context.Context, name string) ([]api.Metric,
// peers have transitioned to the target TrackerStatus or are Remote.
// If an error of some type happens, WaitFor returns immediately with an
// empty GlobalPinInfo.
func WaitFor(ctx context.Context, c Client, fp StatusFilterParams) (api.GlobalPinInfo, error) {
func WaitFor(ctx context.Context, c Client, fp StatusFilterParams) (*api.GlobalPinInfo, error) {
ctx, span := trace.StartSpan(ctx, "client/WaitFor")
defer span.End()
@ -344,14 +345,14 @@ func WaitFor(ctx context.Context, c Client, fp StatusFilterParams) (api.GlobalPi
go sf.pollStatus(ctx, c, fp)
go sf.filter(ctx, fp)
var status api.GlobalPinInfo
var status *api.GlobalPinInfo
for {
select {
case <-ctx.Done():
return api.GlobalPinInfo{}, ctx.Err()
return nil, ctx.Err()
case err := <-sf.Err:
return api.GlobalPinInfo{}, err
return nil, err
case st, ok := <-sf.Out:
if !ok { // channel closed
return status, nil
@ -371,15 +372,15 @@ type StatusFilterParams struct {
}
type statusFilter struct {
In, Out chan api.GlobalPinInfo
In, Out chan *api.GlobalPinInfo
Done chan struct{}
Err chan error
}
func newStatusFilter() *statusFilter {
return &statusFilter{
In: make(chan api.GlobalPinInfo),
Out: make(chan api.GlobalPinInfo),
In: make(chan *api.GlobalPinInfo),
Out: make(chan *api.GlobalPinInfo),
Done: make(chan struct{}),
Err: make(chan error),
}
@ -437,7 +438,7 @@ func (sf *statusFilter) pollStatus(ctx context.Context, c Client, fp StatusFilte
}
}
func statusReached(target api.TrackerStatus, gblPinInfo api.GlobalPinInfo) (bool, error) {
func statusReached(target api.TrackerStatus, gblPinInfo *api.GlobalPinInfo) (bool, error) {
for _, pinInfo := range gblPinInfo.PeerMap {
switch pinInfo.Status {
case target:

View File

@ -96,7 +96,7 @@ func TestPeersWithError(t *testing.T) {
if err == nil {
t.Fatal("expected error")
}
if ids == nil || len(ids) != 0 {
if ids != nil {
t.Fatal("expected no ids")
}
}
@ -234,8 +234,8 @@ func TestPinPath(t *testing.T) {
if !pin.Equals(resultantPin) {
t.Errorf("expected different pin: %s", p)
t.Errorf("expected: %+v", resultantPin.ToSerial())
t.Errorf("actual: %+v", pin.ToSerial())
t.Errorf("expected: %+v", resultantPin)
t.Errorf("actual: %+v", pin)
}
}
@ -490,53 +490,52 @@ type waitService struct {
pinStart time.Time
}
func (wait *waitService) Pin(ctx context.Context, in api.PinSerial, out *struct{}) error {
func (wait *waitService) Pin(ctx context.Context, in *api.Pin, out *struct{}) error {
wait.l.Lock()
defer wait.l.Unlock()
wait.pinStart = time.Now()
return nil
}
func (wait *waitService) Status(ctx context.Context, in api.PinSerial, out *api.GlobalPinInfoSerial) error {
func (wait *waitService) Status(ctx context.Context, in cid.Cid, out *api.GlobalPinInfo) error {
wait.l.Lock()
defer wait.l.Unlock()
c1, _ := cid.Decode(in.Cid)
if time.Now().After(wait.pinStart.Add(5 * time.Second)) { //pinned
*out = api.GlobalPinInfo{
Cid: c1,
PeerMap: map[peer.ID]api.PinInfo{
test.TestPeerID1: {
Cid: c1,
Cid: in,
PeerMap: map[string]*api.PinInfo{
peer.IDB58Encode(test.TestPeerID1): {
Cid: in,
Peer: test.TestPeerID1,
Status: api.TrackerStatusPinned,
TS: wait.pinStart,
},
test.TestPeerID2: {
Cid: c1,
peer.IDB58Encode(test.TestPeerID2): {
Cid: in,
Peer: test.TestPeerID2,
Status: api.TrackerStatusPinned,
TS: wait.pinStart,
},
},
}.ToSerial()
}
} else { // pinning
*out = api.GlobalPinInfo{
Cid: c1,
PeerMap: map[peer.ID]api.PinInfo{
test.TestPeerID1: {
Cid: c1,
Cid: in,
PeerMap: map[string]*api.PinInfo{
peer.IDB58Encode(test.TestPeerID1): {
Cid: in,
Peer: test.TestPeerID1,
Status: api.TrackerStatusPinning,
TS: wait.pinStart,
},
test.TestPeerID2: {
Cid: c1,
peer.IDB58Encode(test.TestPeerID2): {
Cid: in,
Peer: test.TestPeerID2,
Status: api.TrackerStatusPinned,
TS: wait.pinStart,
},
},
}.ToSerial()
}
}
return nil

View File

@ -301,7 +301,7 @@ func basicAuthHandler(credentials map[string]string, h http.Handler) http.Handle
}
func unauthorizedResp() (string, error) {
apiError := types.Error{
apiError := &types.Error{
Code: 401,
Message: "Unauthorized",
}
@ -526,17 +526,17 @@ func (api *API) SetClient(c *rpc.Client) {
}
func (api *API) idHandler(w http.ResponseWriter, r *http.Request) {
idSerial := types.IDSerial{}
var id types.ID
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"ID",
struct{}{},
&idSerial,
&id,
)
api.sendResponse(w, autoStatus, err, idSerial)
api.sendResponse(w, autoStatus, err, &id)
}
func (api *API) versionHandler(w http.ResponseWriter, r *http.Request) {
@ -554,7 +554,7 @@ func (api *API) versionHandler(w http.ResponseWriter, r *http.Request) {
}
func (api *API) graphHandler(w http.ResponseWriter, r *http.Request) {
var graph types.ConnectGraphSerial
var graph types.ConnectGraph
err := api.rpcClient.CallContext(
r.Context(),
"",
@ -570,7 +570,7 @@ func (api *API) metricsHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name := vars["name"]
var metrics []types.Metric
var metrics []*types.Metric
err := api.rpcClient.CallContext(
r.Context(),
"",
@ -610,18 +610,22 @@ func (api *API) addHandler(w http.ResponseWriter, r *http.Request) {
return
}
func init() {
// logging.SetLogLevel("*", "debug")
}
func (api *API) peerListHandler(w http.ResponseWriter, r *http.Request) {
var peersSerial []types.IDSerial
var peers []*types.ID
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"Peers",
struct{}{},
&peersSerial,
&peers,
)
api.sendResponse(w, autoStatus, err, peersSerial)
api.sendResponse(w, autoStatus, err, peers)
}
func (api *API) peerAddHandler(w http.ResponseWriter, r *http.Request) {
@ -635,22 +639,22 @@ func (api *API) peerAddHandler(w http.ResponseWriter, r *http.Request) {
return
}
_, err = peer.IDB58Decode(addInfo.PeerID)
pid, err := peer.IDB58Decode(addInfo.PeerID)
if err != nil {
api.sendResponse(w, http.StatusBadRequest, errors.New("error decoding peer_id"), nil)
return
}
var ids types.IDSerial
var id types.ID
err = api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"PeerAdd",
addInfo.PeerID,
&ids,
pid,
&id,
)
api.sendResponse(w, autoStatus, err, ids)
api.sendResponse(w, autoStatus, err, &id)
}
func (api *API) peerRemoveHandler(w http.ResponseWriter, r *http.Request) {
@ -668,15 +672,15 @@ func (api *API) peerRemoveHandler(w http.ResponseWriter, r *http.Request) {
}
func (api *API) pinHandler(w http.ResponseWriter, r *http.Request) {
if ps := api.parseCidOrError(w, r); ps.Cid != "" {
logger.Debugf("rest api pinHandler: %s", ps.Cid)
// span.AddAttributes(trace.StringAttribute("cid", ps.Cid))
if pin := api.parseCidOrError(w, r); pin != nil {
logger.Debugf("rest api pinHandler: %s", pin.Cid)
// span.AddAttributes(trace.StringAttribute("cid", pin.Cid))
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"Pin",
ps,
pin,
&struct{}{},
)
api.sendResponse(w, http.StatusAccepted, err, nil)
@ -685,15 +689,15 @@ func (api *API) pinHandler(w http.ResponseWriter, r *http.Request) {
}
func (api *API) unpinHandler(w http.ResponseWriter, r *http.Request) {
if ps := api.parseCidOrError(w, r); ps.Cid != "" {
logger.Debugf("rest api unpinHandler: %s", ps.Cid)
// span.AddAttributes(trace.StringAttribute("cid", ps.Cid))
if pin := api.parseCidOrError(w, r); pin != nil {
logger.Debugf("rest api unpinHandler: %s", pin.Cid)
// span.AddAttributes(trace.StringAttribute("cid", pin.Cid))
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"Unpin",
ps,
pin.Cid,
&struct{}{},
)
api.sendResponse(w, http.StatusAccepted, err, nil)
@ -702,8 +706,8 @@ func (api *API) unpinHandler(w http.ResponseWriter, r *http.Request) {
}
func (api *API) pinPathHandler(w http.ResponseWriter, r *http.Request) {
var pin types.PinSerial
if pinpath := api.parsePinPathOrError(w, r); pinpath.Path != "" {
var pin types.Pin
if pinpath := api.parsePinPathOrError(w, r); pinpath != nil {
logger.Debugf("rest api pinPathHandler: %s", pinpath.Path)
err := api.rpcClient.CallContext(
r.Context(),
@ -720,8 +724,8 @@ func (api *API) pinPathHandler(w http.ResponseWriter, r *http.Request) {
}
func (api *API) unpinPathHandler(w http.ResponseWriter, r *http.Request) {
var pin types.PinSerial
if pinpath := api.parsePinPathOrError(w, r); pinpath.Path != "" {
var pin types.Pin
if pinpath := api.parsePinPathOrError(w, r); pinpath != nil {
logger.Debugf("rest api unpinPathHandler: %s", pinpath.Path)
err := api.rpcClient.CallContext(
r.Context(),
@ -743,7 +747,7 @@ func (api *API) allocationsHandler(w http.ResponseWriter, r *http.Request) {
for _, f := range strings.Split(filterStr, ",") {
filter |= types.PinTypeFromString(f)
}
var pins []types.PinSerial
var pins []*types.Pin
err := api.rpcClient.CallContext(
r.Context(),
"",
@ -752,50 +756,50 @@ func (api *API) allocationsHandler(w http.ResponseWriter, r *http.Request) {
struct{}{},
&pins,
)
outPins := make([]types.PinSerial, 0)
for _, pinS := range pins {
if uint64(filter)&pinS.Type > 0 {
outPins := make([]*types.Pin, 0)
for _, pin := range pins {
if filter&pin.Type > 0 {
// add this pin to output
outPins = append(outPins, pinS)
outPins = append(outPins, pin)
}
}
api.sendResponse(w, autoStatus, err, outPins)
}
func (api *API) allocationHandler(w http.ResponseWriter, r *http.Request) {
if ps := api.parseCidOrError(w, r); ps.Cid != "" {
var pin types.PinSerial
if pin := api.parseCidOrError(w, r); pin != nil {
var pinResp types.Pin
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"PinGet",
ps,
&pin,
pin.Cid,
&pinResp,
)
if err != nil { // errors here are 404s
api.sendResponse(w, http.StatusNotFound, err, nil)
return
}
api.sendResponse(w, autoStatus, nil, pin)
api.sendResponse(w, autoStatus, nil, pinResp)
}
}
// filterGlobalPinInfos takes a GlobalPinInfo slice and discards
// any item in it which does not carry a PinInfo matching the
// filter (OR-wise).
func filterGlobalPinInfos(globalPinInfos []types.GlobalPinInfoSerial, filter types.TrackerStatus) []types.GlobalPinInfoSerial {
func filterGlobalPinInfos(globalPinInfos []*types.GlobalPinInfo, filter types.TrackerStatus) []*types.GlobalPinInfo {
if filter == types.TrackerStatusUndefined {
return globalPinInfos
}
var filteredGlobalPinInfos []types.GlobalPinInfoSerial
var filteredGlobalPinInfos []*types.GlobalPinInfo
for _, globalPinInfo := range globalPinInfos {
for _, pinInfo := range globalPinInfo.PeerMap {
st := types.TrackerStatusFromString(pinInfo.Status)
// silenced the error because we should have detected earlier if filters were invalid
if st.Match(filter) {
// silenced the error because we should have detected
// earlier if filters were invalid
if pinInfo.Status.Match(filter) {
filteredGlobalPinInfos = append(filteredGlobalPinInfos, globalPinInfo)
break
}
@ -809,7 +813,7 @@ func (api *API) statusAllHandler(w http.ResponseWriter, r *http.Request) {
queryValues := r.URL.Query()
local := queryValues.Get("local")
var globalPinInfos []types.GlobalPinInfoSerial
var globalPinInfos []*types.GlobalPinInfo
filterStr := queryValues.Get("filter")
filter := types.TrackerStatusFromString(filterStr)
@ -819,7 +823,7 @@ func (api *API) statusAllHandler(w http.ResponseWriter, r *http.Request) {
}
if local == "true" {
var pinInfos []types.PinInfoSerial
var pinInfos []*types.PinInfo
err := api.rpcClient.CallContext(
r.Context(),
@ -858,26 +862,26 @@ func (api *API) statusHandler(w http.ResponseWriter, r *http.Request) {
queryValues := r.URL.Query()
local := queryValues.Get("local")
if ps := api.parseCidOrError(w, r); ps.Cid != "" {
if pin := api.parseCidOrError(w, r); pin != nil {
if local == "true" {
var pinInfo types.PinInfoSerial
var pinInfo types.PinInfo
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"StatusLocal",
ps,
pin.Cid,
&pinInfo,
)
api.sendResponse(w, autoStatus, err, pinInfoToGlobal(pinInfo))
api.sendResponse(w, autoStatus, err, pinInfoToGlobal(&pinInfo))
} else {
var pinInfo types.GlobalPinInfoSerial
var pinInfo types.GlobalPinInfo
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"Status",
ps,
pin.Cid,
&pinInfo,
)
api.sendResponse(w, autoStatus, err, pinInfo)
@ -890,7 +894,7 @@ func (api *API) syncAllHandler(w http.ResponseWriter, r *http.Request) {
local := queryValues.Get("local")
if local == "true" {
var pinInfos []types.PinInfoSerial
var pinInfos []*types.PinInfo
err := api.rpcClient.CallContext(
r.Context(),
"",
@ -901,7 +905,7 @@ func (api *API) syncAllHandler(w http.ResponseWriter, r *http.Request) {
)
api.sendResponse(w, autoStatus, err, pinInfosToGlobal(pinInfos))
} else {
var pinInfos []types.GlobalPinInfoSerial
var pinInfos []*types.GlobalPinInfo
err := api.rpcClient.CallContext(
r.Context(),
"",
@ -918,26 +922,26 @@ func (api *API) syncHandler(w http.ResponseWriter, r *http.Request) {
queryValues := r.URL.Query()
local := queryValues.Get("local")
if ps := api.parseCidOrError(w, r); ps.Cid != "" {
if pin := api.parseCidOrError(w, r); pin != nil {
if local == "true" {
var pinInfo types.PinInfoSerial
var pinInfo types.PinInfo
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"SyncLocal",
ps,
pin.Cid,
&pinInfo,
)
api.sendResponse(w, autoStatus, err, pinInfoToGlobal(pinInfo))
api.sendResponse(w, autoStatus, err, pinInfoToGlobal(&pinInfo))
} else {
var pinInfo types.GlobalPinInfoSerial
var pinInfo types.GlobalPinInfo
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"Sync",
ps,
pin.Cid,
&pinInfo,
)
api.sendResponse(w, autoStatus, err, pinInfo)
@ -949,7 +953,7 @@ func (api *API) recoverAllHandler(w http.ResponseWriter, r *http.Request) {
queryValues := r.URL.Query()
local := queryValues.Get("local")
if local == "true" {
var pinInfos []types.PinInfoSerial
var pinInfos []*types.PinInfo
err := api.rpcClient.CallContext(
r.Context(),
"",
@ -968,26 +972,26 @@ func (api *API) recoverHandler(w http.ResponseWriter, r *http.Request) {
queryValues := r.URL.Query()
local := queryValues.Get("local")
if ps := api.parseCidOrError(w, r); ps.Cid != "" {
if pin := api.parseCidOrError(w, r); pin != nil {
if local == "true" {
var pinInfo types.PinInfoSerial
var pinInfo types.PinInfo
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"RecoverLocal",
ps,
pin.Cid,
&pinInfo,
)
api.sendResponse(w, autoStatus, err, pinInfoToGlobal(pinInfo))
api.sendResponse(w, autoStatus, err, pinInfoToGlobal(&pinInfo))
} else {
var pinInfo types.GlobalPinInfoSerial
var pinInfo types.GlobalPinInfo
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"Recover",
ps,
pin.Cid,
&pinInfo,
)
api.sendResponse(w, autoStatus, err, pinInfo)
@ -995,37 +999,34 @@ func (api *API) recoverHandler(w http.ResponseWriter, r *http.Request) {
}
}
func (api *API) parsePinPathOrError(w http.ResponseWriter, r *http.Request) types.PinPath {
func (api *API) parsePinPathOrError(w http.ResponseWriter, r *http.Request) *types.PinPath {
vars := mux.Vars(r)
urlpath := "/" + vars["keyType"] + "/" + strings.TrimSuffix(vars["path"], "/")
path, err := gopath.ParsePath(urlpath)
if err != nil {
api.sendResponse(w, http.StatusBadRequest, errors.New("error parsing path: "+err.Error()), nil)
return types.PinPath{}
return nil
}
pinPath := types.PinPath{Path: path.String()}
pinPath := &types.PinPath{Path: path.String()}
pinPath.PinOptions.FromQuery(r.URL.Query())
return pinPath
}
func (api *API) parseCidOrError(w http.ResponseWriter, r *http.Request) types.PinSerial {
func (api *API) parseCidOrError(w http.ResponseWriter, r *http.Request) *types.Pin {
vars := mux.Vars(r)
hash := vars["hash"]
_, err := cid.Decode(hash)
c, err := cid.Decode(hash)
if err != nil {
api.sendResponse(w, http.StatusBadRequest, errors.New("error decoding Cid: "+err.Error()), nil)
return types.PinSerial{Cid: ""}
return nil
}
pin := types.PinSerial{
Cid: hash,
Type: uint64(types.DataType),
}
pin.PinOptions.FromQuery(r.URL.Query())
opts := types.PinOptions{}
opts.FromQuery(r.URL.Query())
pin := types.PinWithOpts(c, opts)
pin.MaxDepth = -1 // For now, all pins are recursive
return pin
}
@ -1041,17 +1042,17 @@ func (api *API) parsePidOrError(w http.ResponseWriter, r *http.Request) peer.ID
return pid
}
func pinInfoToGlobal(pInfo types.PinInfoSerial) types.GlobalPinInfoSerial {
return types.GlobalPinInfoSerial{
func pinInfoToGlobal(pInfo *types.PinInfo) *types.GlobalPinInfo {
return &types.GlobalPinInfo{
Cid: pInfo.Cid,
PeerMap: map[string]types.PinInfoSerial{
pInfo.Peer: pInfo,
PeerMap: map[string]*types.PinInfo{
peer.IDB58Encode(pInfo.Peer): pInfo,
},
}
}
func pinInfosToGlobal(pInfos []types.PinInfoSerial) []types.GlobalPinInfoSerial {
gPInfos := make([]types.GlobalPinInfoSerial, len(pInfos), len(pInfos))
func pinInfosToGlobal(pInfos []*types.PinInfo) []*types.GlobalPinInfo {
gPInfos := make([]*types.GlobalPinInfo, len(pInfos), len(pInfos))
for i, p := range pInfos {
gPInfos[i] = pinInfoToGlobal(p)
}

View File

@ -98,7 +98,6 @@ func processResp(t *testing.T, httpResp *http.Response, err error, resp interfac
if err != nil {
t.Fatal("error reading body: ", err)
}
if len(body) != 0 {
err = json.Unmarshal(body, resp)
if err != nil {
@ -307,17 +306,17 @@ func TestRestAPIIDEndpoint(t *testing.T) {
defer httpsrest.Shutdown(ctx)
tf := func(t *testing.T, url urlF) {
id := api.IDSerial{}
id := api.ID{}
makeGet(t, rest, url(rest)+"/id", &id)
if id.ID != test.TestPeerID1.Pretty() {
if id.ID.Pretty() != test.TestPeerID1.Pretty() {
t.Error("expected correct id")
}
}
httpstf := func(t *testing.T, url urlF) {
id := api.IDSerial{}
id := api.ID{}
makeGet(t, httpsrest, url(httpsrest)+"/id", &id)
if id.ID != test.TestPeerID1.Pretty() {
if id.ID.Pretty() != test.TestPeerID1.Pretty() {
t.Error("expected correct id")
}
}
@ -348,12 +347,12 @@ func TestAPIPeerstEndpoint(t *testing.T) {
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url urlF) {
var list []api.IDSerial
var list []*api.ID
makeGet(t, rest, url(rest)+"/peers", &list)
if len(list) != 1 {
t.Fatal("expected 1 element")
}
if list[0].ID != test.TestPeerID1.Pretty() {
if list[0].ID.Pretty() != test.TestPeerID1.Pretty() {
t.Error("expected a different peer id list: ", list)
}
}
@ -367,13 +366,12 @@ func TestAPIPeerAddEndpoint(t *testing.T) {
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url urlF) {
id := api.IDSerial{}
id := api.ID{}
// post with valid body
body := fmt.Sprintf("{\"peer_id\":\"%s\"}", test.TestPeerID1.Pretty())
t.Log(body)
makePost(t, rest, url(rest)+"/peers", []byte(body), &id)
if id.ID != test.TestPeerID1.Pretty() {
if id.ID.Pretty() != test.TestPeerID1.Pretty() {
t.Error("expected correct ID")
}
if id.Error != "" {
@ -532,9 +530,9 @@ func TestConnectGraphEndpoint(t *testing.T) {
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url urlF) {
var cg api.ConnectGraphSerial
var cg api.ConnectGraph
makeGet(t, rest, url(rest)+"/health/graph", &cg)
if cg.ClusterID != test.TestPeerID1.Pretty() {
if cg.ClusterID.Pretty() != test.TestPeerID1.Pretty() {
t.Error("unexpected cluster id")
}
if len(cg.IPFSLinks) != 3 {
@ -547,12 +545,12 @@ func TestConnectGraphEndpoint(t *testing.T) {
t.Error("unexpected number of cluster to ipfs links")
}
// test a few link values
pid1 := test.TestPeerID1.Pretty()
pid4 := test.TestPeerID4.Pretty()
if _, ok := cg.ClustertoIPFS[pid1]; !ok {
pid1 := test.TestPeerID1
pid4 := test.TestPeerID4
if _, ok := cg.ClustertoIPFS[peer.IDB58Encode(pid1)]; !ok {
t.Fatal("missing cluster peer 1 from cluster to peer links map")
}
if cg.ClustertoIPFS[pid1] != pid4 {
if cg.ClustertoIPFS[peer.IDB58Encode(pid1)] != pid4 {
t.Error("unexpected ipfs peer mapped to cluster peer 1 in graph")
}
}
@ -645,10 +643,10 @@ func TestAPIPinEndpointWithPath(t *testing.T) {
}
continue
}
pin := api.PinSerial{}
pin := api.Pin{}
makePost(t, rest, url(rest)+"/pins"+testCase.WithQuery(), []byte{}, &pin)
if !pin.ToPin().Equals(resultantPin) {
t.Errorf("expected different pin,\n expected: %+v,\n actual: %+v,\n path: %s\n", resultantPin.ToSerial(), pin, testCase.path)
if !pin.Equals(resultantPin) {
t.Errorf("expected different pin,\n expected: %+v,\n actual: %+v,\n path: %s\n", resultantPin, pin, testCase.path)
}
}
}
@ -695,9 +693,9 @@ func TestAPIUnpinEndpointWithPath(t *testing.T) {
}
continue
}
pin := api.PinSerial{}
pin := api.Pin{}
makeDelete(t, rest, url(rest)+"/pins"+testCase.path, &pin)
if pin.Cid != test.TestCidResolved {
if pin.Cid.String() != test.TestCidResolved {
t.Errorf("expected different cid, expected: %s, actual: %s, path: %s\n", test.TestCidResolved, pin.Cid, testCase.path)
}
}
@ -712,11 +710,11 @@ func TestAPIAllocationsEndpoint(t *testing.T) {
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url urlF) {
var resp []api.PinSerial
var resp []*api.Pin
makeGet(t, rest, url(rest)+"/allocations?filter=pin,meta-pin", &resp)
if len(resp) != 3 ||
resp[0].Cid != test.TestCid1 || resp[1].Cid != test.TestCid2 ||
resp[2].Cid != test.TestCid3 {
resp[0].Cid.String() != test.TestCid1 || resp[1].Cid.String() != test.TestCid2 ||
resp[2].Cid.String() != test.TestCid3 {
t.Error("unexpected pin list: ", resp)
}
}
@ -730,10 +728,10 @@ func TestAPIAllocationEndpoint(t *testing.T) {
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url urlF) {
var resp api.PinSerial
var resp api.Pin
makeGet(t, rest, url(rest)+"/allocations/"+test.TestCid1, &resp)
if resp.Cid != test.TestCid1 {
t.Error("cid should be the same")
if resp.Cid.String() != test.TestCid1 {
t.Errorf("cid should be the same: %s %s", resp.Cid, test.TestCid1)
}
errResp := api.Error{}
@ -752,7 +750,7 @@ func TestAPIMetricsEndpoint(t *testing.T) {
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url urlF) {
var resp []api.MetricSerial
var resp []*api.Metric
makeGet(t, rest, url(rest)+"/monitor/metrics/somemetricstype", &resp)
if len(resp) == 0 {
t.Fatal("No metrics found")
@ -761,7 +759,7 @@ func TestAPIMetricsEndpoint(t *testing.T) {
if m.Name != "test" {
t.Error("Unexpected metric name: ", m.Name)
}
if m.Peer != test.TestPeerID1.Pretty() {
if m.Peer.Pretty() != test.TestPeerID1.Pretty() {
t.Error("Unexpected peer id: ", m.Peer)
}
}
@ -776,47 +774,54 @@ func TestAPIStatusAllEndpoint(t *testing.T) {
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url urlF) {
var resp []api.GlobalPinInfoSerial
var resp []*api.GlobalPinInfo
makeGet(t, rest, url(rest)+"/pins", &resp)
if len(resp) != 3 ||
resp[0].Cid != test.TestCid1 ||
resp[1].PeerMap[test.TestPeerID1.Pretty()].Status != "pinning" {
t.Errorf("unexpected statusAll resp:\n %+v", resp)
resp[0].Cid.String() != test.TestCid1 ||
resp[1].PeerMap[peer.IDB58Encode(test.TestPeerID1)].Status.String() != "pinning" {
t.Errorf("unexpected statusAll resp")
for _, gpi := range resp {
t.Errorf("%s:\n", gpi.Cid)
for k, v := range gpi.PeerMap {
t.Errorf("%s: %+v\n", k, v)
}
}
}
// Test local=true
var resp2 []api.GlobalPinInfoSerial
var resp2 []*api.GlobalPinInfo
makeGet(t, rest, url(rest)+"/pins?local=true", &resp2)
if len(resp2) != 2 {
t.Errorf("unexpected statusAll+local resp:\n %+v", resp2)
}
// Test with filter
var resp3 []api.GlobalPinInfoSerial
var resp3 []*api.GlobalPinInfo
makeGet(t, rest, url(rest)+"/pins?filter=queued", &resp3)
if len(resp3) != 0 {
t.Errorf("unexpected statusAll+filter=queued resp:\n %+v", resp3)
}
var resp4 []api.GlobalPinInfoSerial
var resp4 []*api.GlobalPinInfo
makeGet(t, rest, url(rest)+"/pins?filter=pinned", &resp4)
if len(resp4) != 1 {
t.Errorf("unexpected statusAll+filter=pinned resp:\n %+v", resp4)
}
var resp5 []api.GlobalPinInfoSerial
var resp5 []*api.GlobalPinInfo
makeGet(t, rest, url(rest)+"/pins?filter=pin_error", &resp5)
if len(resp5) != 1 {
t.Errorf("unexpected statusAll+filter=pin_error resp:\n %+v", resp5)
}
var resp6 []api.GlobalPinInfoSerial
var resp6 []*api.GlobalPinInfo
makeGet(t, rest, url(rest)+"/pins?filter=error", &resp6)
if len(resp6) != 1 {
t.Errorf("unexpected statusAll+filter=error resp:\n %+v", resp6)
}
var resp7 []api.GlobalPinInfoSerial
var resp7 []*api.GlobalPinInfo
makeGet(t, rest, url(rest)+"/pins?filter=error,pinned", &resp7)
if len(resp7) != 2 {
t.Errorf("unexpected statusAll+filter=error,pinned resp:\n %+v", resp7)
@ -832,32 +837,32 @@ func TestAPIStatusEndpoint(t *testing.T) {
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url urlF) {
var resp api.GlobalPinInfoSerial
var resp api.GlobalPinInfo
makeGet(t, rest, url(rest)+"/pins/"+test.TestCid1, &resp)
if resp.Cid != test.TestCid1 {
if resp.Cid.String() != test.TestCid1 {
t.Error("expected the same cid")
}
info, ok := resp.PeerMap[test.TestPeerID1.Pretty()]
info, ok := resp.PeerMap[peer.IDB58Encode(test.TestPeerID1)]
if !ok {
t.Fatal("expected info for test.TestPeerID1")
}
if info.Status != "pinned" {
if info.Status.String() != "pinned" {
t.Error("expected different status")
}
// Test local=true
var resp2 api.GlobalPinInfoSerial
var resp2 api.GlobalPinInfo
makeGet(t, rest, url(rest)+"/pins/"+test.TestCid1+"?local=true", &resp2)
if resp2.Cid != test.TestCid1 {
if resp2.Cid.String() != test.TestCid1 {
t.Error("expected the same cid")
}
info, ok = resp2.PeerMap[test.TestPeerID2.Pretty()]
info, ok = resp2.PeerMap[peer.IDB58Encode(test.TestPeerID2)]
if !ok {
t.Fatal("expected info for test.TestPeerID2")
}
if info.Status != "pinned" {
if info.Status.String() != "pinned" {
t.Error("expected different status")
}
}
@ -871,17 +876,17 @@ func TestAPISyncAllEndpoint(t *testing.T) {
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url urlF) {
var resp []api.GlobalPinInfoSerial
var resp []*api.GlobalPinInfo
makePost(t, rest, url(rest)+"/pins/sync", []byte{}, &resp)
if len(resp) != 3 ||
resp[0].Cid != test.TestCid1 ||
resp[1].PeerMap[test.TestPeerID1.Pretty()].Status != "pinning" {
resp[0].Cid.String() != test.TestCid1 ||
resp[1].PeerMap[peer.IDB58Encode(test.TestPeerID1)].Status.String() != "pinning" {
t.Errorf("unexpected syncAll resp:\n %+v", resp)
}
// Test local=true
var resp2 []api.GlobalPinInfoSerial
var resp2 []*api.GlobalPinInfo
makePost(t, rest, url(rest)+"/pins/sync?local=true", []byte{}, &resp2)
if len(resp2) != 2 {
@ -898,32 +903,32 @@ func TestAPISyncEndpoint(t *testing.T) {
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url urlF) {
var resp api.GlobalPinInfoSerial
var resp api.GlobalPinInfo
makePost(t, rest, url(rest)+"/pins/"+test.TestCid1+"/sync", []byte{}, &resp)
if resp.Cid != test.TestCid1 {
if resp.Cid.String() != test.TestCid1 {
t.Error("expected the same cid")
}
info, ok := resp.PeerMap[test.TestPeerID1.Pretty()]
info, ok := resp.PeerMap[peer.IDB58Encode(test.TestPeerID1)]
if !ok {
t.Fatal("expected info for test.TestPeerID1")
}
if info.Status != "pinned" {
if info.Status.String() != "pinned" {
t.Error("expected different status")
}
// Test local=true
var resp2 api.GlobalPinInfoSerial
var resp2 api.GlobalPinInfo
makePost(t, rest, url(rest)+"/pins/"+test.TestCid1+"/sync?local=true", []byte{}, &resp2)
if resp2.Cid != test.TestCid1 {
if resp2.Cid.String() != test.TestCid1 {
t.Error("expected the same cid")
}
info, ok = resp2.PeerMap[test.TestPeerID2.Pretty()]
info, ok = resp2.PeerMap[peer.IDB58Encode(test.TestPeerID2)]
if !ok {
t.Fatal("expected info for test.TestPeerID2")
}
if info.Status != "pinned" {
if info.Status.String() != "pinned" {
t.Error("expected different status")
}
}
@ -937,17 +942,17 @@ func TestAPIRecoverEndpoint(t *testing.T) {
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url urlF) {
var resp api.GlobalPinInfoSerial
var resp api.GlobalPinInfo
makePost(t, rest, url(rest)+"/pins/"+test.TestCid1+"/recover", []byte{}, &resp)
if resp.Cid != test.TestCid1 {
if resp.Cid.String() != test.TestCid1 {
t.Error("expected the same cid")
}
info, ok := resp.PeerMap[test.TestPeerID1.Pretty()]
info, ok := resp.PeerMap[peer.IDB58Encode(test.TestPeerID1)]
if !ok {
t.Fatal("expected info for test.TestPeerID1")
}
if info.Status != "pinned" {
if info.Status.String() != "pinned" {
t.Error("expected different status")
}
}
@ -961,7 +966,7 @@ func TestAPIRecoverAllEndpoint(t *testing.T) {
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url urlF) {
var resp []api.GlobalPinInfoSerial
var resp []*api.GlobalPinInfo
makePost(t, rest, url(rest)+"/pins/recover?local=true", []byte{}, &resp)
if len(resp) != 0 {

View File

@ -9,7 +9,6 @@
package api
import (
"bytes"
"encoding/json"
"fmt"
"net/url"
@ -26,7 +25,7 @@ import (
logging "github.com/ipfs/go-log"
peer "github.com/libp2p/go-libp2p-peer"
protocol "github.com/libp2p/go-libp2p-protocol"
ma "github.com/multiformats/go-multiaddr"
multiaddr "github.com/multiformats/go-multiaddr"
// needed to parse /ws multiaddresses
_ "github.com/libp2p/go-ws-transport"
@ -130,6 +129,23 @@ func (st TrackerStatus) Match(filter TrackerStatus) bool {
return filter == 0 || st&filter > 0
}
// MarshalJSON uses the string representation of TrackerStatus for JSON
// encoding.
func (st TrackerStatus) MarshalJSON() ([]byte, error) {
return json.Marshal(st.String())
}
// UnmarshalJSON sets a tracker status from its JSON representation.
func (st *TrackerStatus) UnmarshalJSON(data []byte) error {
var v string
err := json.Unmarshal(data, &v)
if err != nil {
return err
}
*st = TrackerStatusFromString(v)
return nil
}
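
A minimal sketch (not part of the diff) of how the new methods round-trip a status through its human-readable string form; the helper name is made up and assumes package api:

// illustrative only
func trackerStatusJSONRoundTrip() (TrackerStatus, error) {
	b, err := json.Marshal(TrackerStatusPinned) // b == []byte(`"pinned"`)
	if err != nil {
		return TrackerStatusUndefined, err
	}
	var st TrackerStatus
	err = json.Unmarshal(b, &st) // st == TrackerStatusPinned again
	return st, err
}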
// TrackerStatusFromString parses a string and returns the matching
// TrackerStatus value. The string can be a comma-separated list
// representing a TrackerStatus filter. Unknown status names are
@ -225,157 +241,26 @@ var ipfsPinStatus2TrackerStatusMap = map[IPFSPinStatus]TrackerStatus{
// GlobalPinInfo contains cluster-wide status information about a tracked Cid,
// indexed by cluster peer.
type GlobalPinInfo struct {
Cid cid.Cid
PeerMap map[peer.ID]PinInfo
}
// GlobalPinInfoSerial is the serializable version of GlobalPinInfo.
type GlobalPinInfoSerial struct {
Cid string `json:"cid"`
PeerMap map[string]PinInfoSerial `json:"peer_map"`
}
// ToSerial converts a GlobalPinInfo to its serializable version.
func (gpi GlobalPinInfo) ToSerial() GlobalPinInfoSerial {
s := GlobalPinInfoSerial{}
if gpi.Cid.Defined() {
s.Cid = gpi.Cid.String()
}
s.PeerMap = make(map[string]PinInfoSerial)
for k, v := range gpi.PeerMap {
s.PeerMap[peer.IDB58Encode(k)] = v.ToSerial()
}
return s
}
// ToGlobalPinInfo converts a GlobalPinInfoSerial to its native version.
func (gpis GlobalPinInfoSerial) ToGlobalPinInfo() GlobalPinInfo {
c, err := cid.Decode(gpis.Cid)
if err != nil {
logger.Debug(gpis.Cid, err)
}
gpi := GlobalPinInfo{
Cid: c,
PeerMap: make(map[peer.ID]PinInfo),
}
for k, v := range gpis.PeerMap {
p, err := peer.IDB58Decode(k)
if err != nil {
logger.Error(k, err)
}
gpi.PeerMap[p] = v.ToPinInfo()
}
return gpi
Cid cid.Cid `json:"cid" codec:"c,omitempty"`
// https://github.com/golang/go/issues/28827
// Peer IDs have a string Kind(), and encoding/json ignores
// TextMarshaler for string-kind map keys, so we key this map by string.
PeerMap map[string]*PinInfo `json:"peer_map" codec:"pm,omitempty"`
}
// PinInfo holds information about local pins.
type PinInfo struct {
Cid cid.Cid
Peer peer.ID
PeerName string
Status TrackerStatus
TS time.Time
Error string
}
// PinInfoSerial is a serializable version of PinInfo.
// information is marked as
type PinInfoSerial struct {
Cid string `json:"cid"`
Peer string `json:"peer"`
PeerName string `json:"peername"`
Status string `json:"status"`
TS string `json:"timestamp"`
Error string `json:"error"`
}
// ToSerial converts a PinInfo to its serializable version.
func (pi PinInfo) ToSerial() PinInfoSerial {
c := ""
if pi.Cid.Defined() {
c = pi.Cid.String()
}
p := ""
if pi.Peer != "" {
p = peer.IDB58Encode(pi.Peer)
}
return PinInfoSerial{
Cid: c,
Peer: p,
PeerName: pi.PeerName,
Status: pi.Status.String(),
TS: pi.TS.UTC().Format(time.RFC3339),
Error: pi.Error,
}
}
// ToPinInfo converts a PinInfoSerial to its native version.
func (pis PinInfoSerial) ToPinInfo() PinInfo {
c, err := cid.Decode(pis.Cid)
if err != nil {
logger.Debug(pis.Cid, err)
}
p, err := peer.IDB58Decode(pis.Peer)
if err != nil {
logger.Debug(pis.Peer, err)
}
ts, err := time.Parse(time.RFC3339, pis.TS)
if err != nil {
logger.Debug(pis.TS, err)
}
return PinInfo{
Cid: c,
Peer: p,
PeerName: pis.PeerName,
Status: TrackerStatusFromString(pis.Status),
TS: ts,
Error: pis.Error,
}
Cid cid.Cid `json:"cid" codec:"c,omitempty"`
Peer peer.ID `json:"peer" codec:"p,omitempty"`
PeerName string `json:"peername" codec:"pn,omitempty"`
Status TrackerStatus `json:"status" codec:"st,omitempty"`
TS time.Time `json:"timestamp" codec:"ts,omitempty"`
Error string `json:"error" codec:"e,omitempty"`
}
// Version holds version information
type Version struct {
Version string `json:"Version"`
}
// IPFSID is used to store information about the underlying IPFS daemon
type IPFSID struct {
ID peer.ID
Addresses []ma.Multiaddr
Error string
}
// IPFSIDSerial is the serializable IPFSID for RPC requests
type IPFSIDSerial struct {
ID string `json:"id"`
Addresses MultiaddrsSerial `json:"addresses"`
Error string `json:"error"`
}
// ToSerial converts IPFSID to a go serializable object
func (id *IPFSID) ToSerial() IPFSIDSerial {
p := ""
if id.ID != "" {
p = peer.IDB58Encode(id.ID)
}
return IPFSIDSerial{
ID: p,
Addresses: MultiaddrsToSerial(id.Addresses),
Error: id.Error,
}
}
// ToIPFSID converts an IPFSIDSerial to IPFSID
func (ids *IPFSIDSerial) ToIPFSID() IPFSID {
id := IPFSID{}
if pID, err := peer.IDB58Decode(ids.ID); err == nil {
id.ID = pID
}
id.Addresses = ids.Addresses.ToMultiaddrs()
id.Error = ids.Error
return id
Version string `json:"Version" codec:"v,omitempty"`
}
// ConnectGraph holds information about the connectivity of the cluster
@ -391,236 +276,72 @@ func (ids *IPFSIDSerial) ToIPFSID() IPFSID {
// then id will be a key of IPFSLinks. In the event of a SwarmPeers error
// IPFSLinks[id] == [].
type ConnectGraph struct {
ClusterID peer.ID
IPFSLinks map[peer.ID][]peer.ID // ipfs to ipfs links
ClusterLinks map[peer.ID][]peer.ID // cluster to cluster links
ClustertoIPFS map[peer.ID]peer.ID // cluster to ipfs links
ClusterID peer.ID
// ipfs to ipfs links
IPFSLinks map[string][]peer.ID `json:"ipfs_links" codec:"il,omitempty"`
// cluster to cluster links
ClusterLinks map[string][]peer.ID `json:"cluster_links" codec:"cl,omitempty"`
// cluster to ipfs links
ClustertoIPFS map[string]peer.ID `json:"cluster_to_ipfs" codec:"ci,omitempty"`
}
// ConnectGraphSerial is the serializable ConnectGraph counterpart for RPC requests
type ConnectGraphSerial struct {
ClusterID string
IPFSLinks map[string][]string `json:"ipfs_links"`
ClusterLinks map[string][]string `json:"cluster_links"`
ClustertoIPFS map[string]string `json:"cluster_to_ipfs"`
// Multiaddr is a utility type wrapping a Multiaddress
type Multiaddr struct {
multiaddr.Multiaddr
}
// ToSerial converts a ConnectGraph to its Go-serializable version
func (cg ConnectGraph) ToSerial() ConnectGraphSerial {
IPFSLinksSerial := serializeLinkMap(cg.IPFSLinks)
ClusterLinksSerial := serializeLinkMap(cg.ClusterLinks)
ClustertoIPFSSerial := make(map[string]string)
for k, v := range cg.ClustertoIPFS {
ClustertoIPFSSerial[peer.IDB58Encode(k)] = peer.IDB58Encode(v)
}
return ConnectGraphSerial{
ClusterID: peer.IDB58Encode(cg.ClusterID),
IPFSLinks: IPFSLinksSerial,
ClusterLinks: ClusterLinksSerial,
ClustertoIPFS: ClustertoIPFSSerial,
}
// NewMultiaddr parses a string-form multiaddress and wraps it.
func NewMultiaddr(mstr string) (Multiaddr, error) {
m, err := multiaddr.NewMultiaddr(mstr)
return Multiaddr{Multiaddr: m}, err
}
// ToConnectGraph converts a ConnectGraphSerial to a ConnectGraph
func (cgs ConnectGraphSerial) ToConnectGraph() ConnectGraph {
ClustertoIPFS := make(map[peer.ID]peer.ID)
for k, v := range cgs.ClustertoIPFS {
pid1, _ := peer.IDB58Decode(k)
pid2, _ := peer.IDB58Decode(v)
ClustertoIPFS[pid1] = pid2
}
pid, _ := peer.IDB58Decode(cgs.ClusterID)
return ConnectGraph{
ClusterID: pid,
IPFSLinks: deserializeLinkMap(cgs.IPFSLinks),
ClusterLinks: deserializeLinkMap(cgs.ClusterLinks),
ClustertoIPFS: ClustertoIPFS,
}
// NewMultiaddrWithValue wraps an existing multiaddr.Multiaddr.
func NewMultiaddrWithValue(ma multiaddr.Multiaddr) Multiaddr {
return Multiaddr{Multiaddr: ma}
}
func serializeLinkMap(Links map[peer.ID][]peer.ID) map[string][]string {
LinksSerial := make(map[string][]string)
for k, v := range Links {
kS := peer.IDB58Encode(k)
LinksSerial[kS] = PeersToStrings(v)
}
return LinksSerial
func (maddr Multiaddr) MarshalJSON() ([]byte, error) {
return maddr.Multiaddr.MarshalJSON()
}
func deserializeLinkMap(LinksSerial map[string][]string) map[peer.ID][]peer.ID {
Links := make(map[peer.ID][]peer.ID)
for k, v := range LinksSerial {
pid, _ := peer.IDB58Decode(k)
Links[pid] = StringsToPeers(v)
}
return Links
func (maddr *Multiaddr) UnmarshalJSON(data []byte) error {
maddr.Multiaddr, _ = multiaddr.NewMultiaddr("")
return maddr.Multiaddr.UnmarshalJSON(data)
}
// SwarmPeers lists an ipfs daemon's peers
type SwarmPeers []peer.ID
// SwarmPeersSerial is the serialized form of SwarmPeers for RPC use
type SwarmPeersSerial []string
// ToSerial converts SwarmPeers to its Go-serializeable version
func (swarm SwarmPeers) ToSerial() SwarmPeersSerial {
return PeersToStrings(swarm)
func (maddr Multiaddr) MarshalBinary() ([]byte, error) {
return maddr.Multiaddr.MarshalBinary()
}
func (maddr *Multiaddr) UnmarshalBinary(data []byte) error {
datacopy := make([]byte, len(data)) // copy: the decoder may reuse data's buffer after we return
copy(datacopy, data)
maddr.Multiaddr, _ = multiaddr.NewMultiaddr("")
return maddr.Multiaddr.UnmarshalBinary(datacopy)
}
// ToSwarmPeers converts a SwarmPeersSerial object to SwarmPeers.
func (swarmS SwarmPeersSerial) ToSwarmPeers() SwarmPeers {
return StringsToPeers(swarmS)
func (maddr Multiaddr) Value() multiaddr.Multiaddr {
return maddr.Multiaddr
}
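
A rough usage sketch (assumed, not part of the commit) of the new wrapper: multiaddresses now marshal directly to their string form in JSON and to their packed form in binary, with no MultiaddrSerial round-trip; the helper name is hypothetical:

// illustrative only
func multiaddrJSONRoundTrip() (Multiaddr, error) {
	addr, err := NewMultiaddr("/ip4/1.2.3.4/tcp/9096")
	if err != nil {
		return Multiaddr{}, err
	}
	j, err := json.Marshal(addr) // j holds the quoted string form of the address
	if err != nil {
		return Multiaddr{}, err
	}
	var back Multiaddr
	err = json.Unmarshal(j, &back) // back wraps the same multiaddress again
	return back, err
}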
// ID holds information about the Cluster peer
type ID struct {
ID peer.ID
Addresses []ma.Multiaddr
ClusterPeers []peer.ID
ClusterPeersAddresses []ma.Multiaddr
Version string
Commit string
RPCProtocolVersion protocol.ID
Error string
IPFS IPFSID
Peername string
ID peer.ID `json:"id" codec:"i,omitempty"`
Addresses []Multiaddr `json:"addresses" codec:"a,omitempty"`
ClusterPeers []peer.ID `json:"cluster_peers" codec:"cp,omitempty"`
ClusterPeersAddresses []Multiaddr `json:"cluster_peers_addresses" codec:"cpa,omitempty"`
Version string `json:"version" codec:"v,omitempty"`
Commit string `json:"commit" codec:"c,omitempty"`
RPCProtocolVersion protocol.ID `json:"rpc_protocol_version" codec:"rv,omitempty"`
Error string `json:"error" codec:"e,omitempty"`
IPFS IPFSID `json:"ipfs" codec:"ip,omitempty"`
Peername string `json:"peername" codec:"pn,omitempty"`
//PublicKey crypto.PubKey
}
// IDSerial is the serializable ID counterpart for RPC requests
type IDSerial struct {
ID string `json:"id"`
Addresses MultiaddrsSerial `json:"addresses"`
ClusterPeers []string `json:"cluster_peers"`
ClusterPeersAddresses MultiaddrsSerial `json:"cluster_peers_addresses"`
Version string `json:"version"`
Commit string `json:"commit"`
RPCProtocolVersion string `json:"rpc_protocol_version"`
Error string `json:"error"`
IPFS IPFSIDSerial `json:"ipfs"`
Peername string `json:"peername"`
//PublicKey []byte
}
// ToSerial converts an ID to its Go-serializable version
func (id ID) ToSerial() IDSerial {
//var pkey []byte
//if id.PublicKey != nil {
// pkey, _ = id.PublicKey.Bytes()
//}
p := ""
if id.ID != "" {
p = peer.IDB58Encode(id.ID)
}
return IDSerial{
ID: p,
Addresses: MultiaddrsToSerial(id.Addresses),
ClusterPeers: PeersToStrings(id.ClusterPeers),
ClusterPeersAddresses: MultiaddrsToSerial(id.ClusterPeersAddresses),
Version: id.Version,
Commit: id.Commit,
RPCProtocolVersion: string(id.RPCProtocolVersion),
Error: id.Error,
IPFS: id.IPFS.ToSerial(),
Peername: id.Peername,
//PublicKey: pkey,
}
}
// ToID converts an IDSerial object to ID.
// It will ignore any errors when parsing the fields.
func (ids IDSerial) ToID() ID {
id := ID{}
p, err := peer.IDB58Decode(ids.ID)
if err != nil {
logger.Debug(ids.ID, err)
}
id.ID = p
//if pkey, err := crypto.UnmarshalPublicKey(ids.PublicKey); err == nil {
// id.PublicKey = pkey
//}
id.Addresses = ids.Addresses.ToMultiaddrs()
id.ClusterPeers = StringsToPeers(ids.ClusterPeers)
id.ClusterPeersAddresses = ids.ClusterPeersAddresses.ToMultiaddrs()
id.Version = ids.Version
id.Commit = ids.Commit
id.RPCProtocolVersion = protocol.ID(ids.RPCProtocolVersion)
id.Error = ids.Error
id.IPFS = ids.IPFS.ToIPFSID()
id.Peername = ids.Peername
return id
}
// MultiaddrSerial is a Multiaddress in a serializable form
type MultiaddrSerial string
// MultiaddrsSerial is an array of Multiaddresses in serializable form
type MultiaddrsSerial []MultiaddrSerial
// MultiaddrToSerial converts a Multiaddress to its serializable form
func MultiaddrToSerial(addr ma.Multiaddr) MultiaddrSerial {
if addr != nil {
return MultiaddrSerial(addr.String())
}
return ""
}
// ToMultiaddr converts a serializable Multiaddress to its original type.
// All errors are ignored.
func (addrS MultiaddrSerial) ToMultiaddr() ma.Multiaddr {
str := string(addrS)
a, err := ma.NewMultiaddr(str)
if err != nil {
logger.Error(str, err)
}
return a
}
// MultiaddrsToSerial converts a slice of Multiaddresses to its
// serializable form.
func MultiaddrsToSerial(addrs []ma.Multiaddr) MultiaddrsSerial {
addrsS := make([]MultiaddrSerial, len(addrs), len(addrs))
for i, a := range addrs {
if a != nil {
addrsS[i] = MultiaddrToSerial(a)
}
}
return addrsS
}
// ToMultiaddrs converts MultiaddrsSerial back to a slice of Multiaddresses
func (addrsS MultiaddrsSerial) ToMultiaddrs() []ma.Multiaddr {
addrs := make([]ma.Multiaddr, len(addrsS), len(addrsS))
for i, addrS := range addrsS {
addrs[i] = addrS.ToMultiaddr()
}
return addrs
}
// CidsToStrings encodes cid.Cids to strings.
func CidsToStrings(cids []cid.Cid) []string {
strs := make([]string, len(cids))
for i, c := range cids {
strs[i] = c.String()
}
return strs
}
// StringsToCidSet decodes cid.Cids from strings.
func StringsToCidSet(strs []string) *cid.Set {
cids := cid.NewSet()
for _, str := range strs {
c, err := cid.Decode(str)
if err != nil {
logger.Error(str, err)
}
cids.Add(c)
}
return cids
// IPFSID is used to store information about the underlying IPFS daemon
type IPFSID struct {
ID peer.ID `json:"id" codec:"i,omitempty"`
Addresses []Multiaddr `json:"addresses" codec:"a,omitempty"`
Error string `json:"error" codec:"e,omitempty"`
}
// PinType specifies which sort of Pin object we are dealing with.
@ -819,23 +540,24 @@ func (po *PinOptions) FromQuery(q url.Values) {
type Pin struct {
PinOptions
Cid cid.Cid
Cid cid.Cid `json:"cid" codec:"c"`
// See PinType comments
Type PinType
Type PinType `json:"type" codec:"t,omitempty"`
// The peers to which this pin is allocated
Allocations []peer.ID
Allocations []peer.ID `json:"allocations" codec:"a,omitempty"`
// MaxDepth associated to this pin. -1 means
// recursive.
MaxDepth int
MaxDepth int `json:"max_depth" codec:"d,omitempty"`
// We carry a reference CID to this pin. For
// ClusterDAGs, it is the MetaPin CID. For the
// MetaPin it is the ClusterDAG CID. For Shards,
// it is the previous shard CID.
Reference cid.Cid
// When not needed the pointer is nil
Reference *cid.Cid `json:"reference" codec:"r,omitempty"`
}
// PinPath is a wrapper for holding pin options and path of the content.
@ -846,8 +568,8 @@ type PinPath struct {
// PinCid is a shortcut to create a Pin only with a Cid. Default is for pin to
// be recursive and the pin to be of DataType.
func PinCid(c cid.Cid) Pin {
return Pin{
func PinCid(c cid.Cid) *Pin {
return &Pin{
Cid: c,
Type: DataType,
Allocations: []peer.ID{},
@ -857,46 +579,12 @@ func PinCid(c cid.Cid) Pin {
// PinWithOpts creates a new Pin calling PinCid(c) and then sets
// its PinOptions fields with the given options.
func PinWithOpts(c cid.Cid, opts PinOptions) Pin {
func PinWithOpts(c cid.Cid, opts PinOptions) *Pin {
p := PinCid(c)
p.PinOptions = opts
return p
}
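
For illustration only (the helper below is hypothetical, not part of the API), the nil-able Reference is set by taking the address of a defined CID and left nil when a pin references nothing:

// hypothetical helper building a meta pin that references its ClusterDAG
func newMetaPin(root, clusterDAG cid.Cid) *Pin {
	p := PinCid(root)
	p.Type = MetaType
	p.Reference = &clusterDAG // nil would mean "no reference"
	return p
}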
// PinSerial is a serializable version of Pin
type PinSerial struct {
PinOptions
Cid string `json:"cid" codec:"c,omitempty"`
Type uint64 `json:"type" codec:"t,omitempty"`
Allocations []string `json:"allocations" codec:"a,omitempty"`
MaxDepth int `json:"max_depth" codec:"d,omitempty"`
Reference string `json:"reference" codec:"r,omitempty"`
}
// ToSerial converts a Pin to PinSerial.
func (pin Pin) ToSerial() PinSerial {
c := ""
if pin.Cid.Defined() {
c = pin.Cid.String()
}
ref := ""
if pin.Reference.Defined() {
ref = pin.Reference.String()
}
allocs := PeersToStrings(pin.Allocations)
return PinSerial{
Cid: c,
Allocations: allocs,
Type: uint64(pin.Type),
MaxDepth: pin.MaxDepth,
Reference: ref,
PinOptions: pin.PinOptions,
}
}
func convertPinType(t PinType) pb.Pin_PinType {
var i pb.Pin_PinType
for t != 1 {
@ -934,9 +622,11 @@ func (pin *Pin) ProtoMarshal() ([]byte, error) {
Type: convertPinType(pin.Type),
Allocations: allocs,
MaxDepth: int32(pin.MaxDepth),
Reference: pin.Reference.Bytes(),
Options: opts,
}
if ref := pin.Reference; ref != nil {
pbPin.Reference = ref.Bytes()
}
return proto.Marshal(pbPin)
}
@ -971,12 +661,11 @@ func (pin *Pin) ProtoUnmarshal(data []byte) error {
pin.MaxDepth = int(pbPin.GetMaxDepth())
ref, err := cid.Cast(pbPin.GetReference())
if err != nil {
pin.Reference = cid.Undef
pin.Reference = nil
} else {
pin.Reference = ref
pin.Reference = &ref
}
pin.Reference = ref
opts := pbPin.GetOptions()
pin.ReplicationFactorMin = int(opts.GetReplicationFactorMin())
@ -991,34 +680,37 @@ func (pin *Pin) ProtoUnmarshal(data []byte) error {
// Equals checks if two pins are the same (with the same allocations).
// If allocations are the same but in different order, they are still
// considered equivalent.
func (pin Pin) Equals(pin2 Pin) bool {
pin1s := pin.ToSerial()
pin2s := pin2.ToSerial()
if pin1s.Cid != pin2s.Cid {
func (pin *Pin) Equals(pin2 *Pin) bool {
if pin == nil && pin2 != nil || pin2 == nil && pin != nil {
return false
}
if pin1s.Name != pin2s.Name {
if !pin.Cid.Equals(pin2.Cid) {
return false
}
if pin1s.Type != pin2s.Type {
if pin.Name != pin2.Name {
return false
}
if pin1s.MaxDepth != pin2s.MaxDepth {
if pin.Type != pin2.Type {
return false
}
if pin1s.Reference != pin2s.Reference {
if pin.MaxDepth != pin2.MaxDepth {
return false
}
sort.Strings(pin1s.Allocations)
sort.Strings(pin2s.Allocations)
if pin.Reference != pin2.Reference &&
(pin.Reference == nil || pin2.Reference == nil || !pin.Reference.Equals(*pin2.Reference)) {
return false
}
if strings.Join(pin1s.Allocations, ",") != strings.Join(pin2s.Allocations, ",") {
allocs1 := PeersToStrings(pin.Allocations)
sort.Strings(allocs1)
allocs2 := PeersToStrings(pin2.Allocations)
sort.Strings(allocs2)
if strings.Join(allocs1, ",") != strings.Join(allocs2, ",") {
return false
}
@ -1027,7 +719,7 @@ func (pin Pin) Equals(pin2 Pin) bool {
// IsRemotePin determines whether a Pin's ReplicationFactor has
// been met, so as to either pin or unpin it from the peer.
func (pin Pin) IsRemotePin(pid peer.ID) bool {
func (pin *Pin) IsRemotePin(pid peer.ID) bool {
if pin.ReplicationFactorMax < 0 || pin.ReplicationFactorMin < 0 {
return false
}
@ -1040,56 +732,13 @@ func (pin Pin) IsRemotePin(pid peer.ID) bool {
return true
}
// ToPin converts a PinSerial to its native form.
func (pins PinSerial) ToPin() Pin {
c, err := cid.Decode(pins.Cid)
if err != nil {
logger.Debug(pins.Cid, err)
}
var ref cid.Cid
if pins.Reference != "" {
ref, err = cid.Decode(pins.Reference)
if err != nil {
logger.Warning(pins.Reference, err)
}
}
return Pin{
Cid: c,
Allocations: StringsToPeers(pins.Allocations),
Type: PinType(pins.Type),
MaxDepth: pins.MaxDepth,
Reference: ref,
PinOptions: pins.PinOptions,
}
}
// Clone returns a deep copy of the PinSerial.
func (pins PinSerial) Clone() PinSerial {
new := pins // this copy all the simple fields.
// slices are pointers. We need to explicitally copy them.
new.Allocations = make([]string, len(pins.Allocations))
copy(new.Allocations, pins.Allocations)
return new
}
// DecodeCid retrieves just the cid from a PinSerial without
// allocating a Pin.
func (pins PinSerial) DecodeCid() cid.Cid {
c, err := cid.Decode(pins.Cid)
if err != nil {
logger.Debug(pins.Cid, err)
}
return c
}
// NodeWithMeta specifies a block of data and a set of optional metadata fields
// carrying information about the encoded ipld node
type NodeWithMeta struct {
Data []byte
Cid string
CumSize uint64 // Cumulative size
Format string
Data []byte `codec:"d,omitempty"`
Cid cid.Cid `codec:"c,omitempty"`
CumSize uint64 `codec:"s,omitempty"` // Cumulative size
Format string `codec:"f,omitempty"`
}
// Size returns how big the block is. It is different from CumSize, which
@ -1102,11 +751,11 @@ func (n *NodeWithMeta) Size() uint64 {
// pin allocations by a PinAllocator. IPFS cluster is agnostic to
// the Value, which should be interpreted by the PinAllocator.
type Metric struct {
Name string
Peer peer.ID
Value string
Expire int64
Valid bool
Name string `json:"name" codec:"n,omitempty"`
Peer peer.ID `json:"peer" codec:"p,omitempty"`
Value string `json:"value" codec:"v,omitempty"`
Expire int64 `json:"expire" codec:"e,omitempty"`
Valid bool `json:"valid" codec:"d,omitempty"`
}
// SetTTL sets Metric to expire after the given time.Duration
@ -1132,51 +781,6 @@ func (m *Metric) Discard() bool {
return !m.Valid || m.Expired()
}
// MetricSerial is a helper for JSON marshaling. The Metric type is already
// serializable, but not pretty to humans (API).
type MetricSerial struct {
Name string `json:"name"`
Peer string `json:"peer"`
Value string `json:"value"`
Expire int64 `json:"expire"`
Valid bool `json:"valid"`
}
// MarshalJSON allows a Metric to produce a JSON representation
// of itself.
func (m *Metric) MarshalJSON() ([]byte, error) {
return json.Marshal(&MetricSerial{
Name: m.Name,
Peer: peer.IDB58Encode(m.Peer),
Value: m.Value,
Expire: m.Expire,
})
}
// UnmarshalJSON decodes JSON on top of the Metric.
func (m *Metric) UnmarshalJSON(j []byte) error {
if bytes.Equal(j, []byte("null")) {
return nil
}
ms := &MetricSerial{}
err := json.Unmarshal(j, ms)
if err != nil {
return err
}
p, err := peer.IDB58Decode(ms.Peer)
if err != nil {
return err
}
m.Name = ms.Name
m.Peer = p
m.Value = ms.Value
m.Expire = ms.Expire
return nil
}
// Alert carries alerting information about a peer. WIP.
type Alert struct {
Peer peer.ID
@ -1185,8 +789,8 @@ type Alert struct {
// Error can be used by APIs to return errors.
type Error struct {
Code int `json:"code"`
Message string `json:"message"`
Code int `json:"code" codec:"o,omitempty"`
Message string `json:"message" codec:"m,omitempty"`
}
// Error implements the error interface and returns the error's message.
@ -1196,6 +800,6 @@ func (e *Error) Error() string {
// IPFSRepoStat wraps information about the IPFS repository.
type IPFSRepoStat struct {
RepoSize uint64
StorageMax uint64
RepoSize uint64 `codec:"r,omitempty"`
StorageMax uint64 `codec:"s,omitempty"`
}

View File

@ -1,7 +1,7 @@
package api
import (
"fmt"
"bytes"
"net/url"
"reflect"
"strings"
@ -11,6 +11,8 @@ import (
cid "github.com/ipfs/go-cid"
peer "github.com/libp2p/go-libp2p-peer"
ma "github.com/multiformats/go-multiaddr"
"github.com/ugorji/go/codec"
)
var testTime = time.Date(2017, 12, 31, 15, 45, 50, 0, time.UTC)
@ -51,180 +53,6 @@ func TestIPFSPinStatusFromString(t *testing.T) {
}
}
func TestGlobalPinInfoConv(t *testing.T) {
defer func() {
if r := recover(); r != nil {
t.Fatal("paniced")
}
}()
gpi := GlobalPinInfo{
Cid: testCid1,
PeerMap: map[peer.ID]PinInfo{
testPeerID1: {
Cid: testCid1,
Peer: testPeerID1,
Status: TrackerStatusPinned,
TS: testTime,
},
},
}
newgpi := gpi.ToSerial().ToGlobalPinInfo()
if gpi.Cid.String() != newgpi.Cid.String() {
t.Error("mismatching CIDs")
}
if gpi.PeerMap[testPeerID1].Cid.String() != newgpi.PeerMap[testPeerID1].Cid.String() {
t.Error("mismatching PinInfo CIDs")
}
if !gpi.PeerMap[testPeerID1].TS.Equal(newgpi.PeerMap[testPeerID1].TS) {
t.Error("bad time")
}
}
func TestIDConv(t *testing.T) {
defer func() {
if r := recover(); r != nil {
t.Fatal("paniced")
}
}()
id := ID{
ID: testPeerID1,
Addresses: []ma.Multiaddr{testMAddr},
ClusterPeers: []peer.ID{testPeerID2},
ClusterPeersAddresses: []ma.Multiaddr{testMAddr2},
Version: "testv",
Commit: "ab",
RPCProtocolVersion: "testp",
Error: "teste",
IPFS: IPFSID{
ID: testPeerID2,
Addresses: []ma.Multiaddr{testMAddr3},
Error: "abc",
},
}
newid := id.ToSerial().ToID()
if id.ID != newid.ID {
t.Error("mismatching Peer IDs")
}
if !id.Addresses[0].Equal(newid.Addresses[0]) {
t.Error("mismatching addresses")
}
if id.ClusterPeers[0] != newid.ClusterPeers[0] {
t.Error("mismatching clusterPeers")
}
if !id.ClusterPeersAddresses[0].Equal(newid.ClusterPeersAddresses[0]) {
t.Error("mismatching clusterPeersAddresses")
}
if id.Version != newid.Version ||
id.Commit != newid.Commit ||
id.RPCProtocolVersion != newid.RPCProtocolVersion ||
id.Error != newid.Error {
t.Error("some field didn't survive")
}
if id.IPFS.ID != newid.IPFS.ID {
t.Error("ipfs daemon id mismatch")
}
if !id.IPFS.Addresses[0].Equal(newid.IPFS.Addresses[0]) {
t.Error("mismatching addresses")
}
if id.IPFS.Error != newid.IPFS.Error {
t.Error("ipfs error mismatch")
}
}
func TestConnectGraphConv(t *testing.T) {
defer func() {
if r := recover(); r != nil {
t.Fatal("paniced")
}
}()
cg := ConnectGraph{
ClusterID: testPeerID1,
IPFSLinks: map[peer.ID][]peer.ID{
testPeerID4: []peer.ID{testPeerID5, testPeerID6},
testPeerID5: []peer.ID{testPeerID4, testPeerID6},
testPeerID6: []peer.ID{testPeerID4, testPeerID5},
},
ClusterLinks: map[peer.ID][]peer.ID{
testPeerID1: []peer.ID{testPeerID2, testPeerID3},
testPeerID2: []peer.ID{testPeerID1, testPeerID3},
testPeerID3: []peer.ID{testPeerID1, testPeerID2},
},
ClustertoIPFS: map[peer.ID]peer.ID{
testPeerID1: testPeerID4,
testPeerID2: testPeerID5,
testPeerID3: testPeerID6,
},
}
cgNew := cg.ToSerial().ToConnectGraph()
if !reflect.DeepEqual(cg, cgNew) {
t.Fatal("The new connect graph should be equivalent to the old")
}
}
func TestMultiaddrConv(t *testing.T) {
defer func() {
if r := recover(); r != nil {
t.Fatal("paniced")
}
}()
addrs := []ma.Multiaddr{testMAddr2}
new := MultiaddrsToSerial(addrs).ToMultiaddrs()
if !addrs[0].Equal(new[0]) {
t.Error("mismatch")
}
}
func TestPinConv(t *testing.T) {
defer func() {
if r := recover(); r != nil {
t.Fatal("paniced")
}
}()
c := Pin{
Cid: testCid1,
Type: ClusterDAGType,
Allocations: []peer.ID{testPeerID1},
Reference: testCid2,
MaxDepth: -1,
PinOptions: PinOptions{
ReplicationFactorMax: -1,
ReplicationFactorMin: -1,
Name: "A test pin",
},
}
newc := c.ToSerial().ToPin()
if !c.Cid.Equals(newc.Cid) ||
c.Allocations[0] != newc.Allocations[0] ||
c.ReplicationFactorMin != newc.ReplicationFactorMin ||
c.ReplicationFactorMax != newc.ReplicationFactorMax ||
c.MaxDepth != newc.MaxDepth ||
!c.Reference.Equals(newc.Reference) ||
c.Name != newc.Name || c.Type != newc.Type {
fmt.Printf("c: %+v\ncnew: %+v\n", c, newc)
t.Fatal("mismatch")
}
if !c.Equals(newc) {
t.Error("all pin fields are equal but Equals returns false")
}
}
func TestMetric(t *testing.T) {
m := Metric{
Name: "hello",
@ -265,48 +93,6 @@ func TestMetric(t *testing.T) {
}
}
func BenchmarkPinSerial_ToPin(b *testing.B) {
pin := Pin{
Cid: testCid1,
Type: ClusterDAGType,
Allocations: []peer.ID{testPeerID1},
Reference: testCid2,
MaxDepth: -1,
PinOptions: PinOptions{
ReplicationFactorMax: -1,
ReplicationFactorMin: -1,
Name: "A test pin",
},
}
pinS := pin.ToSerial()
b.ResetTimer()
for i := 0; i < b.N; i++ {
pinS.ToPin()
}
}
func BenchmarkPinSerial_DecodeCid(b *testing.B) {
pin := Pin{
Cid: testCid1,
Type: ClusterDAGType,
Allocations: []peer.ID{testPeerID1},
Reference: testCid2,
MaxDepth: -1,
PinOptions: PinOptions{
ReplicationFactorMax: -1,
ReplicationFactorMin: -1,
Name: "A test pin",
},
}
pinS := pin.ToSerial()
b.ResetTimer()
for i := 0; i < b.N; i++ {
pinS.DecodeCid()
}
}
func TestConvertPinType(t *testing.T) {
for _, t1 := range []PinType{BadType, ShardType} {
i := convertPinType(t1)
@ -344,10 +130,37 @@ func checkDupTags(t *testing.T, name string, typ reflect.Type, tags map[string]s
}
}
// TestPinTags checks that we are not re-using the same codec tag for
// different fields in the Pin object.
func TestPinTags(t *testing.T) {
typ := reflect.TypeOf(PinSerial{})
// TestDupTags checks that we are not re-using the same codec tag for
// different fields of the api types.
func TestDupTags(t *testing.T) {
typ := reflect.TypeOf(Pin{})
checkDupTags(t, "codec", typ, nil)
typ = reflect.TypeOf(ID{})
checkDupTags(t, "codec", typ, nil)
typ = reflect.TypeOf(GlobalPinInfo{})
checkDupTags(t, "codec", typ, nil)
typ = reflect.TypeOf(PinInfo{})
checkDupTags(t, "codec", typ, nil)
typ = reflect.TypeOf(ConnectGraph{})
checkDupTags(t, "codec", typ, nil)
typ = reflect.TypeOf(ID{})
checkDupTags(t, "codec", typ, nil)
typ = reflect.TypeOf(NodeWithMeta{})
checkDupTags(t, "codec", typ, nil)
typ = reflect.TypeOf(Metric{})
checkDupTags(t, "codec", typ, nil)
typ = reflect.TypeOf(Error{})
checkDupTags(t, "codec", typ, nil)
typ = reflect.TypeOf(IPFSRepoStat{})
checkDupTags(t, "codec", typ, nil)
}
@ -400,3 +213,65 @@ func TestPinOptionsQuery(t *testing.T) {
}
}
}
func TestIDCodec(t *testing.T) {
TestPeerID1, _ := peer.IDB58Decode("QmXZrtE5jQwXNqCJMfHUTQkvhQ4ZAnqMnmzFMJfLewuabc")
TestPeerID2, _ := peer.IDB58Decode("QmUZ13osndQ5uL4tPWHXe3iBgBgq9gfewcBMSCAuMBsDJ6")
TestPeerID3, _ := peer.IDB58Decode("QmPGDFvBkgWhvzEK9qaTWrWurSwqXNmhnK3hgELPdZZNPa")
addr, _ := NewMultiaddr("/ip4/1.2.3.4")
id := &ID{
ID: TestPeerID1,
Addresses: []Multiaddr{addr},
ClusterPeers: []peer.ID{TestPeerID2},
ClusterPeersAddresses: []Multiaddr{addr},
Version: "2",
Commit: "",
RPCProtocolVersion: "abc",
Error: "",
IPFS: IPFSID{
ID: TestPeerID3,
Addresses: []Multiaddr{addr},
Error: "",
},
Peername: "hi",
}
var buf bytes.Buffer
enc := codec.NewEncoder(&buf, &codec.MsgpackHandle{})
err := enc.Encode(id)
if err != nil {
t.Fatal(err)
}
var buf2 = bytes.NewBuffer(buf.Bytes())
dec := codec.NewDecoder(buf2, &codec.MsgpackHandle{})
var id2 ID
err = dec.Decode(&id2)
if err != nil {
t.Fatal(err)
}
}
func TestPinCodec(t *testing.T) {
ci, _ := cid.Decode("QmXZrtE5jQwXNqCJMfHUTQkvhQ4ZAnqMnmzFMJfLewuabc")
pin := PinCid(ci)
var buf bytes.Buffer
enc := codec.NewEncoder(&buf, &codec.MsgpackHandle{})
err := enc.Encode(pin)
if err != nil {
t.Fatal(err)
}
var buf2 = bytes.NewBuffer(buf.Bytes())
dec := codec.NewDecoder(buf2, &codec.MsgpackHandle{})
var pin2 Pin
err = dec.Decode(&pin2)
if err != nil {
t.Fatal(err)
}
}

View File

@ -5,6 +5,8 @@ import (
peer "github.com/libp2p/go-libp2p-peer"
ma "github.com/multiformats/go-multiaddr"
multiaddr "github.com/multiformats/go-multiaddr"
)
// PeersToStrings IDB58Encodes a list of peers.
@ -57,11 +59,12 @@ func Libp2pMultiaddrSplit(addr ma.Multiaddr) (peer.ID, ma.Multiaddr, error) {
// MustLibp2pMultiaddrJoin takes a LibP2P multiaddress and a peer ID and
// encapsulates a new /ipfs/<peerID> address. It will panic if the given
// peer ID is bad.
func MustLibp2pMultiaddrJoin(addr ma.Multiaddr, p peer.ID) ma.Multiaddr {
pidAddr, err := ma.NewMultiaddr("/ipfs/" + peer.IDB58Encode(p))
func MustLibp2pMultiaddrJoin(addr Multiaddr, p peer.ID) Multiaddr {
v := addr.Value()
pidAddr, err := multiaddr.NewMultiaddr("/ipfs/" + peer.IDB58Encode(p))
// let this break badly
if err != nil {
panic("called MustLibp2pMultiaddrJoin with bad peer!")
}
return addr.Encapsulate(pidAddr)
return Multiaddr{Multiaddr: v.Encapsulate(pidAddr)}
}

View File

@ -230,7 +230,7 @@ func (c *Cluster) syncWatcher() {
}
}
func (c *Cluster) sendInformerMetric(ctx context.Context) (api.Metric, error) {
func (c *Cluster) sendInformerMetric(ctx context.Context) (*api.Metric, error) {
ctx, span := trace.StartSpan(ctx, "cluster/sendInformerMetric")
defer span.End()
@ -288,7 +288,7 @@ func (c *Cluster) pushPingMetrics(ctx context.Context) {
ticker := time.NewTicker(c.config.MonitorPingInterval)
for {
metric := api.Metric{
metric := &api.Metric{
Name: pingMetricName,
Peer: c.id,
Valid: true,
@ -562,21 +562,26 @@ func (c *Cluster) Done() <-chan struct{} {
}
// ID returns information about the Cluster peer
func (c *Cluster) ID(ctx context.Context) api.ID {
func (c *Cluster) ID(ctx context.Context) *api.ID {
_, span := trace.StartSpan(ctx, "cluster/ID")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
// ignore error since it is included in response object
ipfsID, _ := c.ipfs.ID(ctx)
var addrs []ma.Multiaddr
ipfsID, err := c.ipfs.ID(ctx)
if err != nil {
ipfsID = &api.IPFSID{
Error: err.Error(),
}
}
var addrs []api.Multiaddr
addrsSet := make(map[string]struct{}) // to filter dups
for _, addr := range c.host.Addrs() {
addrsSet[addr.String()] = struct{}{}
}
for k := range addrsSet {
addr, _ := ma.NewMultiaddr(k)
addr, _ := api.NewMultiaddr(k)
addrs = append(addrs, api.MustLibp2pMultiaddrJoin(addr, c.id))
}
@ -587,7 +592,7 @@ func (c *Cluster) ID(ctx context.Context) api.ID {
peers, _ = c.consensus.Peers(ctx)
}
return api.ID{
return &api.ID{
ID: c.id,
//PublicKey: c.host.Peerstore().PubKey(c.id),
Addresses: addrs,
@ -595,7 +600,7 @@ func (c *Cluster) ID(ctx context.Context) api.ID {
ClusterPeersAddresses: c.peerManager.PeersAddresses(peers),
Version: version.Version.String(),
RPCProtocolVersion: version.RPCProtocol,
IPFS: ipfsID,
IPFS: *ipfsID,
Peername: c.config.Peername,
}
}
@ -610,7 +615,7 @@ func (c *Cluster) ID(ctx context.Context) api.ID {
//
// The new peer ID will be passed to the consensus
// component to be added to the peerset.
func (c *Cluster) PeerAdd(ctx context.Context, pid peer.ID) (api.ID, error) {
func (c *Cluster) PeerAdd(ctx context.Context, pid peer.ID) (*api.ID, error) {
_, span := trace.StartSpan(ctx, "cluster/PeerAdd")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -626,7 +631,7 @@ func (c *Cluster) PeerAdd(ctx context.Context, pid peer.ID) (api.ID, error) {
err := c.consensus.AddPeer(ctx, pid)
if err != nil {
logger.Error(err)
id := api.ID{ID: pid, Error: err.Error()}
id := &api.ID{ID: pid, Error: err.Error()}
return id, err
}
@ -643,7 +648,7 @@ func (c *Cluster) PeerAdd(ctx context.Context, pid peer.ID) (api.ID, error) {
logger.Error(err)
}
id := api.ID{}
id := &api.ID{}
// wait up to 2 seconds for new peer to catch up
// and return an up to date api.ID object.
@ -719,13 +724,13 @@ func (c *Cluster) Join(ctx context.Context, addr ma.Multiaddr) error {
// Note that PeerAdd() on the remote peer will
// figure out what our real address is (obviously not
// ListenAddr).
var myID api.IDSerial
var myID api.ID
err = c.rpcClient.CallContext(
ctx,
pid,
"Cluster",
"PeerAdd",
peer.IDB58Encode(c.id),
c.id,
&myID,
)
if err != nil {
@ -814,7 +819,7 @@ func (c *Cluster) StateSync(ctx context.Context) error {
// StatusAll returns the GlobalPinInfo for all tracked Cids in all peers.
// If an error happens, the slice will contain as much information as
// could be fetched from other peers.
func (c *Cluster) StatusAll(ctx context.Context) ([]api.GlobalPinInfo, error) {
func (c *Cluster) StatusAll(ctx context.Context) ([]*api.GlobalPinInfo, error) {
_, span := trace.StartSpan(ctx, "cluster/StatusAll")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -823,7 +828,7 @@ func (c *Cluster) StatusAll(ctx context.Context) ([]api.GlobalPinInfo, error) {
}
// StatusAllLocal returns the PinInfo for all the tracked Cids in this peer.
func (c *Cluster) StatusAllLocal(ctx context.Context) []api.PinInfo {
func (c *Cluster) StatusAllLocal(ctx context.Context) []*api.PinInfo {
_, span := trace.StartSpan(ctx, "cluster/StatusAllLocal")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -834,7 +839,7 @@ func (c *Cluster) StatusAllLocal(ctx context.Context) []api.PinInfo {
// Status returns the GlobalPinInfo for a given Cid as fetched from all
// current peers. If an error happens, the GlobalPinInfo should contain
// as much information as could be fetched from the other peers.
func (c *Cluster) Status(ctx context.Context, h cid.Cid) (api.GlobalPinInfo, error) {
func (c *Cluster) Status(ctx context.Context, h cid.Cid) (*api.GlobalPinInfo, error) {
_, span := trace.StartSpan(ctx, "cluster/Status")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -843,7 +848,7 @@ func (c *Cluster) Status(ctx context.Context, h cid.Cid) (api.GlobalPinInfo, err
}
// StatusLocal returns this peer's PinInfo for a given Cid.
func (c *Cluster) StatusLocal(ctx context.Context, h cid.Cid) api.PinInfo {
func (c *Cluster) StatusLocal(ctx context.Context, h cid.Cid) *api.PinInfo {
_, span := trace.StartSpan(ctx, "cluster/StatusLocal")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -855,7 +860,7 @@ func (c *Cluster) StatusLocal(ctx context.Context, h cid.Cid) api.PinInfo {
// that the state of tracked items matches the state reported by the IPFS daemon
// and returning the results as GlobalPinInfo. If an error happens, the slice
// will contain as much information as could be fetched from the peers.
func (c *Cluster) SyncAll(ctx context.Context) ([]api.GlobalPinInfo, error) {
func (c *Cluster) SyncAll(ctx context.Context) ([]*api.GlobalPinInfo, error) {
_, span := trace.StartSpan(ctx, "cluster/SyncAll")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -868,7 +873,7 @@ func (c *Cluster) SyncAll(ctx context.Context) ([]api.GlobalPinInfo, error) {
//
// SyncAllLocal returns the list of PinInfo that were updated because of
// the operation, along with those in error states.
func (c *Cluster) SyncAllLocal(ctx context.Context) ([]api.PinInfo, error) {
func (c *Cluster) SyncAllLocal(ctx context.Context) ([]*api.PinInfo, error) {
_, span := trace.StartSpan(ctx, "cluster/SyncAllLocal")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -885,7 +890,7 @@ func (c *Cluster) SyncAllLocal(ctx context.Context) ([]api.PinInfo, error) {
// Sync triggers a SyncLocal() operation for a given Cid
// in all cluster peers.
func (c *Cluster) Sync(ctx context.Context, h cid.Cid) (api.GlobalPinInfo, error) {
func (c *Cluster) Sync(ctx context.Context, h cid.Cid) (*api.GlobalPinInfo, error) {
_, span := trace.StartSpan(ctx, "cluster/Sync")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -897,14 +902,14 @@ func (c *Cluster) Sync(ctx context.Context, h cid.Cid) (api.GlobalPinInfo, error
func (c *Cluster) localPinInfoOp(
ctx context.Context,
h cid.Cid,
f func(context.Context, cid.Cid) (api.PinInfo, error),
) (pInfo api.PinInfo, err error) {
f func(context.Context, cid.Cid) (*api.PinInfo, error),
) (pInfo *api.PinInfo, err error) {
ctx, span := trace.StartSpan(ctx, "cluster/localPinInfoOp")
defer span.End()
cids, err := c.cidsFromMetaPin(ctx, h)
if err != nil {
return api.PinInfo{}, err
return nil, err
}
for _, ci := range cids {
@ -923,7 +928,7 @@ func (c *Cluster) localPinInfoOp(
// SyncLocal performs a local sync operation for the given Cid. This will
// tell the tracker to verify the status of the Cid against the IPFS daemon.
// It returns the updated PinInfo for the Cid.
func (c *Cluster) SyncLocal(ctx context.Context, h cid.Cid) (pInfo api.PinInfo, err error) {
func (c *Cluster) SyncLocal(ctx context.Context, h cid.Cid) (pInfo *api.PinInfo, err error) {
_, span := trace.StartSpan(ctx, "cluster/SyncLocal")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -933,7 +938,7 @@ func (c *Cluster) SyncLocal(ctx context.Context, h cid.Cid) (pInfo api.PinInfo,
// RecoverAllLocal triggers a RecoverLocal operation for all Cids tracked
// by this peer.
func (c *Cluster) RecoverAllLocal(ctx context.Context) ([]api.PinInfo, error) {
func (c *Cluster) RecoverAllLocal(ctx context.Context) ([]*api.PinInfo, error) {
_, span := trace.StartSpan(ctx, "cluster/RecoverAllLocal")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -943,7 +948,7 @@ func (c *Cluster) RecoverAllLocal(ctx context.Context) ([]api.PinInfo, error) {
// Recover triggers a recover operation for a given Cid in all
// cluster peers.
func (c *Cluster) Recover(ctx context.Context, h cid.Cid) (api.GlobalPinInfo, error) {
func (c *Cluster) Recover(ctx context.Context, h cid.Cid) (*api.GlobalPinInfo, error) {
_, span := trace.StartSpan(ctx, "cluster/Recover")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -953,7 +958,7 @@ func (c *Cluster) Recover(ctx context.Context, h cid.Cid) (api.GlobalPinInfo, er
// RecoverLocal triggers a recover operation for a given Cid in this peer only.
// It returns the updated PinInfo, after recovery.
func (c *Cluster) RecoverLocal(ctx context.Context, h cid.Cid) (pInfo api.PinInfo, err error) {
func (c *Cluster) RecoverLocal(ctx context.Context, h cid.Cid) (pInfo *api.PinInfo, err error) {
_, span := trace.StartSpan(ctx, "cluster/RecoverLocal")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -965,7 +970,7 @@ func (c *Cluster) RecoverLocal(ctx context.Context, h cid.Cid) (pInfo api.PinInf
// of the current global state. This is the source of truth as to which
// pins are managed and their allocation, but does not indicate if
// the item is successfully pinned. For that, use StatusAll().
func (c *Cluster) Pins(ctx context.Context) []api.Pin {
func (c *Cluster) Pins(ctx context.Context) []*api.Pin {
_, span := trace.StartSpan(ctx, "cluster/Pins")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -973,7 +978,7 @@ func (c *Cluster) Pins(ctx context.Context) []api.Pin {
cState, err := c.consensus.State(ctx)
if err != nil {
logger.Error(err)
return []api.Pin{}
return []*api.Pin{}
}
return cState.List(ctx)
}
@ -984,14 +989,14 @@ func (c *Cluster) Pins(ctx context.Context) []api.Pin {
// assigned for the requested Cid, but does not indicate if
// the item is successfully pinned. For that, use Status(). PinGet
// returns an error if the given Cid is not part of the global state.
func (c *Cluster) PinGet(ctx context.Context, h cid.Cid) (api.Pin, error) {
func (c *Cluster) PinGet(ctx context.Context, h cid.Cid) (*api.Pin, error) {
_, span := trace.StartSpan(ctx, "cluster/PinGet")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
st, err := c.consensus.State(ctx)
if err != nil {
return api.PinCid(h), err
return nil, err
}
pin, ok := st.Get(ctx, h)
if !ok {
@ -1016,7 +1021,7 @@ func (c *Cluster) PinGet(ctx context.Context, h cid.Cid) (api.Pin, error) {
// this set then the remaining peers are allocated in order from the rest of
// the cluster. Priority allocations are best effort. If any priority peers
// are unavailable then Pin will simply allocate from the rest of the cluster.
func (c *Cluster) Pin(ctx context.Context, pin api.Pin) error {
func (c *Cluster) Pin(ctx context.Context, pin *api.Pin) error {
_, span := trace.StartSpan(ctx, "cluster/Pin")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -1044,7 +1049,7 @@ func (c *Cluster) setupReplicationFactor(pin *api.Pin) error {
func checkPinType(pin *api.Pin) error {
switch pin.Type {
case api.DataType:
if pin.Reference != cid.Undef {
if pin.Reference != nil {
return errors.New("data pins should not reference other pins")
}
case api.ShardType:
@ -1062,14 +1067,14 @@ func checkPinType(pin *api.Pin) error {
if pin.MaxDepth != 0 {
return errors.New("must pin roots directly")
}
if pin.Reference == cid.Undef {
if pin.Reference == nil {
return errors.New("clusterDAG pins should reference a Meta pin")
}
case api.MetaType:
if pin.Allocations != nil && len(pin.Allocations) != 0 {
return errors.New("meta pin should not specify allocations")
}
if pin.Reference == cid.Undef {
if pin.Reference == nil {
return errors.New("metaPins should reference a ClusterDAG")
}
@ -1102,7 +1107,7 @@ func (c *Cluster) setupPin(ctx context.Context, pin *api.Pin) error {
// able to evacuate a node and returns the pin object that it tried to pin, whether the pin was submitted
// to the consensus layer or skipped (due to error or to the fact
// that it was already valid) and error.
func (c *Cluster) pin(ctx context.Context, pin api.Pin, blacklist []peer.ID, prioritylist []peer.ID) (api.Pin, bool, error) {
func (c *Cluster) pin(ctx context.Context, pin *api.Pin, blacklist []peer.ID, prioritylist []peer.ID) (*api.Pin, bool, error) {
ctx, span := trace.StartSpan(ctx, "cluster/pin")
defer span.End()
@ -1111,7 +1116,7 @@ func (c *Cluster) pin(ctx context.Context, pin api.Pin, blacklist []peer.ID, pri
}
// setup pin might produce some side-effects to our pin
err := c.setupPin(ctx, &pin)
err := c.setupPin(ctx, pin)
if err != nil {
return pin, false, err
}
@ -1147,7 +1152,7 @@ func (c *Cluster) pin(ctx context.Context, pin api.Pin, blacklist []peer.ID, pri
return pin, true, c.consensus.LogPin(ctx, pin)
}
func (c *Cluster) unpin(ctx context.Context, h cid.Cid) (api.Pin, error) {
func (c *Cluster) unpin(ctx context.Context, h cid.Cid) (*api.Pin, error) {
_, span := trace.StartSpan(ctx, "cluster/unpin")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -1197,7 +1202,7 @@ func (c *Cluster) Unpin(ctx context.Context, h cid.Cid) error {
// nodes that it references. It handles the case where multiple parents
// reference the same metadata node, only unpinning those nodes without
// existing references
func (c *Cluster) unpinClusterDag(metaPin api.Pin) error {
func (c *Cluster) unpinClusterDag(metaPin *api.Pin) error {
ctx, span := trace.StartSpan(c.ctx, "cluster/unpinClusterDag")
defer span.End()
@ -1219,14 +1224,14 @@ func (c *Cluster) unpinClusterDag(metaPin api.Pin) error {
// PinPath pins a CID resolved from its IPFS Path. It returns the resolved
// Pin object.
func (c *Cluster) PinPath(ctx context.Context, path api.PinPath) (api.Pin, error) {
func (c *Cluster) PinPath(ctx context.Context, path *api.PinPath) (*api.Pin, error) {
_, span := trace.StartSpan(ctx, "cluster/PinPath")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
ci, err := c.ipfs.Resolve(ctx, path.Path)
if err != nil {
return api.Pin{}, err
return nil, err
}
p := api.PinCid(ci)
@ -1237,14 +1242,14 @@ func (c *Cluster) PinPath(ctx context.Context, path api.PinPath) (api.Pin, error
// UnpinPath unpins a CID resolved from its IPFS Path. It returns the
// previously pinned Pin object.
func (c *Cluster) UnpinPath(ctx context.Context, path string) (api.Pin, error) {
func (c *Cluster) UnpinPath(ctx context.Context, path string) (*api.Pin, error) {
_, span := trace.StartSpan(ctx, "cluster/UnpinPath")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
ci, err := c.ipfs.Resolve(ctx, path)
if err != nil {
return api.Pin{}, err
return nil, err
}
return c.unpin(ctx, ci)
@ -1272,7 +1277,7 @@ func (c *Cluster) Version() string {
}
// Peers returns the IDs of the members of this Cluster.
func (c *Cluster) Peers(ctx context.Context) []api.ID {
func (c *Cluster) Peers(ctx context.Context) []*api.ID {
_, span := trace.StartSpan(ctx, "cluster/Peers")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -1281,12 +1286,11 @@ func (c *Cluster) Peers(ctx context.Context) []api.ID {
if err != nil {
logger.Error(err)
logger.Error("an empty list of peers will be returned")
return []api.ID{}
return []*api.ID{}
}
lenMembers := len(members)
peersSerial := make([]api.IDSerial, lenMembers, lenMembers)
peers := make([]api.ID, lenMembers, lenMembers)
peers := make([]*api.ID, lenMembers, lenMembers)
ctxs, cancels := rpcutil.CtxsWithCancel(ctx, lenMembers)
defer rpcutil.MultiCancel(cancels)
@ -1297,43 +1301,37 @@ func (c *Cluster) Peers(ctx context.Context) []api.ID {
"Cluster",
"ID",
struct{}{},
rpcutil.CopyIDSerialsToIfaces(peersSerial),
rpcutil.CopyIDsToIfaces(peers),
)
for i, err := range errs {
if err != nil {
peersSerial[i].ID = peer.IDB58Encode(members[i])
peersSerial[i].Error = err.Error()
peers[i] = &api.ID{}
peers[i].ID = members[i]
peers[i].Error = err.Error()
}
}
for i, ps := range peersSerial {
peers[i] = ps.ToID()
}
return peers
}
func (c *Cluster) globalPinInfoCid(ctx context.Context, method string, h cid.Cid) (api.GlobalPinInfo, error) {
func (c *Cluster) globalPinInfoCid(ctx context.Context, method string, h cid.Cid) (*api.GlobalPinInfo, error) {
ctx, span := trace.StartSpan(ctx, "cluster/globalPinInfoCid")
defer span.End()
pin := api.GlobalPinInfo{
pin := &api.GlobalPinInfo{
Cid: h,
PeerMap: make(map[peer.ID]api.PinInfo),
PeerMap: make(map[string]*api.PinInfo),
}
members, err := c.consensus.Peers(ctx)
if err != nil {
logger.Error(err)
return api.GlobalPinInfo{}, err
return nil, err
}
lenMembers := len(members)
replies := make([]api.PinInfoSerial, lenMembers, lenMembers)
arg := api.Pin{
Cid: h,
}
replies := make([]*api.PinInfo, lenMembers, lenMembers)
ctxs, cancels := rpcutil.CtxsWithCancel(ctx, lenMembers)
defer rpcutil.MultiCancel(cancels)
@ -1342,63 +1340,49 @@ func (c *Cluster) globalPinInfoCid(ctx context.Context, method string, h cid.Cid
members,
"Cluster",
method,
arg.ToSerial(),
rpcutil.CopyPinInfoSerialToIfaces(replies),
h,
rpcutil.CopyPinInfoToIfaces(replies),
)
for i, rserial := range replies {
for i, r := range replies {
e := errs[i]
// Potentially rserial is empty. But ToPinInfo ignores all
// errors from underlying libraries. In that case .Status
// will be TrackerStatusUndefined (0)
r := rserial.ToPinInfo()
// No error. Parse and continue
if e == nil {
pin.PeerMap[members[i]] = r
pin.PeerMap[peer.IDB58Encode(members[i])] = r
continue
}
// Deal with error cases (err != nil): wrap errors in PinInfo
// In this case, we had no answer at all. The contacted peer
// must be offline or unreachable.
if r.Status == api.TrackerStatusUndefined {
logger.Errorf("%s: error in broadcast response from %s: %s ", c.id, members[i], e)
pin.PeerMap[members[i]] = api.PinInfo{
Cid: h,
Peer: members[i],
PeerName: members[i].String(),
Status: api.TrackerStatusClusterError,
TS: time.Now(),
Error: e.Error(),
}
} else { // there was an rpc error, but got a valid response :S
r.Error = e.Error()
pin.PeerMap[members[i]] = r
// unlikely to come down this path
logger.Errorf("%s: error in broadcast response from %s: %s ", c.id, members[i], e)
pin.PeerMap[peer.IDB58Encode(members[i])] = &api.PinInfo{
Cid: h,
Peer: members[i],
PeerName: members[i].String(),
Status: api.TrackerStatusClusterError,
TS: time.Now(),
Error: e.Error(),
}
}
return pin, nil
}
func (c *Cluster) globalPinInfoSlice(ctx context.Context, method string) ([]api.GlobalPinInfo, error) {
func (c *Cluster) globalPinInfoSlice(ctx context.Context, method string) ([]*api.GlobalPinInfo, error) {
ctx, span := trace.StartSpan(ctx, "cluster/globalPinInfoSlice")
defer span.End()
infos := make([]api.GlobalPinInfo, 0)
fullMap := make(map[string]api.GlobalPinInfo)
infos := make([]*api.GlobalPinInfo, 0)
fullMap := make(map[cid.Cid]*api.GlobalPinInfo)
members, err := c.consensus.Peers(ctx)
if err != nil {
logger.Error(err)
return []api.GlobalPinInfo{}, err
return []*api.GlobalPinInfo{}, err
}
lenMembers := len(members)
replies := make([][]api.PinInfoSerial, lenMembers, lenMembers)
replies := make([][]*api.PinInfo, lenMembers, lenMembers)
ctxs, cancels := rpcutil.CtxsWithCancel(ctx, lenMembers)
defer rpcutil.MultiCancel(cancels)
@ -1409,22 +1393,21 @@ func (c *Cluster) globalPinInfoSlice(ctx context.Context, method string) ([]api.
"Cluster",
method,
struct{}{},
rpcutil.CopyPinInfoSerialSliceToIfaces(replies),
rpcutil.CopyPinInfoSliceToIfaces(replies),
)
mergePins := func(pins []api.PinInfoSerial) {
for _, pserial := range pins {
p := pserial.ToPinInfo()
item, ok := fullMap[pserial.Cid]
mergePins := func(pins []*api.PinInfo) {
for _, p := range pins {
item, ok := fullMap[p.Cid]
if !ok {
fullMap[pserial.Cid] = api.GlobalPinInfo{
fullMap[p.Cid] = &api.GlobalPinInfo{
Cid: p.Cid,
PeerMap: map[peer.ID]api.PinInfo{
p.Peer: p,
PeerMap: map[string]*api.PinInfo{
peer.IDB58Encode(p.Peer): p,
},
}
} else {
item.PeerMap[p.Peer] = p
item.PeerMap[peer.IDB58Encode(p.Peer)] = p
}
}
}
@ -1441,9 +1424,8 @@ func (c *Cluster) globalPinInfoSlice(ctx context.Context, method string) ([]api.
// Merge any errors
for p, msg := range erroredPeers {
for cidStr := range fullMap {
c, _ := cid.Decode(cidStr)
fullMap[cidStr].PeerMap[p] = api.PinInfo{
for c := range fullMap {
fullMap[c].PeerMap[peer.IDB58Encode(p)] = &api.PinInfo{
Cid: c,
Peer: p,
Status: api.TrackerStatusClusterError,
@ -1460,25 +1442,25 @@ func (c *Cluster) globalPinInfoSlice(ctx context.Context, method string) ([]api.
return infos, nil
}
func (c *Cluster) getIDForPeer(ctx context.Context, pid peer.ID) (api.ID, error) {
func (c *Cluster) getIDForPeer(ctx context.Context, pid peer.ID) (*api.ID, error) {
ctx, span := trace.StartSpan(ctx, "cluster/getIDForPeer")
defer span.End()
idSerial := api.ID{ID: pid}.ToSerial()
var id api.ID
err := c.rpcClient.CallContext(
ctx,
pid,
"Cluster",
"ID",
struct{}{},
&idSerial,
&id,
)
id := idSerial.ToID()
if err != nil {
logger.Error(err)
id.ID = pid
id.Error = err.Error()
}
return id, err
return &id, err
}
// cidsFromMetaPin expands a meta-pin and returns a list of Cids that
@ -1506,8 +1488,11 @@ func (c *Cluster) cidsFromMetaPin(ctx context.Context, h cid.Cid) ([]cid.Cid, er
return list, nil
}
list = append([]cid.Cid{pin.Reference}, list...)
clusterDagPin, err := c.PinGet(ctx, pin.Reference)
if pin.Reference == nil {
return nil, errors.New("MetaPin.Reference is unset")
}
list = append([]cid.Cid{*pin.Reference}, list...)
clusterDagPin, err := c.PinGet(ctx, *pin.Reference)
if err != nil {
return list, fmt.Errorf("could not get clusterDAG pin from state. Malformed pin?: %s", err)
}

View File

@ -55,8 +55,8 @@ type mockConnector struct {
blocks sync.Map
}
func (ipfs *mockConnector) ID(ctx context.Context) (api.IPFSID, error) {
return api.IPFSID{
func (ipfs *mockConnector) ID(ctx context.Context) (*api.IPFSID, error) {
return &api.IPFSID{
ID: test.TestPeerID1,
}, nil
}
@ -101,12 +101,12 @@ func (ipfs *mockConnector) PinLs(ctx context.Context, filter string) (map[string
return m, nil
}
func (ipfs *mockConnector) SwarmPeers(ctx context.Context) (api.SwarmPeers, error) {
func (ipfs *mockConnector) SwarmPeers(ctx context.Context) ([]peer.ID, error) {
return []peer.ID{test.TestPeerID4, test.TestPeerID5}, nil
}
func (ipfs *mockConnector) RepoStat(ctx context.Context) (api.IPFSRepoStat, error) {
return api.IPFSRepoStat{RepoSize: 100, StorageMax: 1000}, nil
func (ipfs *mockConnector) RepoStat(ctx context.Context) (*api.IPFSRepoStat, error) {
return &api.IPFSRepoStat{RepoSize: 100, StorageMax: 1000}, nil
}
func (ipfs *mockConnector) Resolve(ctx context.Context, path string) (cid.Cid, error) {
@ -120,8 +120,8 @@ func (ipfs *mockConnector) Resolve(ctx context.Context, path string) (cid.Cid, e
func (ipfs *mockConnector) ConnectSwarms(ctx context.Context) error { return nil }
func (ipfs *mockConnector) ConfigKey(keypath string) (interface{}, error) { return nil, nil }
func (ipfs *mockConnector) BlockPut(ctx context.Context, nwm api.NodeWithMeta) error {
ipfs.blocks.Store(nwm.Cid, nwm.Data)
func (ipfs *mockConnector) BlockPut(ctx context.Context, nwm *api.NodeWithMeta) error {
ipfs.blocks.Store(nwm.Cid.String(), nwm.Data)
return nil
}
@ -287,7 +287,7 @@ func TestClusterPinPath(t *testing.T) {
defer cleanRaft()
defer cl.Shutdown(ctx)
pin, err := cl.PinPath(ctx, api.PinPath{Path: test.TestPathIPFS2})
pin, err := cl.PinPath(ctx, &api.PinPath{Path: test.TestPathIPFS2})
if err != nil {
t.Fatal("pin should have worked:", err)
}
@ -296,7 +296,7 @@ func TestClusterPinPath(t *testing.T) {
}
// test an error case
_, err = cl.PinPath(ctx, api.PinPath{Path: test.TestInvalidPath1})
_, err = cl.PinPath(ctx, &api.PinPath{Path: test.TestInvalidPath1})
if err == nil {
t.Error("expected an error but things worked")
}
@ -390,7 +390,7 @@ func TestUnpinShard(t *testing.T) {
pinnedCids := []cid.Cid{}
pinnedCids = append(pinnedCids, root)
metaPin, _ := cl.PinGet(ctx, root)
cDag, _ := cl.PinGet(ctx, metaPin.Reference)
cDag, _ := cl.PinGet(ctx, *metaPin.Reference)
pinnedCids = append(pinnedCids, cDag.Cid)
cDagBlock, _ := cl.ipfs.BlockGet(ctx, cDag.Cid)
cDagNode, _ := sharding.CborDataToNode(cDagBlock, "cbor")
@ -816,7 +816,7 @@ func TestClusterUnpinPath(t *testing.T) {
}
// Unpin after pin should succeed
pin, err := cl.PinPath(ctx, api.PinPath{Path: test.TestPathIPFS2})
pin, err := cl.PinPath(ctx, &api.PinPath{Path: test.TestPathIPFS2})
if err != nil {
t.Fatal("pin with should have worked:", err)
}

View File

@ -22,64 +22,17 @@ func jsonFormatObject(resp interface{}) {
switch resp.(type) {
case nil:
return
case api.ID:
jsonFormatPrint(resp.(api.ID).ToSerial())
case api.GlobalPinInfo:
jsonFormatPrint(resp.(api.GlobalPinInfo).ToSerial())
case api.Pin:
jsonFormatPrint(resp.(api.Pin).ToSerial())
case api.AddedOutput:
jsonFormatPrint(resp.(api.AddedOutput))
case addedOutputQuiet:
// print original object as in JSON it does
// not make sense to have a human "quiet" output.
jsonFormatPrint(resp.(addedOutputQuiet).added)
case api.Version:
jsonFormatPrint(resp.(api.Version))
case api.Metric:
serial := resp.(api.Metric)
textFormatPrintMetric(&serial)
case api.Error:
jsonFormatPrint(resp.(api.Error))
case []api.ID:
r := resp.([]api.ID)
serials := make([]api.IDSerial, len(r), len(r))
for i, item := range r {
serials[i] = item.ToSerial()
}
jsonFormatPrint(serials)
case []api.GlobalPinInfo:
r := resp.([]api.GlobalPinInfo)
serials := make([]api.GlobalPinInfoSerial, len(r), len(r))
for i, item := range r {
serials[i] = item.ToSerial()
}
jsonFormatPrint(serials)
case []api.Pin:
r := resp.([]api.Pin)
serials := make([]api.PinSerial, len(r), len(r))
for i, item := range r {
serials[i] = item.ToSerial()
}
jsonFormatPrint(serials)
case []api.AddedOutput:
serials := resp.([]api.AddedOutput)
jsonFormatPrint(serials)
case []addedOutputQuiet:
case []*addedOutputQuiet:
// print original objects as in JSON it makes
// no sense to have a human "quiet" output
serials := resp.([]addedOutputQuiet)
serials := resp.([]*addedOutputQuiet)
var actual []*api.AddedOutput
for _, s := range serials {
actual = append(actual, s.added)
}
jsonFormatPrint(actual)
case []api.Metric:
serials := resp.([]api.Metric)
jsonFormatPrint(serials)
default:
checkErr("", errors.New("unsupported type returned"))
jsonFormatPrint(resp)
}
}
@ -93,52 +46,44 @@ func textFormatObject(resp interface{}) {
switch resp.(type) {
case nil:
return
case api.ID:
serial := resp.(api.ID).ToSerial()
textFormatPrintIDSerial(&serial)
case api.GlobalPinInfo:
serial := resp.(api.GlobalPinInfo).ToSerial()
textFormatPrintGPInfo(&serial)
case api.Pin:
serial := resp.(api.Pin).ToSerial()
textFormatPrintPin(&serial)
case api.AddedOutput:
serial := resp.(api.AddedOutput)
textFormatPrintAddedOutput(&serial)
case addedOutputQuiet:
serial := resp.(addedOutputQuiet)
textFormatPrintAddedOutputQuiet(&serial)
case api.Version:
serial := resp.(api.Version)
textFormatPrintVersion(&serial)
case api.Error:
serial := resp.(api.Error)
textFormatPrintError(&serial)
case api.Metric:
serial := resp.(api.Metric)
textFormatPrintMetric(&serial)
case []api.ID:
for _, item := range resp.([]api.ID) {
case *api.ID:
textFormatPrintID(resp.(*api.ID))
case *api.GlobalPinInfo:
textFormatPrintGPInfo(resp.(*api.GlobalPinInfo))
case *api.Pin:
textFormatPrintPin(resp.(*api.Pin))
case *api.AddedOutput:
textFormatPrintAddedOutput(resp.(*api.AddedOutput))
case *addedOutputQuiet:
textFormatPrintAddedOutputQuiet(resp.(*addedOutputQuiet))
case *api.Version:
textFormatPrintVersion(resp.(*api.Version))
case *api.Error:
textFormatPrintError(resp.(*api.Error))
case *api.Metric:
textFormatPrintMetric(resp.(*api.Metric))
case []*api.ID:
for _, item := range resp.([]*api.ID) {
textFormatObject(item)
}
case []api.GlobalPinInfo:
for _, item := range resp.([]api.GlobalPinInfo) {
case []*api.GlobalPinInfo:
for _, item := range resp.([]*api.GlobalPinInfo) {
textFormatObject(item)
}
case []api.Pin:
for _, item := range resp.([]api.Pin) {
case []*api.Pin:
for _, item := range resp.([]*api.Pin) {
textFormatObject(item)
}
case []api.AddedOutput:
for _, item := range resp.([]api.AddedOutput) {
case []*api.AddedOutput:
for _, item := range resp.([]*api.AddedOutput) {
textFormatObject(item)
}
case []addedOutputQuiet:
for _, item := range resp.([]addedOutputQuiet) {
case []*addedOutputQuiet:
for _, item := range resp.([]*addedOutputQuiet) {
textFormatObject(item)
}
case []api.Metric:
for _, item := range resp.([]api.Metric) {
case []*api.Metric:
for _, item := range resp.([]*api.Metric) {
textFormatObject(item)
}
default:
@ -146,16 +91,22 @@ func textFormatObject(resp interface{}) {
}
}
func textFormatPrintIDSerial(obj *api.IDSerial) {
func textFormatPrintID(obj *api.ID) {
if obj.Error != "" {
fmt.Printf("%s | ERROR: %s\n", obj.ID, obj.Error)
return
}
fmt.Printf("%s | %s | Sees %d other peers\n", obj.ID, obj.Peername, len(obj.ClusterPeers)-1)
fmt.Printf(
"%s | %s | Sees %d other peers\n",
obj.ID.Pretty(),
obj.Peername,
len(obj.ClusterPeers)-1,
)
addrs := make(sort.StringSlice, 0, len(obj.Addresses))
for _, a := range obj.Addresses {
addrs = append(addrs, string(a))
addrs = append(addrs, a.String())
}
addrs.Sort()
fmt.Println(" > Addresses:")
@ -169,7 +120,7 @@ func textFormatPrintIDSerial(obj *api.IDSerial) {
ipfsAddrs := make(sort.StringSlice, 0, len(obj.Addresses))
for _, a := range obj.IPFS.Addresses {
ipfsAddrs = append(ipfsAddrs, string(a))
ipfsAddrs = append(ipfsAddrs, a.String())
}
ipfsAddrs.Sort()
fmt.Printf(" > IPFS: %s\n", obj.IPFS.ID)
@ -178,33 +129,34 @@ func textFormatPrintIDSerial(obj *api.IDSerial) {
}
}
func textFormatPrintGPInfo(obj *api.GlobalPinInfoSerial) {
func textFormatPrintGPInfo(obj *api.GlobalPinInfo) {
fmt.Printf("%s :\n", obj.Cid)
peers := make(sort.StringSlice, 0, len(obj.PeerMap))
peers := make([]string, 0, len(obj.PeerMap))
for k := range obj.PeerMap {
peers = append(peers, k)
}
peers.Sort()
sort.Strings(peers)
for _, k := range peers {
v := obj.PeerMap[k]
if len(v.PeerName) > 0 {
fmt.Printf(" > %-15s : %s", v.PeerName, strings.ToUpper(v.Status))
fmt.Printf(" > %-15s : %s", v.PeerName, strings.ToUpper(v.Status.String()))
} else {
fmt.Printf(" > %-15s : %s", k, strings.ToUpper(v.Status))
fmt.Printf(" > %-15s : %s", k, strings.ToUpper(v.Status.String()))
}
if v.Error != "" {
fmt.Printf(": %s", v.Error)
}
fmt.Printf(" | %s\n", v.TS)
txt, _ := v.TS.MarshalText()
fmt.Printf(" | %s\n", txt)
}
}
func textFormatPrintPInfo(obj *api.PinInfoSerial) {
gpinfo := api.GlobalPinInfoSerial{
func textFormatPrintPInfo(obj *api.PinInfo) {
gpinfo := api.GlobalPinInfo{
Cid: obj.Cid,
PeerMap: map[string]api.PinInfoSerial{
obj.Peer: *obj,
PeerMap: map[string]*api.PinInfo{
peer.IDB58Encode(obj.Peer): obj,
},
}
textFormatPrintGPInfo(&gpinfo)
@ -214,14 +166,14 @@ func textFormatPrintVersion(obj *api.Version) {
fmt.Println(obj.Version)
}
func textFormatPrintPin(obj *api.PinSerial) {
fmt.Printf("%s | %s | %s | ", obj.Cid, obj.Name, strings.ToUpper(obj.ToPin().Type.String()))
func textFormatPrintPin(obj *api.Pin) {
fmt.Printf("%s | %s | %s | ", obj.Cid, obj.Name, strings.ToUpper(obj.Type.String()))
if obj.ReplicationFactorMin < 0 {
fmt.Printf("Repl. Factor: -1 | Allocations: [everywhere]")
} else {
var sortAlloc sort.StringSlice = obj.Allocations
sortAlloc.Sort()
sortAlloc := api.PeersToStrings(obj.Allocations)
sort.Strings(sortAlloc)
fmt.Printf("Repl. Factor: %d--%d | Allocations: %s",
obj.ReplicationFactorMin, obj.ReplicationFactorMax,
sortAlloc)

View File

@ -39,19 +39,21 @@ var errUnfinishedWrite = errors.New("could not complete write of line to output"
var errUnknownNodeType = errors.New("unsupported node type. Expected cluster or ipfs")
var errCorruptOrdering = errors.New("expected pid to have an ordering within dot writer")
func makeDot(cg api.ConnectGraphSerial, w io.Writer, allIpfs bool) error {
ipfsEdges := make(map[string][]string)
func makeDot(cg *api.ConnectGraph, w io.Writer, allIpfs bool) error {
ipfsEdges := make(map[string][]peer.ID)
for k, v := range cg.IPFSLinks {
ipfsEdges[k] = make([]string, 0)
ipfsEdges[k] = make([]peer.ID, 0)
for _, id := range v {
if _, ok := cg.IPFSLinks[id]; ok || allIpfs {
strPid := peer.IDB58Encode(id)
if _, ok := cg.IPFSLinks[strPid]; ok || allIpfs {
ipfsEdges[k] = append(ipfsEdges[k], id)
}
if allIpfs { // include all swarm peers in the graph
if _, ok := ipfsEdges[id]; !ok {
if _, ok := ipfsEdges[strPid]; !ok {
// if id in IPFSLinks this will be overwritten
// if id not in IPFSLinks this will stay blank
ipfsEdges[id] = make([]string, 0)
ipfsEdges[strPid] = make([]peer.ID, 0)
}
}
}
@ -76,15 +78,16 @@ type dotWriter struct {
w io.Writer
dotGraph dot.Graph
ipfsEdges map[string][]string
clusterEdges map[string][]string
clusterIpfsEdges map[string]string
ipfsEdges map[string][]peer.ID
clusterEdges map[string][]peer.ID
clusterIpfsEdges map[string]peer.ID
}
// writes nodes to dot file output and creates and stores an ordering over nodes
func (dW *dotWriter) addNode(id string, nT nodeType) error {
var node dot.VertexDescription
node.Label = shortID(id)
pid, _ := peer.IDB58Decode(id)
node.Label = pid.String()
switch nT {
case tCluster:
node.ID = fmt.Sprintf("C%d", len(dW.clusterNodes))
@ -130,7 +133,7 @@ func (dW *dotWriter) print() error {
for k, v := range dW.clusterEdges {
for _, id := range v {
toNode := dW.clusterNodes[k]
fromNode := dW.clusterNodes[id]
fromNode := dW.clusterNodes[peer.IDB58Encode(id)]
dW.dotGraph.AddEdge(toNode, fromNode, true)
}
}
@ -140,7 +143,7 @@ func (dW *dotWriter) print() error {
// Write cluster to ipfs edges
for k, id := range dW.clusterIpfsEdges {
toNode := dW.clusterNodes[k]
fromNode := dW.ipfsNodes[id]
fromNode := dW.ipfsNodes[peer.IDB58Encode(id)]
dW.dotGraph.AddEdge(toNode, fromNode, true)
}
dW.dotGraph.AddNewLine()
@ -150,14 +153,14 @@ func (dW *dotWriter) print() error {
for k, v := range dW.ipfsEdges {
for _, id := range v {
toNode := dW.ipfsNodes[k]
fromNode := dW.ipfsNodes[id]
fromNode := dW.ipfsNodes[peer.IDB58Encode(id)]
dW.dotGraph.AddEdge(toNode, fromNode, true)
}
}
return dW.dotGraph.WriteDot(dW.w)
}
func sortedKeys(dict map[string][]string) []string {
func sortedKeys(dict map[string][]peer.ID) []string {
keys := make([]string, len(dict), len(dict))
i := 0
for k := range dict {
@ -167,17 +170,3 @@ func sortedKeys(dict map[string][]string) []string {
sort.Strings(keys)
return keys
}
// truncate the provided peer id string to the 3 last characters. Odds of
// pairwise collisions are less than 1 in 200,000 so with 70 cluster peers
// the chances of a collision are still less than 1 in 100 (birthday paradox).
// As clusters grow bigger than this we can provide a flag for including
// more characters.
func shortID(peerString string) string {
pid, err := peer.IDB58Decode(peerString)
if err != nil {
// Should never get here, panic
panic("shortID called on non-pid string")
}
return pid.String()
}

View File

@ -7,6 +7,8 @@ import (
"strings"
"testing"
peer "github.com/libp2p/go-libp2p-peer"
"github.com/ipfs/ipfs-cluster/api"
)
@ -66,45 +68,58 @@ I2 -> I1
}`
var (
pid1, _ = peer.IDB58Decode("QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD")
pid2, _ = peer.IDB58Decode("QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ")
pid3, _ = peer.IDB58Decode("QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu")
pid4, _ = peer.IDB58Decode("QmXbiVZd93SLiu9TAm21F2y9JwGiFLydbEVkPBaMR3DZDV")
pid5, _ = peer.IDB58Decode("QmPFKAGZbUjdzt8BBx8VTWCe9UeUQVcoqHFehSPzN5LSsq")
pid6, _ = peer.IDB58Decode("QmbU7273zH6jxwDe2nqRmEm2rp5PpqP2xeQr2xCmwbBsuL")
pid7, _ = peer.IDB58Decode("QmQsdAdCHs4PRLi5tcoLfasYppryqQENxgAy4b2aS8xccb")
pid8, _ = peer.IDB58Decode("QmVV2enwXqqQf5esx4v36UeaFQvFehSPzNfi8aaaaaanM8")
pid9, _ = peer.IDB58Decode("QmfCHNQ2vbUmAuJZhE2hEpgiJq4sL1XScWEKnUrVtWZdeD")
)
func TestSimpleIpfsGraphs(t *testing.T) {
cg := api.ConnectGraphSerial{
ClusterID: "QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD",
ClusterLinks: map[string][]string{
"QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD": []string{
"QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ",
"QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu",
cg := api.ConnectGraph{
ClusterID: pid1,
ClusterLinks: map[string][]peer.ID{
peer.IDB58Encode(pid1): []peer.ID{
pid2,
pid3,
},
"QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ": []string{
"QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD",
"QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu",
peer.IDB58Encode(pid2): []peer.ID{
pid1,
pid3,
},
"QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu": []string{
"QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD",
"QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ",
peer.IDB58Encode(pid3): []peer.ID{
pid1,
pid2,
},
},
IPFSLinks: map[string][]string{
"QmXbiVZd93SLiu9TAm21F2y9JwGiFLydbEVkPBaMR3DZDV": []string{
"QmPFKAGZbUjdzt8BBx8VTWCe9UeUQVcoqHFehSPzN5LSsq",
"QmbU7273zH6jxwDe2nqRmEm2rp5PpqP2xeQr2xCmwbBsuL",
IPFSLinks: map[string][]peer.ID{
peer.IDB58Encode(pid4): []peer.ID{
pid5,
pid6,
},
"QmPFKAGZbUjdzt8BBx8VTWCe9UeUQVcoqHFehSPzN5LSsq": []string{
"QmXbiVZd93SLiu9TAm21F2y9JwGiFLydbEVkPBaMR3DZDV",
"QmbU7273zH6jxwDe2nqRmEm2rp5PpqP2xeQr2xCmwbBsuL",
peer.IDB58Encode(pid5): []peer.ID{
pid4,
pid6,
},
"QmbU7273zH6jxwDe2nqRmEm2rp5PpqP2xeQr2xCmwbBsuL": []string{
"QmXbiVZd93SLiu9TAm21F2y9JwGiFLydbEVkPBaMR3DZDV",
"QmPFKAGZbUjdzt8BBx8VTWCe9UeUQVcoqHFehSPzN5LSsq",
peer.IDB58Encode(pid6): []peer.ID{
pid4,
pid5,
},
},
ClustertoIPFS: map[string]string{
"QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD": "QmXbiVZd93SLiu9TAm21F2y9JwGiFLydbEVkPBaMR3DZDV",
"QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ": "QmPFKAGZbUjdzt8BBx8VTWCe9UeUQVcoqHFehSPzN5LSsq",
"QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu": "QmbU7273zH6jxwDe2nqRmEm2rp5PpqP2xeQr2xCmwbBsuL",
ClustertoIPFS: map[string]peer.ID{
peer.IDB58Encode(pid1): pid4,
peer.IDB58Encode(pid2): pid5,
peer.IDB58Encode(pid3): pid6,
},
}
buf := new(bytes.Buffer)
err := makeDot(cg, buf, false)
err := makeDot(&cg, buf, false)
if err != nil {
t.Fatal(err)
}
@ -161,54 +176,54 @@ I4 -> I5
}`
func TestIpfsAllGraphs(t *testing.T) {
cg := api.ConnectGraphSerial{
ClusterID: "QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD",
ClusterLinks: map[string][]string{
"QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD": []string{
"QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ",
"QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu",
cg := api.ConnectGraph{
ClusterID: pid1,
ClusterLinks: map[string][]peer.ID{
peer.IDB58Encode(pid1): []peer.ID{
pid2,
pid3,
},
"QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ": []string{
"QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD",
"QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu",
peer.IDB58Encode(pid2): []peer.ID{
pid1,
pid3,
},
"QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu": []string{
"QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD",
"QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ",
peer.IDB58Encode(pid3): []peer.ID{
pid1,
pid2,
},
},
IPFSLinks: map[string][]string{
"QmXbiVZd93SLiu9TAm21F2y9JwGiFLydbEVkPBaMR3DZDV": []string{
"QmPFKAGZbUjdzt8BBx8VTWCe9UeUQVcoqHFehSPzN5LSsq",
"QmbU7273zH6jxwDe2nqRmEm2rp5PpqP2xeQr2xCmwbBsuL",
"QmQsdAdCHs4PRLi5tcoLfasYppryqQENxgAy4b2aS8xccb",
"QmVV2enwXqqQf5esx4v36UeaFQvFehSPzNfi8aaaaaanM8",
"QmfCHNQ2vbUmAuJZhE2hEpgiJq4sL1XScWEKnUrVtWZdeD",
IPFSLinks: map[string][]peer.ID{
peer.IDB58Encode(pid4): []peer.ID{
pid5,
pid6,
pid7,
pid8,
pid9,
},
"QmPFKAGZbUjdzt8BBx8VTWCe9UeUQVcoqHFehSPzN5LSsq": []string{
"QmXbiVZd93SLiu9TAm21F2y9JwGiFLydbEVkPBaMR3DZDV",
"QmbU7273zH6jxwDe2nqRmEm2rp5PpqP2xeQr2xCmwbBsuL",
"QmQsdAdCHs4PRLi5tcoLfasYppryqQENxgAy4b2aS8xccb",
"QmVV2enwXqqQf5esx4v36UeaFQvFehSPzNfi8aaaaaanM8",
"QmfCHNQ2vbUmAuJZhE2hEpgiJq4sL1XScWEKnUrVtWZdeD",
peer.IDB58Encode(pid5): []peer.ID{
pid4,
pid6,
pid7,
pid8,
pid9,
},
"QmbU7273zH6jxwDe2nqRmEm2rp5PpqP2xeQr2xCmwbBsuL": []string{
"QmXbiVZd93SLiu9TAm21F2y9JwGiFLydbEVkPBaMR3DZDV",
"QmPFKAGZbUjdzt8BBx8VTWCe9UeUQVcoqHFehSPzN5LSsq",
"QmQsdAdCHs4PRLi5tcoLfasYppryqQENxgAy4b2aS8xccb",
"QmVV2enwXqqQf5esx4v36UeaFQvFehSPzNfi8aaaaaanM8",
"QmfCHNQ2vbUmAuJZhE2hEpgiJq4sL1XScWEKnUrVtWZdeD",
peer.IDB58Encode(pid6): []peer.ID{
pid4,
pid5,
pid7,
pid8,
pid9,
},
},
ClustertoIPFS: map[string]string{
"QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD": "QmXbiVZd93SLiu9TAm21F2y9JwGiFLydbEVkPBaMR3DZDV",
"QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ": "QmPFKAGZbUjdzt8BBx8VTWCe9UeUQVcoqHFehSPzN5LSsq",
"QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu": "QmbU7273zH6jxwDe2nqRmEm2rp5PpqP2xeQr2xCmwbBsuL",
ClustertoIPFS: map[string]peer.ID{
peer.IDB58Encode(pid1): pid4,
peer.IDB58Encode(pid2): pid5,
peer.IDB58Encode(pid3): pid6,
},
}
buf := new(bytes.Buffer)
err := makeDot(cg, buf, true)
err := makeDot(&cg, buf, true)
if err != nil {
t.Fatal(err)
}

View File

@ -417,13 +417,13 @@ cluster "pin add".
go func() {
defer wg.Done()
var buffered []addedOutputQuiet
var lastBuf = make([]addedOutputQuiet, 1, 1)
var buffered []*addedOutputQuiet
var lastBuf = make([]*addedOutputQuiet, 1, 1)
var qq = c.Bool("quieter")
var q = c.Bool("quiet") || qq
var bufferResults = c.Bool("no-stream")
for v := range out {
added := addedOutputQuiet{v, q}
added := &addedOutputQuiet{v, q}
lastBuf[0] = added
if bufferResults {
buffered = append(buffered, added)
@ -940,11 +940,11 @@ func parseCredentials(userInput string) (string, string) {
func handlePinResponseFormatFlags(
ctx context.Context,
c *cli.Context,
pin api.Pin,
pin *api.Pin,
target api.TrackerStatus,
) {
var status api.GlobalPinInfo
var status *api.GlobalPinInfo
var cerr error
if c.Bool("wait") {
@ -968,7 +968,7 @@ func waitFor(
ci cid.Cid,
target api.TrackerStatus,
timeout time.Duration,
) (api.GlobalPinInfo, error) {
) (*api.GlobalPinInfo, error) {
ctx := context.Background()

View File

@ -114,16 +114,16 @@ func stateImport(ctx context.Context, r io.Reader) error {
return err
}
pinSerials := make([]api.PinSerial, 0)
pins := make([]*api.Pin, 0)
dec := json.NewDecoder(r)
err = dec.Decode(&pinSerials)
err = dec.Decode(&pins)
if err != nil {
return err
}
stateToImport := mapstate.NewMapState()
for _, pS := range pinSerials {
err = stateToImport.Add(ctx, pS.ToPin())
for _, p := range pins {
err = stateToImport.Add(ctx, p)
if err != nil {
return err
}
@ -170,15 +170,11 @@ func exportState(ctx context.Context, state state.State, w io.Writer) error {
// Serialize pins
pins := state.List(ctx)
pinSerials := make([]api.PinSerial, len(pins), len(pins))
for i, pin := range pins {
pinSerials[i] = pin.ToSerial()
}
// Write json to output file
enc := json.NewEncoder(w)
enc.SetIndent("", " ")
return enc.Encode(pinSerials)
return enc.Encode(pins)
}
// CleanupState cleans the state

View File

@ -2,6 +2,7 @@ package ipfscluster
import (
peer "github.com/libp2p/go-libp2p-peer"
"go.opencensus.io/trace"
"github.com/ipfs/ipfs-cluster/api"
@ -15,16 +16,16 @@ func (c *Cluster) ConnectGraph() (api.ConnectGraph, error) {
defer span.End()
cg := api.ConnectGraph{
IPFSLinks: make(map[peer.ID][]peer.ID),
ClusterLinks: make(map[peer.ID][]peer.ID),
ClustertoIPFS: make(map[peer.ID]peer.ID),
IPFSLinks: make(map[string][]peer.ID),
ClusterLinks: make(map[string][]peer.ID),
ClustertoIPFS: make(map[string]peer.ID),
}
members, err := c.consensus.Peers(ctx)
if err != nil {
return cg, err
}
peersSerials := make([][]api.IDSerial, len(members), len(members))
peers := make([][]*api.ID, len(members), len(members))
ctxs, cancels := rpcutil.CtxsWithCancel(ctx, len(members))
defer rpcutil.MultiCancel(cancels)
@ -35,22 +36,22 @@ func (c *Cluster) ConnectGraph() (api.ConnectGraph, error) {
"Cluster",
"Peers",
struct{}{},
rpcutil.CopyIDSerialSliceToIfaces(peersSerials),
rpcutil.CopyIDSliceToIfaces(peers),
)
for i, err := range errs {
p := members[i]
p := peer.IDB58Encode(members[i])
cg.ClusterLinks[p] = make([]peer.ID, 0)
if err != nil { // Only setting cluster connections when no error occurs
logger.Debugf("RPC error reaching cluster peer %s: %s", p.Pretty(), err.Error())
logger.Debugf("RPC error reaching cluster peer %s: %s", p, err.Error())
continue
}
selfConnection, pID := c.recordClusterLinks(&cg, p, peersSerials[i])
selfConnection, pID := c.recordClusterLinks(&cg, p, peers[i])
// IPFS connections
if !selfConnection {
logger.Warningf("cluster peer %s not its own peer. No ipfs info ", p.Pretty())
logger.Warningf("cluster peer %s not its own peer. No ipfs info ", p)
continue
}
c.recordIPFSLinks(&cg, pID)
@ -59,16 +60,15 @@ func (c *Cluster) ConnectGraph() (api.ConnectGraph, error) {
return cg, nil
}
func (c *Cluster) recordClusterLinks(cg *api.ConnectGraph, p peer.ID, sPeers []api.IDSerial) (bool, api.ID) {
func (c *Cluster) recordClusterLinks(cg *api.ConnectGraph, p string, peers []*api.ID) (bool, *api.ID) {
selfConnection := false
var pID api.ID
for _, sID := range sPeers {
id := sID.ToID()
var pID *api.ID
for _, id := range peers {
if id.Error != "" {
logger.Debugf("Peer %s errored connecting to its peer %s", p.Pretty(), id.ID.Pretty())
logger.Debugf("Peer %s errored connecting to its peer %s", p, id.ID.Pretty())
continue
}
if id.ID == p {
if peer.IDB58Encode(id.ID) == p {
selfConnection = true
pID = id
} else {
@ -78,27 +78,31 @@ func (c *Cluster) recordClusterLinks(cg *api.ConnectGraph, p peer.ID, sPeers []a
return selfConnection, pID
}
func (c *Cluster) recordIPFSLinks(cg *api.ConnectGraph, pID api.ID) {
func (c *Cluster) recordIPFSLinks(cg *api.ConnectGraph, pID *api.ID) {
ipfsID := pID.IPFS.ID
if pID.IPFS.Error != "" { // Only setting ipfs connections when no error occurs
logger.Warningf("ipfs id: %s has error: %s. Skipping swarm connections", ipfsID.Pretty(), pID.IPFS.Error)
return
}
if _, ok := cg.IPFSLinks[pID.ID]; ok {
pid := peer.IDB58Encode(pID.ID)
ipfsPid := peer.IDB58Encode(ipfsID)
if _, ok := cg.IPFSLinks[pid]; ok {
logger.Warningf("ipfs id: %s already recorded, one ipfs daemon in use by multiple cluster peers", ipfsID.Pretty())
}
cg.ClustertoIPFS[pID.ID] = ipfsID
cg.IPFSLinks[ipfsID] = make([]peer.ID, 0)
var swarmPeersS api.SwarmPeersSerial
err := c.rpcClient.Call(pID.ID,
cg.ClustertoIPFS[pid] = ipfsID
cg.IPFSLinks[ipfsPid] = make([]peer.ID, 0)
var swarmPeers []peer.ID
err := c.rpcClient.Call(
pID.ID,
"Cluster",
"IPFSSwarmPeers",
struct{}{},
&swarmPeersS,
&swarmPeers,
)
if err != nil {
return
}
swarmPeers := swarmPeersS.ToSwarmPeers()
cg.IPFSLinks[ipfsID] = swarmPeers
cg.IPFSLinks[ipfsPid] = swarmPeers
}

View File

@ -215,9 +215,9 @@ func (cc *Consensus) Ready(ctx context.Context) <-chan struct{} {
return cc.readyCh
}
func (cc *Consensus) op(ctx context.Context, pin api.Pin, t LogOpType) *LogOp {
func (cc *Consensus) op(ctx context.Context, pin *api.Pin, t LogOpType) *LogOp {
return &LogOp{
Cid: pin.ToSerial(),
Cid: pin,
Type: t,
}
}
@ -272,8 +272,7 @@ func (cc *Consensus) redirectToLeader(method string, arg interface{}) (bool, err
&struct{}{},
)
if finalErr != nil {
logger.Error(finalErr)
logger.Error("retrying to redirect request to leader")
logger.Errorf("retrying to redirect request to leader: %s", finalErr)
time.Sleep(2 * cc.config.RaftConfig.HeartbeatTimeout)
continue
}
@ -342,12 +341,12 @@ func (cc *Consensus) commit(ctx context.Context, op *LogOp, rpcOp string, redire
// LogPin submits a Cid to the shared state of the cluster. It will forward
// the operation to the leader if this is not it.
func (cc *Consensus) LogPin(ctx context.Context, pin api.Pin) error {
func (cc *Consensus) LogPin(ctx context.Context, pin *api.Pin) error {
ctx, span := trace.StartSpan(ctx, "consensus/LogPin")
defer span.End()
op := cc.op(ctx, pin, LogOpPin)
err := cc.commit(ctx, op, "ConsensusLogPin", pin.ToSerial())
err := cc.commit(ctx, op, "ConsensusLogPin", pin)
if err != nil {
return err
}
@ -355,12 +354,12 @@ func (cc *Consensus) LogPin(ctx context.Context, pin api.Pin) error {
}
// LogUnpin removes a Cid from the shared state of the cluster.
func (cc *Consensus) LogUnpin(ctx context.Context, pin api.Pin) error {
func (cc *Consensus) LogUnpin(ctx context.Context, pin *api.Pin) error {
ctx, span := trace.StartSpan(ctx, "consensus/LogUnpin")
defer span.End()
op := cc.op(ctx, pin, LogOpUnpin)
err := cc.commit(ctx, op, "ConsensusLogUnpin", pin.ToSerial())
err := cc.commit(ctx, op, "ConsensusLogUnpin", pin)
if err != nil {
return err
}

View File

@ -21,7 +21,7 @@ func cleanRaft(idn int) {
os.RemoveAll(fmt.Sprintf("raftFolderFromTests-%d", idn))
}
func testPin(c cid.Cid) api.Pin {
func testPin(c cid.Cid) *api.Pin {
p := api.PinCid(c)
p.ReplicationFactorMin = -1
p.ReplicationFactorMax = -1
@ -95,7 +95,7 @@ func TestConsensusPin(t *testing.T) {
t.Error("the operation did not make it to the log:", err)
}
time.Sleep(250 * time.Millisecond)
time.Sleep(5000 * time.Millisecond)
st, err := cc.State(ctx)
if err != nil {
t.Fatal("error getting state:", err)
@ -138,7 +138,7 @@ func TestConsensusUpdate(t *testing.T) {
// Update pin
c2, _ := cid.Decode(test.TestCid2)
pin.Reference = c2
pin.Reference = &c2
err = cc.LogPin(ctx, pin)
if err != nil {
t.Error("the update op did not make it to the log:", err)

View File

@ -26,12 +26,12 @@ type LogOpType int
// It implements the consensus.Op interface and it is used by the
// Consensus component.
type LogOp struct {
SpanCtx trace.SpanContext
TagCtx []byte
Cid api.PinSerial
Type LogOpType
consensus *Consensus
tracing bool
SpanCtx trace.SpanContext `codec:"sctx,omitempty"`
TagCtx []byte `codec:"tctx,omitempty"`
Cid *api.Pin `codec:"p,omitempty"`
Type LogOpType `codec:"t,omitempty"`
consensus *Consensus `codec:"-"`
tracing bool `codec:"-"`
}
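The codec tags above give each field a short key and mark the unexported helpers as
ignored, which is what lets a LogOp be binary-encoded compactly. A minimal round-trip
sketch (the msgpack handle and the helper name are assumptions, not part of this
change; requires "github.com/ugorji/go/codec"):

	func roundTripLogOp(op *LogOp) (*LogOp, error) {
		var buf []byte
		h := &codec.MsgpackHandle{}
		if err := codec.NewEncoderBytes(&buf, h).Encode(op); err != nil {
			return nil, err
		}
		out := &LogOp{}
		err := codec.NewDecoderBytes(buf, h).Decode(out)
		// SpanCtx, TagCtx, Cid and Type survive the trip; consensus and tracing do not.
		return out, err
	}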
// ApplyTo applies the operation to the State
@ -55,16 +55,16 @@ func (op *LogOp) ApplyTo(cstate consensus.State) (consensus.State, error) {
panic("received unexpected state type")
}
// Copy the Cid. We are about to pass it to go-routines
// that will make things with it (read its fields). However,
// as soon as ApplyTo is done, the next operation will be deserealized
// on top of "op". This can cause data races with the slices in
// api.PinSerial, which don't get copied when passed.
pinS := op.Cid.Clone()
pin := op.Cid
// We are about to pass "pin" to go-routines that will work with it
// (read its fields). However, as soon as ApplyTo is done, the next
// operation will be deserialized on top of "op". We nullify it to
// make sure no data races occur.
op.Cid = nil
switch op.Type {
case LogOpPin:
err = state.Add(ctx, pinS.ToPin())
err = state.Add(ctx, pin)
if err != nil {
logger.Error(err)
goto ROLLBACK
@ -75,12 +75,12 @@ func (op *LogOp) ApplyTo(cstate consensus.State) (consensus.State, error) {
"",
"Cluster",
"Track",
pinS,
pin,
&struct{}{},
nil,
)
case LogOpUnpin:
err = state.Rm(ctx, pinS.DecodeCid())
err = state.Rm(ctx, pin.Cid)
if err != nil {
logger.Error(err)
goto ROLLBACK
@ -91,7 +91,7 @@ func (op *LogOp) ApplyTo(cstate consensus.State) (consensus.State, error) {
"",
"Cluster",
"Untrack",
pinS,
pin.Cid,
&struct{}{},
nil,
)

View File

@ -15,7 +15,7 @@ func TestApplyToPin(t *testing.T) {
ctx := context.Background()
cc := testingConsensus(t, 1)
op := &LogOp{
Cid: api.PinSerial{Cid: test.TestCid1},
Cid: api.PinCid(test.MustDecodeCid(test.TestCid1)),
Type: LogOpPin,
consensus: cc,
}
@ -34,7 +34,7 @@ func TestApplyToUnpin(t *testing.T) {
ctx := context.Background()
cc := testingConsensus(t, 1)
op := &LogOp{
Cid: api.PinSerial{Cid: test.TestCid1},
Cid: api.PinCid(test.MustDecodeCid(test.TestCid1)),
Type: LogOpUnpin,
consensus: cc,
}
@ -59,7 +59,7 @@ func TestApplyToBadState(t *testing.T) {
}()
op := &LogOp{
Cid: api.PinSerial{Cid: test.TestCid1},
Cid: api.PinCid(test.MustDecodeCid(test.TestCid1)),
Type: LogOpUnpin,
}

View File

@ -67,12 +67,12 @@ func (disk *Informer) Shutdown(ctx context.Context) error {
// GetMetric returns the metric obtained by this
// Informer.
func (disk *Informer) GetMetric(ctx context.Context) api.Metric {
func (disk *Informer) GetMetric(ctx context.Context) *api.Metric {
ctx, span := trace.StartSpan(ctx, "informer/disk/GetMetric")
defer span.End()
if disk.rpcClient == nil {
return api.Metric{
return &api.Metric{
Name: disk.Name(),
Valid: false,
}
@ -103,7 +103,7 @@ func (disk *Informer) GetMetric(ctx context.Context) api.Metric {
}
}
m := api.Metric{
m := &api.Metric{
Name: disk.Name(),
Value: fmt.Sprintf("%d", metric),
Valid: valid,

View File

@ -58,12 +58,12 @@ func (npi *Informer) Name() string {
// GetMetric contacts the IPFSConnector component and
// requests the `pin ls` command. We return the number
// of pins in IPFS.
func (npi *Informer) GetMetric(ctx context.Context) api.Metric {
func (npi *Informer) GetMetric(ctx context.Context) *api.Metric {
ctx, span := trace.StartSpan(ctx, "informer/numpin/GetMetric")
defer span.End()
if npi.rpcClient == nil {
return api.Metric{
return &api.Metric{
Valid: false,
}
}
@ -83,7 +83,7 @@ func (npi *Informer) GetMetric(ctx context.Context) api.Metric {
valid := err == nil
m := api.Metric{
m := &api.Metric{
Name: MetricName,
Value: fmt.Sprintf("%d", len(pinMap)),
Valid: valid,

View File

@ -39,9 +39,9 @@ type Consensus interface {
// allowing the main component to wait for it during start.
Ready(context.Context) <-chan struct{}
// Logs a pin operation
LogPin(ctx context.Context, c api.Pin) error
LogPin(ctx context.Context, c *api.Pin) error
// Logs an unpin operation
LogUnpin(ctx context.Context, c api.Pin) error
LogUnpin(ctx context.Context, c *api.Pin) error
AddPeer(ctx context.Context, p peer.ID) error
RmPeer(ctx context.Context, p peer.ID) error
State(context.Context) (state.State, error)
@ -67,7 +67,7 @@ type API interface {
// an IPFS daemon. This is a base component.
type IPFSConnector interface {
Component
ID(context.Context) (api.IPFSID, error)
ID(context.Context) (*api.IPFSID, error)
Pin(context.Context, cid.Cid, int) error
Unpin(context.Context, cid.Cid) error
PinLsCid(context.Context, cid.Cid) (api.IPFSPinStatus, error)
@ -76,17 +76,17 @@ type IPFSConnector interface {
// other peers IPFS daemons.
ConnectSwarms(context.Context) error
// SwarmPeers returns the IPFS daemon's swarm peers
SwarmPeers(context.Context) (api.SwarmPeers, error)
SwarmPeers(context.Context) ([]peer.ID, error)
// ConfigKey returns the value for a configuration key.
// Subobjects are reached with keypaths as "Parent/Child/GrandChild...".
ConfigKey(keypath string) (interface{}, error)
// RepoStat returns the current repository size and max limit as
// provided by "repo stat".
RepoStat(context.Context) (api.IPFSRepoStat, error)
RepoStat(context.Context) (*api.IPFSRepoStat, error)
// Resolve returns a cid given a path
Resolve(context.Context, string) (cid.Cid, error)
// BlockPut directly adds a block of data to the IPFS repo
BlockPut(context.Context, api.NodeWithMeta) error
BlockPut(context.Context, *api.NodeWithMeta) error
// BlockGet retrieves the raw data of an IPFS block
BlockGet(context.Context, cid.Cid) ([]byte, error)
}
@ -106,24 +106,24 @@ type PinTracker interface {
Component
// Track tells the tracker that a Cid is now under its supervision
// The tracker may decide to perform an IPFS pin.
Track(context.Context, api.Pin) error
Track(context.Context, *api.Pin) error
// Untrack tells the tracker that a Cid is to be forgotten. The tracker
// may perform an IPFS unpin operation.
Untrack(context.Context, cid.Cid) error
// StatusAll returns the list of pins with their local status.
StatusAll(context.Context) []api.PinInfo
StatusAll(context.Context) []*api.PinInfo
// Status returns the local status of a given Cid.
Status(context.Context, cid.Cid) api.PinInfo
Status(context.Context, cid.Cid) *api.PinInfo
// SyncAll makes sure that all tracked Cids reflect the real IPFS status.
// It returns the list of pins which were updated by the call.
SyncAll(context.Context) ([]api.PinInfo, error)
SyncAll(context.Context) ([]*api.PinInfo, error)
// Sync makes sure that the Cid status reflect the real IPFS status.
// It returns the local status of the Cid.
Sync(context.Context, cid.Cid) (api.PinInfo, error)
Sync(context.Context, cid.Cid) (*api.PinInfo, error)
// RecoverAll calls Recover() for all pins tracked.
RecoverAll(context.Context) ([]api.PinInfo, error)
RecoverAll(context.Context) ([]*api.PinInfo, error)
// Recover retriggers a Pin/Unpin operation on Cids with error status.
Recover(context.Context, cid.Cid) (api.PinInfo, error)
Recover(context.Context, cid.Cid) (*api.PinInfo, error)
}
// Informer provides Metric information from a peer. The metrics produced by
@ -133,7 +133,7 @@ type PinTracker interface {
type Informer interface {
Component
Name() string
GetMetric(context.Context) api.Metric
GetMetric(context.Context) *api.Metric
}
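With GetMetric returning a pointer, an Informer implementation simply builds and
returns a *api.Metric. A minimal sketch (Component methods omitted for brevity; the
type name and values are illustrative, not from this change):

	type staticInformer struct{}

	func (staticInformer) Name() string { return "static" }

	func (staticInformer) GetMetric(ctx context.Context) *api.Metric {
		m := &api.Metric{
			Name:  "static",
			Value: "1",
			Valid: true,
		}
		m.SetTTL(30 * time.Second)
		return m
	}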
// PinAllocator decides where to pin certain content. In order to make such
@ -148,7 +148,7 @@ type PinAllocator interface {
// which are currently pinning the content. The candidates map
// contains the metrics for all peers which are eligible for pinning
// the content.
Allocate(ctx context.Context, c cid.Cid, current, candidates, priority map[peer.ID]api.Metric) ([]peer.ID, error)
Allocate(ctx context.Context, c cid.Cid, current, candidates, priority map[peer.ID]*api.Metric) ([]peer.ID, error)
}
// PeerMonitor is a component in charge of publishing a peer's metrics and
@ -162,17 +162,17 @@ type PeerMonitor interface {
Component
// LogMetric stores a metric. It can be used to manually inject
// a metric to a monitor.
LogMetric(context.Context, api.Metric) error
LogMetric(context.Context, *api.Metric) error
// PublishMetric sends a metric to the rest of the peers.
// How to send it, and to who, is to be decided by the implementation.
PublishMetric(context.Context, api.Metric) error
PublishMetric(context.Context, *api.Metric) error
// LatestMetrics returns a map with the latest metrics of matching name
// for the current cluster peers.
LatestMetrics(ctx context.Context, name string) []api.Metric
LatestMetrics(ctx context.Context, name string) []*api.Metric
// Alerts delivers alerts generated when this peer monitor detects
// a problem (i.e. metrics not arriving as expected). Alerts can be used
// to trigger self-healing measures or re-pinnings of content.
Alerts() <-chan api.Alert
Alerts() <-chan *api.Alert
}
// Tracer implements Component as a way

View File

@ -407,8 +407,8 @@ func TestClustersPeers(t *testing.T) {
t.Fatal("expected as many peers as clusters")
}
clusterIDMap := make(map[peer.ID]api.ID)
peerIDMap := make(map[peer.ID]api.ID)
clusterIDMap := make(map[peer.ID]*api.ID)
peerIDMap := make(map[peer.ID]*api.ID)
for _, c := range clusters {
id := c.ID(ctx)
@ -524,7 +524,8 @@ func TestClustersStatusAll(t *testing.T) {
t.Error("bad info in status")
}
if info[c.host.ID()].Status != api.TrackerStatusPinned {
pid := peer.IDB58Encode(c.host.ID())
if info[pid].Status != api.TrackerStatusPinned {
t.Error("the hash should have been pinned")
}
@ -533,12 +534,13 @@ func TestClustersStatusAll(t *testing.T) {
t.Error(err)
}
pinfo, ok := status.PeerMap[c.host.ID()]
pinfo, ok := status.PeerMap[pid]
if !ok {
t.Fatal("Host not in status")
}
if pinfo.Status != api.TrackerStatusPinned {
t.Error(pinfo.Error)
t.Error("the status should show the hash as pinned")
}
}
@ -576,7 +578,8 @@ func TestClustersStatusAllWithErrors(t *testing.T) {
t.Error("bad number of peers in status")
}
errst := stts.PeerMap[clusters[1].ID(ctx).ID]
pid := peer.IDB58Encode(clusters[1].ID(ctx).ID)
errst := stts.PeerMap[pid]
if errst.Cid.String() != test.TestCid1 {
t.Error("errored pinInfo should have a good cid")
@ -592,7 +595,7 @@ func TestClustersStatusAllWithErrors(t *testing.T) {
t.Error(err)
}
pinfo := status.PeerMap[clusters[1].ID(ctx).ID]
pinfo := status.PeerMap[pid]
if pinfo.Status != api.TrackerStatusClusterError {
t.Error("erroring status should be ClusterError")
@ -693,7 +696,7 @@ func TestClustersSyncAll(t *testing.T) {
t.Error("expected globalsync to have problems with test.ErrorCid")
}
for _, c := range clusters {
inf, ok := ginfos[0].PeerMap[c.host.ID()]
inf, ok := ginfos[0].PeerMap[peer.IDB58Encode(c.host.ID())]
if !ok {
t.Fatal("GlobalPinInfo should have this cluster")
}
@ -721,7 +724,7 @@ func TestClustersSync(t *testing.T) {
// with errors contained in GlobalPinInfo
t.Fatal("did not expect an error")
}
pinfo, ok := ginfo.PeerMap[clusters[j].host.ID()]
pinfo, ok := ginfo.PeerMap[peer.IDB58Encode(clusters[j].host.ID())]
if !ok {
t.Fatal("should have info for this host")
}
@ -734,7 +737,7 @@ func TestClustersSync(t *testing.T) {
}
for _, c := range clusters {
inf, ok := ginfo.PeerMap[c.host.ID()]
inf, ok := ginfo.PeerMap[peer.IDB58Encode(c.host.ID())]
if !ok {
t.Logf("%+v", ginfo)
t.Fatal("GlobalPinInfo should not be empty for this host")
@ -756,7 +759,7 @@ func TestClustersSync(t *testing.T) {
}
for _, c := range clusters {
inf, ok := ginfo.PeerMap[c.host.ID()]
inf, ok := ginfo.PeerMap[peer.IDB58Encode(c.host.ID())]
if !ok {
t.Fatal("GlobalPinInfo should have this cluster")
}
@ -837,7 +840,7 @@ func TestClustersRecover(t *testing.T) {
t.Fatal(err)
}
pinfo, ok := ginfo.PeerMap[clusters[j].host.ID()]
pinfo, ok := ginfo.PeerMap[peer.IDB58Encode(clusters[j].host.ID())]
if !ok {
t.Fatal("should have info for this host")
}
@ -846,7 +849,7 @@ func TestClustersRecover(t *testing.T) {
}
for _, c := range clusters {
inf, ok := ginfo.PeerMap[c.host.ID()]
inf, ok := ginfo.PeerMap[peer.IDB58Encode(c.host.ID())]
if !ok {
t.Fatal("GlobalPinInfo should not be empty for this host")
}
@ -868,7 +871,7 @@ func TestClustersRecover(t *testing.T) {
}
for _, c := range clusters {
inf, ok := ginfo.PeerMap[c.host.ID()]
inf, ok := ginfo.PeerMap[peer.IDB58Encode(c.host.ID())]
if !ok {
t.Fatal("GlobalPinInfo should have this cluster")
}
@ -1345,8 +1348,7 @@ func TestClustersReplicationRealloc(t *testing.T) {
pinDelay()
pin := clusters[j].Pins(ctx)[0]
pinSerial := pin.ToSerial()
allocs := sort.StringSlice(pinSerial.Allocations)
allocs := sort.StringSlice(api.PeersToStrings(pin.Allocations))
allocs.Sort()
allocsStr := fmt.Sprintf("%s", allocs)
@ -1360,8 +1362,7 @@ func TestClustersReplicationRealloc(t *testing.T) {
pinDelay()
pin2 := clusters[j].Pins(ctx)[0]
pinSerial2 := pin2.ToSerial()
allocs2 := sort.StringSlice(pinSerial2.Allocations)
allocs2 := sort.StringSlice(api.PeersToStrings(pin2.Allocations))
allocs2.Sort()
allocsStr2 := fmt.Sprintf("%s", allocs2)
if allocsStr != allocsStr2 {
@ -1478,8 +1479,8 @@ func TestClustersRebalanceOnPeerDown(t *testing.T) {
pinDelay()
pinLocal := 0
pinRemote := 0
var localPinner peer.ID
var remotePinner peer.ID
var localPinner string
var remotePinner string
var remotePinnerCluster *Cluster
status, _ := clusters[0].Status(ctx, h)
@ -1501,9 +1502,10 @@ func TestClustersRebalanceOnPeerDown(t *testing.T) {
// kill the local pinner
for _, c := range clusters {
if c.id == localPinner {
clid := peer.IDB58Encode(c.id)
if clid == localPinner {
c.Shutdown(ctx)
} else if c.id == remotePinner {
} else if clid == remotePinner {
remotePinnerCluster = c
}
}
@ -1521,7 +1523,7 @@ func TestClustersRebalanceOnPeerDown(t *testing.T) {
// peers in clusterIDs are fully connected to each other and the expected ipfs
// mock connectivity exists. Cluster peers not in clusterIDs are assumed to
// be disconnected and the graph should reflect this
func validateClusterGraph(t *testing.T, graph api.ConnectGraph, clusterIDs map[peer.ID]struct{}) {
func validateClusterGraph(t *testing.T, graph api.ConnectGraph, clusterIDs map[string]struct{}) {
// Check that all cluster peers see each other as peers
for id1, peers := range graph.ClusterLinks {
if _, ok := clusterIDs[id1]; !ok {
@ -1530,14 +1532,14 @@ func validateClusterGraph(t *testing.T, graph api.ConnectGraph, clusterIDs map[p
}
continue
}
fmt.Printf("id: %s, peers: %v\n", id1, peers)
t.Logf("id: %s, peers: %v\n", id1, peers)
if len(peers) > len(clusterIDs)-1 {
t.Errorf("More peers recorded in graph than expected")
}
// Make lookup index for peers connected to id1
peerIndex := make(map[peer.ID]struct{})
for _, peer := range peers {
peerIndex[peer] = struct{}{}
peerIndex := make(map[string]struct{})
for _, p := range peers {
peerIndex[peer.IDB58Encode(p)] = struct{}{}
}
for id2 := range clusterIDs {
if _, ok := peerIndex[id2]; id1 != id2 && !ok {
@ -1560,7 +1562,7 @@ func validateClusterGraph(t *testing.T, graph api.ConnectGraph, clusterIDs map[p
if len(graph.IPFSLinks) != 1 {
t.Error("Expected exactly one ipfs peer for all cluster nodes, the mocked peer")
}
links, ok := graph.IPFSLinks[test.TestPeerID1]
links, ok := graph.IPFSLinks[peer.IDB58Encode(test.TestPeerID1)]
if !ok {
t.Error("Expected the mocked ipfs peer to be a node in the graph")
} else {
@ -1598,9 +1600,9 @@ func TestClustersGraphConnected(t *testing.T) {
t.Fatal(err)
}
clusterIDs := make(map[peer.ID]struct{})
clusterIDs := make(map[string]struct{})
for _, c := range clusters {
id := c.ID(ctx).ID
id := peer.IDB58Encode(c.ID(ctx).ID)
clusterIDs[id] = struct{}{}
}
validateClusterGraph(t, graph, clusterIDs)
@ -1642,12 +1644,12 @@ func TestClustersGraphUnhealthy(t *testing.T) {
t.Fatal(err)
}
clusterIDs := make(map[peer.ID]struct{})
clusterIDs := make(map[string]struct{})
for i, c := range clusters {
if i == discon1 || i == discon2 {
continue
}
id := c.ID(ctx).ID
id := peer.IDB58Encode(c.ID(ctx).ID)
clusterIDs[id] = struct{}{}
}
validateClusterGraph(t, graph, clusterIDs)

View File

@ -30,7 +30,6 @@ import (
logging "github.com/ipfs/go-log"
rpc "github.com/libp2p/go-libp2p-gorpc"
peer "github.com/libp2p/go-libp2p-peer"
ma "github.com/multiformats/go-multiaddr"
madns "github.com/multiformats/go-multiaddr-dns"
manet "github.com/multiformats/go-multiaddr-net"
)
@ -219,38 +218,37 @@ func (ipfs *Connector) Shutdown(ctx context.Context) error {
// ID performs an ID request against the configured
// IPFS daemon. It returns the fetched information.
// If the request fails, or the parsing fails, it
// returns an error and an empty IPFSID which also
// contains the error message.
func (ipfs *Connector) ID(ctx context.Context) (api.IPFSID, error) {
// returns an error.
func (ipfs *Connector) ID(ctx context.Context) (*api.IPFSID, error) {
ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/ID")
defer span.End()
ctx, cancel := context.WithTimeout(ctx, ipfs.config.IPFSRequestTimeout)
defer cancel()
id := api.IPFSID{}
body, err := ipfs.postCtx(ctx, "id", "", nil)
if err != nil {
id.Error = err.Error()
return id, err
return nil, err
}
var res ipfsIDResp
err = json.Unmarshal(body, &res)
if err != nil {
id.Error = err.Error()
return id, err
return nil, err
}
pID, err := peer.IDB58Decode(res.ID)
if err != nil {
id.Error = err.Error()
return id, err
return nil, err
}
id.ID = pID
mAddrs := make([]ma.Multiaddr, len(res.Addresses), len(res.Addresses))
id := &api.IPFSID{
ID: pID,
}
mAddrs := make([]api.Multiaddr, len(res.Addresses), len(res.Addresses))
for i, strAddr := range res.Addresses {
mAddr, err := ma.NewMultiaddr(strAddr)
mAddr, err := api.NewMultiaddr(strAddr)
if err != nil {
id.Error = err.Error()
return id, err
@ -498,30 +496,29 @@ func (ipfs *Connector) ConnectSwarms(ctx context.Context) error {
ctx, cancel := context.WithTimeout(ctx, ipfs.config.IPFSRequestTimeout)
defer cancel()
idsSerial := make([]api.IDSerial, 0)
var ids []*api.ID
err := ipfs.rpcClient.CallContext(
ctx,
"",
"Cluster",
"Peers",
struct{}{},
&idsSerial,
&ids,
)
if err != nil {
logger.Error(err)
return err
}
logger.Debugf("%+v", idsSerial)
for _, idSerial := range idsSerial {
ipfsID := idSerial.IPFS
for _, id := range ids {
ipfsID := id.IPFS
for _, addr := range ipfsID.Addresses {
// This is a best effort attempt
// We ignore errors which happen
// when passing in a bunch of addresses
_, err := ipfs.postCtx(
ctx,
fmt.Sprintf("swarm/connect?arg=%s", addr),
fmt.Sprintf("swarm/connect?arg=%s", addr.String()),
"",
nil,
)
@ -583,7 +580,7 @@ func getConfigValue(path []string, cfg map[string]interface{}) (interface{}, err
// RepoStat returns the DiskUsage and StorageMax repo/stat values from the
// ipfs daemon, in bytes, wrapped as an IPFSRepoStat object.
func (ipfs *Connector) RepoStat(ctx context.Context) (api.IPFSRepoStat, error) {
func (ipfs *Connector) RepoStat(ctx context.Context) (*api.IPFSRepoStat, error) {
ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/RepoStat")
defer span.End()
@ -592,16 +589,16 @@ func (ipfs *Connector) RepoStat(ctx context.Context) (api.IPFSRepoStat, error) {
res, err := ipfs.postCtx(ctx, "repo/stat?size-only=true", "", nil)
if err != nil {
logger.Error(err)
return api.IPFSRepoStat{}, err
return nil, err
}
var stats api.IPFSRepoStat
err = json.Unmarshal(res, &stats)
if err != nil {
logger.Error(err)
return stats, err
return nil, err
}
return stats, nil
return &stats, nil
}
// Resolve accepts ipfs or ipns path and resolves it into a cid
@ -640,26 +637,26 @@ func (ipfs *Connector) Resolve(ctx context.Context, path string) (cid.Cid, error
}
// SwarmPeers returns the peers currently connected to this ipfs daemon.
func (ipfs *Connector) SwarmPeers(ctx context.Context) (api.SwarmPeers, error) {
func (ipfs *Connector) SwarmPeers(ctx context.Context) ([]peer.ID, error) {
ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/SwarmPeers")
defer span.End()
ctx, cancel := context.WithTimeout(ctx, ipfs.config.IPFSRequestTimeout)
defer cancel()
swarm := api.SwarmPeers{}
res, err := ipfs.postCtx(ctx, "swarm/peers", "", nil)
if err != nil {
logger.Error(err)
return swarm, err
return nil, err
}
var peersRaw ipfsSwarmPeersResp
err = json.Unmarshal(res, &peersRaw)
if err != nil {
logger.Error(err)
return swarm, err
return nil, err
}
swarm = make([]peer.ID, len(peersRaw.Peers))
swarm := make([]peer.ID, len(peersRaw.Peers))
for i, p := range peersRaw.Peers {
pID, err := peer.IDB58Decode(p.Peer)
if err != nil {
@ -673,7 +670,7 @@ func (ipfs *Connector) SwarmPeers(ctx context.Context) (api.SwarmPeers, error) {
// BlockPut triggers an ipfs block put on the given data, inserting the block
// into the ipfs daemon's repo.
func (ipfs *Connector) BlockPut(ctx context.Context, b api.NodeWithMeta) error {
func (ipfs *Connector) BlockPut(ctx context.Context, b *api.NodeWithMeta) error {
ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/BlockPut")
defer span.End()

View File

@ -219,9 +219,9 @@ func TestBlockPut(t *testing.T) {
defer ipfs.Shutdown(ctx)
data := []byte(test.TestCid4Data)
err := ipfs.BlockPut(ctx, api.NodeWithMeta{
err := ipfs.BlockPut(ctx, &api.NodeWithMeta{
Data: data,
Cid: test.TestCid4,
Cid: test.MustDecodeCid(test.TestCid4),
Format: "raw",
})
if err != nil {
@ -246,9 +246,9 @@ func TestBlockGet(t *testing.T) {
}
// Put and then successfully get
err = ipfs.BlockPut(ctx, api.NodeWithMeta{
err = ipfs.BlockPut(ctx, &api.NodeWithMeta{
Data: test.TestShardData,
Cid: test.TestShardCid,
Cid: test.MustDecodeCid(test.TestShardCid),
Format: "cbor",
})
if err != nil {

View File

@ -13,6 +13,7 @@ var LoggingFacilities = map[string]string{
"ipfshttp": "INFO",
"monitor": "INFO",
"mapstate": "INFO",
"dsstate": "INFO",
"consensus": "INFO",
"pintracker": "INFO",
"ascendalloc": "INFO",

View File

@ -103,7 +103,7 @@ func (mon *Monitor) Shutdown(ctx context.Context) error {
}
// LogMetric stores a metric so it can later be retrieved.
func (mon *Monitor) LogMetric(ctx context.Context, m api.Metric) error {
func (mon *Monitor) LogMetric(ctx context.Context, m *api.Metric) error {
ctx, span := trace.StartSpan(ctx, "monitor/basic/LogMetric")
defer span.End()
@ -113,7 +113,7 @@ func (mon *Monitor) LogMetric(ctx context.Context, m api.Metric) error {
}
// PublishMetric broadcasts a metric to all current cluster peers.
func (mon *Monitor) PublishMetric(ctx context.Context, m api.Metric) error {
func (mon *Monitor) PublishMetric(ctx context.Context, m *api.Metric) error {
ctx, span := trace.StartSpan(ctx, "monitor/basic/PublishMetric")
defer span.End()
@ -197,7 +197,7 @@ func (mon *Monitor) getPeers(ctx context.Context) ([]peer.ID, error) {
// LatestMetrics returns last known VALID metrics of a given type. A metric
// is only valid if it has not expired and belongs to a current cluster peer.
func (mon *Monitor) LatestMetrics(ctx context.Context, name string) []api.Metric {
func (mon *Monitor) LatestMetrics(ctx context.Context, name string) []*api.Metric {
ctx, span := trace.StartSpan(ctx, "monitor/basic/LatestMetrics")
defer span.End()
@ -206,7 +206,7 @@ func (mon *Monitor) LatestMetrics(ctx context.Context, name string) []api.Metric
// Make sure we only return metrics in the current peerset
peers, err := mon.getPeers(ctx)
if err != nil {
return []api.Metric{}
return []*api.Metric{}
}
return metrics.PeersetFilter(latest, peers)
@ -214,6 +214,6 @@ func (mon *Monitor) LatestMetrics(ctx context.Context, name string) []api.Metric
// Alerts returns a channel on which alerts are sent when the
// monitor detects a failure.
func (mon *Monitor) Alerts() <-chan api.Alert {
func (mon *Monitor) Alerts() <-chan *api.Alert {
return mon.checker.Alerts()
}

View File

@ -28,10 +28,10 @@ func newMetricFactory() *metricFactory {
}
}
func (mf *metricFactory) newMetric(n string, p peer.ID) api.Metric {
func (mf *metricFactory) newMetric(n string, p peer.ID) *api.Metric {
mf.l.Lock()
defer mf.l.Unlock()
m := api.Metric{
m := &api.Metric{
Name: n,
Peer: p,
Value: fmt.Sprintf("%d", mf.counter),
@ -91,7 +91,7 @@ func TestLogMetricConcurrent(t *testing.T) {
f := func() {
defer wg.Done()
for i := 0; i < 25; i++ {
mt := api.Metric{
mt := &api.Metric{
Name: "test",
Peer: test.TestPeerID1,
Value: fmt.Sprintf("%d", time.Now().UnixNano()),

View File

@ -19,7 +19,7 @@ var ErrAlertChannelFull = errors.New("alert channel is full")
// Checker provides utilities to find expired metrics
// for a given peerset and send alerts if it proceeds to do so.
type Checker struct {
alertCh chan api.Alert
alertCh chan *api.Alert
metrics *Store
}
@ -27,7 +27,7 @@ type Checker struct {
// MetricsStore.
func NewChecker(metrics *Store) *Checker {
return &Checker{
alertCh: make(chan api.Alert, AlertChannelCap),
alertCh: make(chan *api.Alert, AlertChannelCap),
metrics: metrics,
}
}
@ -49,7 +49,7 @@ func (mc *Checker) CheckPeers(peers []peer.ID) error {
}
func (mc *Checker) alert(pid peer.ID, metricName string) error {
alrt := api.Alert{
alrt := &api.Alert{
Peer: pid,
MetricName: metricName,
}
@ -62,7 +62,7 @@ func (mc *Checker) alert(pid peer.ID, metricName string) error {
}
// Alerts returns a channel which gets notified by CheckPeers.
func (mc *Checker) Alerts() <-chan api.Alert {
func (mc *Checker) Alerts() <-chan *api.Alert {
return mc.alertCh
}

View File

@ -15,7 +15,7 @@ func TestChecker(t *testing.T) {
metrics := NewStore()
checker := NewChecker(metrics)
metr := api.Metric{
metr := &api.Metric{
Name: "test",
Peer: test.TestPeerID1,
Value: "1",
@ -59,7 +59,7 @@ func TestCheckerWatch(t *testing.T) {
metrics := NewStore()
checker := NewChecker(metrics)
metr := api.Metric{
metr := &api.Metric{
Name: "test",
Peer: test.TestPeerID1,
Value: "1",

View File

@ -25,7 +25,7 @@ func NewStore() *Store {
}
// Add inserts a new metric in Metrics.
func (mtrs *Store) Add(m api.Metric) {
func (mtrs *Store) Add(m *api.Metric) {
mtrs.mux.Lock()
defer mtrs.mux.Unlock()
@ -49,16 +49,16 @@ func (mtrs *Store) Add(m api.Metric) {
// Latest returns all the last known valid metrics. A metric is valid
// if it has not expired.
func (mtrs *Store) Latest(name string) []api.Metric {
func (mtrs *Store) Latest(name string) []*api.Metric {
mtrs.mux.RLock()
defer mtrs.mux.RUnlock()
byPeer, ok := mtrs.byName[name]
if !ok {
return []api.Metric{}
return []*api.Metric{}
}
metrics := make([]api.Metric, 0, len(byPeer))
metrics := make([]*api.Metric, 0, len(byPeer))
for _, window := range byPeer {
m, err := window.Latest()
if err != nil || m.Discard() {
@ -71,11 +71,11 @@ func (mtrs *Store) Latest(name string) []api.Metric {
// PeerMetrics returns the latest metrics for a given peer ID for
// all known metrics types. It may return expired metrics.
func (mtrs *Store) PeerMetrics(pid peer.ID) []api.Metric {
func (mtrs *Store) PeerMetrics(pid peer.ID) []*api.Metric {
mtrs.mux.RLock()
defer mtrs.mux.RUnlock()
result := make([]api.Metric, 0)
result := make([]*api.Metric, 0)
for _, byPeer := range mtrs.byName {
window, ok := byPeer[pid]

View File

@ -11,7 +11,7 @@ import (
func TestStoreLatest(t *testing.T) {
store := NewStore()
metr := api.Metric{
metr := &api.Metric{
Name: "test",
Peer: test.TestPeerID1,
Value: "1",

View File

@ -8,13 +8,13 @@ import (
// PeersetFilter removes all metrics not belonging to the given
// peerset
func PeersetFilter(metrics []api.Metric, peerset []peer.ID) []api.Metric {
func PeersetFilter(metrics []*api.Metric, peerset []peer.ID) []*api.Metric {
peerMap := make(map[peer.ID]struct{})
for _, pid := range peerset {
peerMap[pid] = struct{}{}
}
filtered := make([]api.Metric, 0, len(metrics))
filtered := make([]*api.Metric, 0, len(metrics))
for _, metric := range metrics {
_, ok := peerMap[metric.Peer]

View File

@ -18,7 +18,7 @@ var ErrNoMetrics = errors.New("no metrics have been added to this window")
// Window implements a circular queue to store metrics.
type Window struct {
last int
window []api.Metric
window []*api.Metric
}
// NewWindow creates an instance with the given
@ -28,7 +28,7 @@ func NewWindow(windowCap int) *Window {
panic("invalid windowCap")
}
w := make([]api.Metric, 0, windowCap)
w := make([]*api.Metric, 0, windowCap)
return &Window{
last: 0,
window: w,
@ -38,7 +38,7 @@ func NewWindow(windowCap int) *Window {
// Add adds a new metric to the window. If the window capacity
// has been reached, the oldest metric (by the time it was added),
// will be discarded.
func (mw *Window) Add(m api.Metric) {
func (mw *Window) Add(m *api.Metric) {
if len(mw.window) < cap(mw.window) {
mw.window = append(mw.window, m)
mw.last = len(mw.window) - 1
@ -53,9 +53,9 @@ func (mw *Window) Add(m api.Metric) {
// Latest returns the last metric added. It returns an error
// if no metrics were added.
func (mw *Window) Latest() (api.Metric, error) {
func (mw *Window) Latest() (*api.Metric, error) {
if len(mw.window) == 0 {
return api.Metric{}, ErrNoMetrics
return nil, ErrNoMetrics
}
return mw.window[mw.last], nil
}
@ -63,9 +63,9 @@ func (mw *Window) Latest() (api.Metric, error) {
// All returns all the metrics in the window, in the inverse order
// they were Added. That is, result[0] will be the last added
// metric.
func (mw *Window) All() []api.Metric {
func (mw *Window) All() []*api.Metric {
wlen := len(mw.window)
res := make([]api.Metric, 0, wlen)
res := make([]*api.Metric, 0, wlen)
if wlen == 0 {
return res
}
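A short usage sketch of the pointer-based window (illustrative only; it mirrors the
calls exercised in the tests below):

	w := NewWindow(4)
	m := &api.Metric{Name: "test", Peer: "peer1", Value: "1", Valid: true}
	m.SetTTL(5 * time.Second)
	w.Add(m)
	latest, err := w.Latest() // *api.Metric, or ErrNoMetrics when the window is empty
	_, _ = latest, err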

View File

@ -7,6 +7,17 @@ import (
"github.com/ipfs/ipfs-cluster/api"
)
func makeMetric(value string) *api.Metric {
metr := &api.Metric{
Name: "test",
Peer: "peer1",
Value: value,
Valid: true,
}
metr.SetTTL(5 * time.Second)
return metr
}
func TestMetricsWindow(t *testing.T) {
mw := NewWindow(4)
@ -19,15 +30,7 @@ func TestMetricsWindow(t *testing.T) {
t.Error("expected 0 metrics")
}
metr := api.Metric{
Name: "test",
Peer: "peer1",
Value: "1",
Valid: true,
}
metr.SetTTL(5 * time.Second)
mw.Add(metr)
mw.Add(makeMetric("1"))
metr2, err := mw.Latest()
if err != nil {
@ -38,10 +41,8 @@ func TestMetricsWindow(t *testing.T) {
t.Error("expected different value")
}
metr.Value = "2"
mw.Add(metr)
metr.Value = "3"
mw.Add(metr)
mw.Add(makeMetric("2"))
mw.Add(makeMetric("3"))
all := mw.All()
if len(all) != 3 {
@ -56,10 +57,8 @@ func TestMetricsWindow(t *testing.T) {
t.Error("older metric should be second")
}
metr.Value = "4"
mw.Add(metr)
metr.Value = "5"
mw.Add(metr)
mw.Add(makeMetric("4"))
mw.Add(makeMetric("5"))
all = mw.All()
if len(all) != 4 {

View File

@ -130,7 +130,7 @@ func (mon *Monitor) logFromPubsub() {
metric.Peer,
)
err = mon.LogMetric(ctx, metric)
err = mon.LogMetric(ctx, &metric)
if err != nil {
logger.Error(err)
continue
@ -170,7 +170,7 @@ func (mon *Monitor) Shutdown(ctx context.Context) error {
}
// LogMetric stores a metric so it can later be retrieved.
func (mon *Monitor) LogMetric(ctx context.Context, m api.Metric) error {
func (mon *Monitor) LogMetric(ctx context.Context, m *api.Metric) error {
ctx, span := trace.StartSpan(ctx, "monitor/pubsub/LogMetric")
defer span.End()
@ -180,7 +180,7 @@ func (mon *Monitor) LogMetric(ctx context.Context, m api.Metric) error {
}
// PublishMetric broadcasts a metric to all current cluster peers.
func (mon *Monitor) PublishMetric(ctx context.Context, m api.Metric) error {
func (mon *Monitor) PublishMetric(ctx context.Context, m *api.Metric) error {
ctx, span := trace.StartSpan(ctx, "monitor/pubsub/PublishMetric")
defer span.End()
@ -235,7 +235,7 @@ func (mon *Monitor) getPeers(ctx context.Context) ([]peer.ID, error) {
// LatestMetrics returns last known VALID metrics of a given type. A metric
// is only valid if it has not expired and belongs to a current cluster peer.
func (mon *Monitor) LatestMetrics(ctx context.Context, name string) []api.Metric {
func (mon *Monitor) LatestMetrics(ctx context.Context, name string) []*api.Metric {
ctx, span := trace.StartSpan(ctx, "monitor/pubsub/LatestMetrics")
defer span.End()
@ -244,7 +244,7 @@ func (mon *Monitor) LatestMetrics(ctx context.Context, name string) []api.Metric
// Make sure we only return metrics in the current peerset
peers, err := mon.getPeers(ctx)
if err != nil {
return []api.Metric{}
return []*api.Metric{}
}
return metrics.PeersetFilter(latest, peers)
@ -252,6 +252,6 @@ func (mon *Monitor) LatestMetrics(ctx context.Context, name string) []api.Metric
// Alerts returns a channel on which alerts are sent when the
// monitor detects a failure.
func (mon *Monitor) Alerts() <-chan api.Alert {
func (mon *Monitor) Alerts() <-chan *api.Alert {
return mon.checker.Alerts()
}
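
A hedged usage sketch of the pointer-based monitor API; pubsubmon is assumed to be the package holding Monitor, and the metric values are illustrative:

import (
	"context"
	"fmt"
	"time"

	peer "github.com/libp2p/go-libp2p-peer"

	"github.com/ipfs/ipfs-cluster/api"
	pubsubmon "github.com/ipfs/ipfs-cluster/monitor/pubsubmon"
)

func logPing(ctx context.Context, mon *pubsubmon.Monitor, self peer.ID) error {
	m := &api.Metric{
		Name:  "ping",
		Peer:  self,
		Value: "1",
		Valid: true,
	}
	m.SetTTL(30 * time.Second)

	// Store locally, then broadcast to the rest of the cluster.
	if err := mon.LogMetric(ctx, m); err != nil {
		return err
	}
	if err := mon.PublishMetric(ctx, m); err != nil {
		return err
	}

	// Only valid, non-expired metrics from current peers come back.
	for _, latest := range mon.LatestMetrics(ctx, "ping") {
		fmt.Println(latest.Peer, latest.Value)
	}
	return nil
}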

View File

@ -35,10 +35,10 @@ func newMetricFactory() *metricFactory {
}
}
func (mf *metricFactory) newMetric(n string, p peer.ID) api.Metric {
func (mf *metricFactory) newMetric(n string, p peer.ID) *api.Metric {
mf.l.Lock()
defer mf.l.Unlock()
m := api.Metric{
m := &api.Metric{
Name: n,
Peer: p,
Value: fmt.Sprintf("%d", mf.counter),
@ -111,7 +111,7 @@ func TestLogMetricConcurrent(t *testing.T) {
f := func() {
defer wg.Done()
for i := 0; i < 25; i++ {
mt := api.Metric{
mt := &api.Metric{
Name: "test",
Peer: test.TestPeerID1,
Value: fmt.Sprintf("%d", time.Now().UnixNano()),

View File

@ -139,7 +139,7 @@ func (mpt *MapPinTracker) pin(op *optracker.Operation) error {
"",
"Cluster",
"IPFSPin",
op.Pin().ToSerial(),
op.Pin(),
&struct{}{},
)
if err != nil {
@ -158,7 +158,7 @@ func (mpt *MapPinTracker) unpin(op *optracker.Operation) error {
"",
"Cluster",
"IPFSUnpin",
op.Pin().ToSerial(),
op.Pin().Cid,
&struct{}{},
)
if err != nil {
@ -168,7 +168,7 @@ func (mpt *MapPinTracker) unpin(op *optracker.Operation) error {
}
// puts a new operation on the queue, unless ongoing exists
func (mpt *MapPinTracker) enqueue(ctx context.Context, c api.Pin, typ optracker.OperationType, ch chan *optracker.Operation) error {
func (mpt *MapPinTracker) enqueue(ctx context.Context, c *api.Pin, typ optracker.OperationType, ch chan *optracker.Operation) error {
ctx, span := trace.StartSpan(ctx, "tracker/map/enqueue")
defer span.End()
@ -191,7 +191,7 @@ func (mpt *MapPinTracker) enqueue(ctx context.Context, c api.Pin, typ optracker.
// Track tells the MapPinTracker to start managing a Cid,
// possibly triggering Pin operations on the IPFS daemon.
func (mpt *MapPinTracker) Track(ctx context.Context, c api.Pin) error {
func (mpt *MapPinTracker) Track(ctx context.Context, c *api.Pin) error {
ctx, span := trace.StartSpan(ctx, "tracker/map/Track")
defer span.End()
@ -239,7 +239,7 @@ func (mpt *MapPinTracker) Untrack(ctx context.Context, c cid.Cid) error {
// Status returns information for a Cid tracked by this
// MapPinTracker.
func (mpt *MapPinTracker) Status(ctx context.Context, c cid.Cid) api.PinInfo {
func (mpt *MapPinTracker) Status(ctx context.Context, c cid.Cid) *api.PinInfo {
ctx, span := trace.StartSpan(mpt.ctx, "tracker/map/Status")
defer span.End()
@ -248,7 +248,7 @@ func (mpt *MapPinTracker) Status(ctx context.Context, c cid.Cid) api.PinInfo {
// StatusAll returns information for all Cids tracked by this
// MapPinTracker.
func (mpt *MapPinTracker) StatusAll(ctx context.Context) []api.PinInfo {
func (mpt *MapPinTracker) StatusAll(ctx context.Context) []*api.PinInfo {
ctx, span := trace.StartSpan(mpt.ctx, "tracker/map/StatusAll")
defer span.End()
@ -263,7 +263,7 @@ func (mpt *MapPinTracker) StatusAll(ctx context.Context) []api.PinInfo {
// Pins in error states can be recovered with Recover().
// An error is returned if we are unable to contact
// the IPFS daemon.
func (mpt *MapPinTracker) Sync(ctx context.Context, c cid.Cid) (api.PinInfo, error) {
func (mpt *MapPinTracker) Sync(ctx context.Context, c cid.Cid) (*api.PinInfo, error) {
ctx, span := trace.StartSpan(mpt.ctx, "tracker/map/Sync")
defer span.End()
@ -272,7 +272,7 @@ func (mpt *MapPinTracker) Sync(ctx context.Context, c cid.Cid) (api.PinInfo, err
"",
"Cluster",
"IPFSPinLsCid",
api.PinCid(c).ToSerial(),
c,
&ips,
)
@ -292,12 +292,12 @@ func (mpt *MapPinTracker) Sync(ctx context.Context, c cid.Cid) (api.PinInfo, err
// were updated or have errors. Cids in error states can be recovered
// with Recover().
// An error is returned if we are unable to contact the IPFS daemon.
func (mpt *MapPinTracker) SyncAll(ctx context.Context) ([]api.PinInfo, error) {
func (mpt *MapPinTracker) SyncAll(ctx context.Context) ([]*api.PinInfo, error) {
ctx, span := trace.StartSpan(mpt.ctx, "tracker/map/SyncAll")
defer span.End()
var ipsMap map[string]api.IPFSPinStatus
var results []api.PinInfo
var results []*api.PinInfo
err := mpt.rpcClient.Call(
"",
"Cluster",
@ -323,7 +323,7 @@ func (mpt *MapPinTracker) SyncAll(ctx context.Context) ([]api.PinInfo, error) {
status := mpt.StatusAll(ctx)
for _, pInfoOrig := range status {
var pInfoNew api.PinInfo
var pInfoNew *api.PinInfo
c := pInfoOrig.Cid
ips, ok := ipsMap[c.String()]
if !ok {
@ -341,7 +341,7 @@ func (mpt *MapPinTracker) SyncAll(ctx context.Context) ([]api.PinInfo, error) {
return results, nil
}
func (mpt *MapPinTracker) syncStatus(ctx context.Context, c cid.Cid, ips api.IPFSPinStatus) api.PinInfo {
func (mpt *MapPinTracker) syncStatus(ctx context.Context, c cid.Cid, ips api.IPFSPinStatus) *api.PinInfo {
status, ok := mpt.optracker.Status(ctx, c)
if !ok {
status = api.TrackerStatusUnpinned
@ -403,7 +403,7 @@ func (mpt *MapPinTracker) syncStatus(ctx context.Context, c cid.Cid, ips api.IPF
// Recover will re-queue a Cid in error state for the failed operation,
// possibly retriggering an IPFS pinning operation.
func (mpt *MapPinTracker) Recover(ctx context.Context, c cid.Cid) (api.PinInfo, error) {
func (mpt *MapPinTracker) Recover(ctx context.Context, c cid.Cid) (*api.PinInfo, error) {
ctx, span := trace.StartSpan(mpt.ctx, "tracker/map/Recover")
defer span.End()
@ -421,12 +421,12 @@ func (mpt *MapPinTracker) Recover(ctx context.Context, c cid.Cid) (api.PinInfo,
}
// RecoverAll attempts to recover all items tracked by this peer.
func (mpt *MapPinTracker) RecoverAll(ctx context.Context) ([]api.PinInfo, error) {
func (mpt *MapPinTracker) RecoverAll(ctx context.Context) ([]*api.PinInfo, error) {
ctx, span := trace.StartSpan(mpt.ctx, "tracker/map/RecoverAll")
defer span.End()
pInfos := mpt.optracker.GetAll(ctx)
var results []api.PinInfo
var results []*api.PinInfo
for _, pInfo := range pInfos {
res, err := mpt.Recover(ctx, pInfo.Cid)
results = append(results, res)
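
As a rough caller-side sketch (the maptracker import path and the replication factors are assumptions), tracking and querying a pin with the pointer types now reads:

import (
	"context"
	"fmt"

	cid "github.com/ipfs/go-cid"

	"github.com/ipfs/ipfs-cluster/api"
	maptracker "github.com/ipfs/ipfs-cluster/pintracker/maptracker"
)

func trackAndCheck(ctx context.Context, mpt *maptracker.MapPinTracker, c cid.Cid) error {
	pin := api.PinCid(c) // api.PinCid now returns *api.Pin
	pin.ReplicationFactorMin = -1
	pin.ReplicationFactorMax = -1

	if err := mpt.Track(ctx, pin); err != nil {
		return err
	}

	pi := mpt.Status(ctx, c) // *api.PinInfo
	fmt.Println(pi.Cid, pi.Status)
	return nil
}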

View File

@ -37,9 +37,8 @@ func mockRPCClient(t *testing.T) *rpc.Client {
return c
}
func (mock *mockService) IPFSPin(ctx context.Context, in api.PinSerial, out *struct{}) error {
c := in.ToPin().Cid
switch c.String() {
func (mock *mockService) IPFSPin(ctx context.Context, in *api.Pin, out *struct{}) error {
switch in.Cid.String() {
case test.TestSlowCid1:
time.Sleep(2 * time.Second)
case pinCancelCid:
@ -48,9 +47,8 @@ func (mock *mockService) IPFSPin(ctx context.Context, in api.PinSerial, out *str
return nil
}
func (mock *mockService) IPFSUnpin(ctx context.Context, in api.PinSerial, out *struct{}) error {
c := in.ToPin().Cid
switch c.String() {
func (mock *mockService) IPFSUnpin(ctx context.Context, in cid.Cid, out *struct{}) error {
switch in.String() {
case test.TestSlowCid1:
time.Sleep(2 * time.Second)
case unpinCancelCid:
@ -59,7 +57,7 @@ func (mock *mockService) IPFSUnpin(ctx context.Context, in api.PinSerial, out *s
return nil
}
func testPin(c cid.Cid, min, max int, allocs ...peer.ID) api.Pin {
func testPin(c cid.Cid, min, max int, allocs ...peer.ID) *api.Pin {
pin := api.PinCid(c)
pin.ReplicationFactorMin = min
pin.ReplicationFactorMax = max

View File

@ -56,7 +56,7 @@ type Operation struct {
// RO fields
opType OperationType
pin api.Pin
pin *api.Pin
// RW fields
mu sync.RWMutex
@ -66,7 +66,7 @@ type Operation struct {
}
// NewOperation creates a new Operation.
func NewOperation(ctx context.Context, pin api.Pin, typ OperationType, ph Phase) *Operation {
func NewOperation(ctx context.Context, pin *api.Pin, typ OperationType, ph Phase) *Operation {
ctx, span := trace.StartSpan(ctx, "optracker/NewOperation")
defer span.End()
@ -147,7 +147,7 @@ func (op *Operation) Type() OperationType {
}
// Pin returns the Pin object associated to the operation.
func (op *Operation) Pin() api.Pin {
func (op *Operation) Pin() *api.Pin {
return op.pin
}

View File

@ -45,7 +45,7 @@ func NewOperationTracker(ctx context.Context, pid peer.ID, peerName string) *Ope
//
// If an operation already exists and is of a different type, it is
// cancelled and the new one replaces it in the tracker.
func (opt *OperationTracker) TrackNewOperation(ctx context.Context, pin api.Pin, typ OperationType, ph Phase) *Operation {
func (opt *OperationTracker) TrackNewOperation(ctx context.Context, pin *api.Pin, typ OperationType, ph Phase) *Operation {
ctx = trace.NewContext(opt.ctx, trace.FromContext(ctx))
ctx, span := trace.StartSpan(ctx, "optracker/TrackNewOperation")
defer span.End()
@ -140,7 +140,7 @@ func (opt *OperationTracker) unsafePinInfo(ctx context.Context, op *Operation) a
}
// Get returns a PinInfo object for Cid.
func (opt *OperationTracker) Get(ctx context.Context, c cid.Cid) api.PinInfo {
func (opt *OperationTracker) Get(ctx context.Context, c cid.Cid) *api.PinInfo {
ctx, span := trace.StartSpan(ctx, "optracker/GetAll")
defer span.End()
@ -151,12 +151,12 @@ func (opt *OperationTracker) Get(ctx context.Context, c cid.Cid) api.PinInfo {
if pInfo.Cid == cid.Undef {
pInfo.Cid = c
}
return pInfo
return &pInfo
}
// GetExists returns a PinInfo object for a Cid only if there exists
// an associated Operation.
func (opt *OperationTracker) GetExists(ctx context.Context, c cid.Cid) (api.PinInfo, bool) {
func (opt *OperationTracker) GetExists(ctx context.Context, c cid.Cid) (*api.PinInfo, bool) {
ctx, span := trace.StartSpan(ctx, "optracker/GetExists")
defer span.End()
@ -164,22 +164,23 @@ func (opt *OperationTracker) GetExists(ctx context.Context, c cid.Cid) (api.PinI
defer opt.mu.RUnlock()
op, ok := opt.operations[c.String()]
if !ok {
return api.PinInfo{}, false
return nil, false
}
pInfo := opt.unsafePinInfo(ctx, op)
return pInfo, true
return &pInfo, true
}
// GetAll returns PinInfo objects for all known operations.
func (opt *OperationTracker) GetAll(ctx context.Context) []api.PinInfo {
func (opt *OperationTracker) GetAll(ctx context.Context) []*api.PinInfo {
ctx, span := trace.StartSpan(ctx, "optracker/GetAll")
defer span.End()
var pinfos []api.PinInfo
var pinfos []*api.PinInfo
opt.mu.RLock()
defer opt.mu.RUnlock()
for _, op := range opt.operations {
pinfos = append(pinfos, opt.unsafePinInfo(ctx, op))
pinfo := opt.unsafePinInfo(ctx, op)
pinfos = append(pinfos, &pinfo)
}
return pinfos
}
@ -228,13 +229,14 @@ func (opt *OperationTracker) OpContext(ctx context.Context, c cid.Cid) context.C
// Operations that matched the provided filter. Note, only supports
// filters of type OperationType or Phase, any other type
// will result in a nil slice being returned.
func (opt *OperationTracker) Filter(ctx context.Context, filters ...interface{}) []api.PinInfo {
var pinfos []api.PinInfo
func (opt *OperationTracker) Filter(ctx context.Context, filters ...interface{}) []*api.PinInfo {
var pinfos []*api.PinInfo
opt.mu.RLock()
defer opt.mu.RUnlock()
ops := filterOpsMap(ctx, opt.operations, filters)
for _, op := range ops {
pinfos = append(pinfos, opt.unsafePinInfo(ctx, op))
pinfo := opt.unsafePinInfo(ctx, op)
pinfos = append(pinfos, &pinfo)
}
return pinfos
}
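
The extra local in GetAll and Filter (pinfo := opt.unsafePinInfo(...); append(..., &pinfo)) is deliberate: it gives every appended pointer its own value instead of the address of a variable reused across iterations. A fragment illustrating the pitfall, with makePinInfo as a hypothetical stand-in for unsafePinInfo and ops as the operations being iterated:

// Buggy: every element ends up pointing at the same variable, which holds
// whatever the final iteration wrote.
var shared api.PinInfo
var wrong []*api.PinInfo
for _, op := range ops {
	shared = makePinInfo(op)
	wrong = append(wrong, &shared)
}

// Correct (what the tracker does): a fresh local per iteration, so each
// pointer refers to a distinct PinInfo value.
var right []*api.PinInfo
for _, op := range ops {
	pi := makePinInfo(op)
	right = append(right, &pi)
}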

View File

@ -46,8 +46,8 @@ func mockRPCClient(t testing.TB) *rpc.Client {
return c
}
func (mock *mockService) IPFSPin(ctx context.Context, in api.PinSerial, out *struct{}) error {
c := in.ToPin().Cid
func (mock *mockService) IPFSPin(ctx context.Context, in *api.Pin, out *struct{}) error {
c := in.Cid
switch c.String() {
case test.TestSlowCid1:
time.Sleep(3 * time.Second)
@ -57,8 +57,8 @@ func (mock *mockService) IPFSPin(ctx context.Context, in api.PinSerial, out *str
return nil
}
func (mock *mockService) IPFSPinLsCid(ctx context.Context, in api.PinSerial, out *api.IPFSPinStatus) error {
switch in.Cid {
func (mock *mockService) IPFSPinLsCid(ctx context.Context, in cid.Cid, out *api.IPFSPinStatus) error {
switch in.String() {
case test.TestCid1, test.TestCid2:
*out = api.IPFSPinStatusRecursive
case test.TestCid4:
@ -70,9 +70,8 @@ func (mock *mockService) IPFSPinLsCid(ctx context.Context, in api.PinSerial, out
return nil
}
func (mock *mockService) IPFSUnpin(ctx context.Context, in api.PinSerial, out *struct{}) error {
c := in.ToPin().Cid
switch c.String() {
func (mock *mockService) IPFSUnpin(ctx context.Context, in cid.Cid, out *struct{}) error {
switch in.String() {
case test.TestSlowCid1:
time.Sleep(3 * time.Second)
case unpinCancelCid:
@ -89,27 +88,29 @@ func (mock *mockService) IPFSPinLs(ctx context.Context, in string, out *map[stri
return nil
}
func (mock *mockService) Pins(ctx context.Context, in struct{}, out *[]api.PinSerial) error {
*out = []api.PinSerial{
api.PinWithOpts(test.MustDecodeCid(test.TestCid1), pinOpts).ToSerial(),
api.PinWithOpts(test.MustDecodeCid(test.TestCid3), pinOpts).ToSerial(),
func (mock *mockService) Pins(ctx context.Context, in struct{}, out *[]*api.Pin) error {
*out = []*api.Pin{
api.PinWithOpts(test.MustDecodeCid(test.TestCid1), pinOpts),
api.PinWithOpts(test.MustDecodeCid(test.TestCid3), pinOpts),
}
return nil
}
func (mock *mockService) PinGet(ctx context.Context, in api.PinSerial, out *api.PinSerial) error {
switch in.Cid {
func (mock *mockService) PinGet(ctx context.Context, in cid.Cid, out *api.Pin) error {
switch in.String() {
case test.ErrorCid:
return errors.New("expected error when using ErrorCid")
case test.TestCid1, test.TestCid2:
*out = api.PinWithOpts(test.MustDecodeCid(in.Cid), pinOpts).ToSerial()
pin := api.PinWithOpts(in, pinOpts)
*out = *pin
return nil
}
*out = in
pin := api.PinCid(in)
*out = *pin
return nil
}
var sortPinInfoByCid = func(p []api.PinInfo) {
var sortPinInfoByCid = func(p []*api.PinInfo) {
sort.Slice(p, func(i, j int) bool {
return p[i].Cid.String() < p[j].Cid.String()
})
@ -151,7 +152,7 @@ func testStatelessPinTracker(t testing.TB) *stateless.Tracker {
func TestPinTracker_Track(t *testing.T) {
type args struct {
c api.Pin
c *api.Pin
tracker ipfscluster.PinTracker
}
tests := []struct {
@ -187,7 +188,7 @@ func TestPinTracker_Track(t *testing.T) {
func BenchmarkPinTracker_Track(b *testing.B) {
type args struct {
c api.Pin
c *api.Pin
tracker ipfscluster.PinTracker
}
tests := []struct {
@ -259,13 +260,13 @@ func TestPinTracker_Untrack(t *testing.T) {
func TestPinTracker_StatusAll(t *testing.T) {
type args struct {
c api.Pin
c *api.Pin
tracker ipfscluster.PinTracker
}
tests := []struct {
name string
args args
want []api.PinInfo
want []*api.PinInfo
}{
{
"basic stateless statusall",
@ -273,16 +274,16 @@ func TestPinTracker_StatusAll(t *testing.T) {
api.PinWithOpts(test.MustDecodeCid(test.TestCid1), pinOpts),
testStatelessPinTracker(t),
},
[]api.PinInfo{
api.PinInfo{
[]*api.PinInfo{
{
Cid: test.MustDecodeCid(test.TestCid1),
Status: api.TrackerStatusPinned,
},
api.PinInfo{
{
Cid: test.MustDecodeCid(test.TestCid2),
Status: api.TrackerStatusRemote,
},
api.PinInfo{
{
Cid: test.MustDecodeCid(test.TestCid3),
Status: api.TrackerStatusPinned,
},
@ -294,8 +295,8 @@ func TestPinTracker_StatusAll(t *testing.T) {
api.PinWithOpts(test.MustDecodeCid(test.TestCid1), pinOpts),
testMapPinTracker(t),
},
[]api.PinInfo{
api.PinInfo{
[]*api.PinInfo{
{
Cid: test.MustDecodeCid(test.TestCid1),
Status: api.TrackerStatusPinned,
},
@ -307,8 +308,8 @@ func TestPinTracker_StatusAll(t *testing.T) {
api.PinWithOpts(test.MustDecodeCid(test.TestCid1), pinOpts),
testSlowStatelessPinTracker(t),
},
[]api.PinInfo{
api.PinInfo{
[]*api.PinInfo{
{
Cid: test.MustDecodeCid(test.TestCid1),
Status: api.TrackerStatusPinned,
},
@ -320,8 +321,8 @@ func TestPinTracker_StatusAll(t *testing.T) {
api.PinWithOpts(test.MustDecodeCid(test.TestCid1), pinOpts),
testSlowMapPinTracker(t),
},
[]api.PinInfo{
api.PinInfo{
[]*api.PinInfo{
{
Cid: test.MustDecodeCid(test.TestCid1),
Status: api.TrackerStatusPinned,
},
@ -501,7 +502,7 @@ func TestPinTracker_SyncAll(t *testing.T) {
tests := []struct {
name string
args args
want []api.PinInfo
want []*api.PinInfo
wantErr bool
}{
{
@ -513,12 +514,12 @@ func TestPinTracker_SyncAll(t *testing.T) {
},
testStatelessPinTracker(t),
},
[]api.PinInfo{
api.PinInfo{
[]*api.PinInfo{
{
Cid: test.MustDecodeCid(test.TestCid1),
Status: api.TrackerStatusPinned,
},
api.PinInfo{
{
Cid: test.MustDecodeCid(test.TestCid2),
Status: api.TrackerStatusPinned,
},
@ -534,12 +535,12 @@ func TestPinTracker_SyncAll(t *testing.T) {
},
testMapPinTracker(t),
},
[]api.PinInfo{
api.PinInfo{
[]*api.PinInfo{
{
Cid: test.MustDecodeCid(test.TestCid1),
Status: api.TrackerStatusPinned,
},
api.PinInfo{
{
Cid: test.MustDecodeCid(test.TestCid2),
Status: api.TrackerStatusPinned,
},
@ -555,12 +556,12 @@ func TestPinTracker_SyncAll(t *testing.T) {
},
testSlowStatelessPinTracker(t),
},
[]api.PinInfo{
api.PinInfo{
[]*api.PinInfo{
{
Cid: test.MustDecodeCid(test.TestCid1),
Status: api.TrackerStatusPinned,
},
api.PinInfo{
{
Cid: test.MustDecodeCid(test.TestCid2),
Status: api.TrackerStatusPinned,
},
@ -576,12 +577,12 @@ func TestPinTracker_SyncAll(t *testing.T) {
},
testSlowMapPinTracker(t),
},
[]api.PinInfo{
api.PinInfo{
[]*api.PinInfo{
{
Cid: test.MustDecodeCid(test.TestCid1),
Status: api.TrackerStatusPinned,
},
api.PinInfo{
{
Cid: test.MustDecodeCid(test.TestCid2),
Status: api.TrackerStatusPinned,
},
@ -717,30 +718,30 @@ func TestPinTracker_Sync(t *testing.T) {
func TestPinTracker_RecoverAll(t *testing.T) {
type args struct {
tracker ipfscluster.PinTracker
pin api.Pin // only used by maptracker
pin *api.Pin // only used by maptracker
}
tests := []struct {
name string
args args
want []api.PinInfo
want []*api.PinInfo
wantErr bool
}{
{
"basic stateless recoverall",
args{
testStatelessPinTracker(t),
api.Pin{},
&api.Pin{},
},
[]api.PinInfo{
api.PinInfo{
[]*api.PinInfo{
{
Cid: test.MustDecodeCid(test.TestCid1),
Status: api.TrackerStatusPinned,
},
api.PinInfo{
{
Cid: test.MustDecodeCid(test.TestCid2),
Status: api.TrackerStatusRemote,
},
api.PinInfo{
{
Cid: test.MustDecodeCid(test.TestCid3),
Status: api.TrackerStatusPinned,
},
@ -753,8 +754,8 @@ func TestPinTracker_RecoverAll(t *testing.T) {
testMapPinTracker(t),
api.PinWithOpts(test.MustDecodeCid(test.TestCid1), pinOpts),
},
[]api.PinInfo{
api.PinInfo{
[]*api.PinInfo{
{
Cid: test.MustDecodeCid(test.TestCid1),
Status: api.TrackerStatusPinned,
},

View File

@ -123,7 +123,7 @@ func (spt *Tracker) pin(op *optracker.Operation) error {
"",
"Cluster",
"IPFSPin",
op.Pin().ToSerial(),
op.Pin(),
&struct{}{},
)
if err != nil {
@ -142,7 +142,7 @@ func (spt *Tracker) unpin(op *optracker.Operation) error {
"",
"Cluster",
"IPFSUnpin",
op.Pin().ToSerial(),
op.Pin().Cid,
&struct{}{},
)
if err != nil {
@ -152,7 +152,7 @@ func (spt *Tracker) unpin(op *optracker.Operation) error {
}
// Enqueue puts a new operation on the queue, unless ongoing exists.
func (spt *Tracker) enqueue(ctx context.Context, c api.Pin, typ optracker.OperationType) error {
func (spt *Tracker) enqueue(ctx context.Context, c *api.Pin, typ optracker.OperationType) error {
ctx, span := trace.StartSpan(ctx, "tracker/stateless/enqueue")
defer span.End()
@ -217,7 +217,7 @@ func (spt *Tracker) Shutdown(ctx context.Context) error {
// Track tells the StatelessPinTracker to start managing a Cid,
// possibly triggering Pin operations on the IPFS daemon.
func (spt *Tracker) Track(ctx context.Context, c api.Pin) error {
func (spt *Tracker) Track(ctx context.Context, c *api.Pin) error {
ctx, span := trace.StartSpan(ctx, "tracker/stateless/Track")
defer span.End()
@ -263,7 +263,7 @@ func (spt *Tracker) Untrack(ctx context.Context, c cid.Cid) error {
}
// StatusAll returns information for all Cids pinned to the local IPFS node.
func (spt *Tracker) StatusAll(ctx context.Context) []api.PinInfo {
func (spt *Tracker) StatusAll(ctx context.Context) []*api.PinInfo {
ctx, span := trace.StartSpan(ctx, "tracker/stateless/StatusAll")
defer span.End()
@ -280,7 +280,7 @@ func (spt *Tracker) StatusAll(ctx context.Context) []api.PinInfo {
pininfos[infop.Cid.String()] = infop
}
var pis []api.PinInfo
var pis []*api.PinInfo
for _, pi := range pininfos {
pis = append(pis, pi)
}
@ -288,7 +288,7 @@ func (spt *Tracker) StatusAll(ctx context.Context) []api.PinInfo {
}
// Status returns information for a Cid pinned to the local IPFS node.
func (spt *Tracker) Status(ctx context.Context, c cid.Cid) api.PinInfo {
func (spt *Tracker) Status(ctx context.Context, c cid.Cid) *api.PinInfo {
ctx, span := trace.StartSpan(ctx, "tracker/stateless/Status")
defer span.End()
@ -300,18 +300,18 @@ func (spt *Tracker) Status(ctx context.Context, c cid.Cid) api.PinInfo {
// check global state to see if cluster should even be caring about
// the provided cid
var gpinS api.PinSerial
var gpin api.Pin
err := spt.rpcClient.Call(
"",
"Cluster",
"PinGet",
api.PinCid(c).ToSerial(),
&gpinS,
c,
&gpin,
)
if err != nil {
if rpc.IsRPCError(err) {
logger.Error(err)
return api.PinInfo{
return &api.PinInfo{
Cid: c,
Peer: spt.peerID,
Status: api.TrackerStatusClusterError,
@ -320,7 +320,7 @@ func (spt *Tracker) Status(ctx context.Context, c cid.Cid) api.PinInfo {
}
}
// not part of global state. we should not care about
return api.PinInfo{
return &api.PinInfo{
Cid: c,
Peer: spt.peerID,
Status: api.TrackerStatusUnpinned,
@ -328,11 +328,9 @@ func (spt *Tracker) Status(ctx context.Context, c cid.Cid) api.PinInfo {
}
}
gpin := gpinS.ToPin()
// check if pin is a meta pin
if gpin.Type == api.MetaType {
return api.PinInfo{
return &api.PinInfo{
Cid: c,
Peer: spt.peerID,
Status: api.TrackerStatusSharded,
@ -342,7 +340,7 @@ func (spt *Tracker) Status(ctx context.Context, c cid.Cid) api.PinInfo {
// check if pin is a remote pin
if gpin.IsRemotePin(spt.peerID) {
return api.PinInfo{
return &api.PinInfo{
Cid: c,
Peer: spt.peerID,
Status: api.TrackerStatusRemote,
@ -356,22 +354,20 @@ func (spt *Tracker) Status(ctx context.Context, c cid.Cid) api.PinInfo {
"",
"Cluster",
"IPFSPinLsCid",
api.PinCid(c).ToSerial(),
c,
&ips,
)
if err != nil {
logger.Error(err)
return api.PinInfo{}
return nil
}
pi := api.PinInfo{
return &api.PinInfo{
Cid: c,
Peer: spt.peerID,
Status: ips.ToTrackerStatus(),
TS: time.Now(),
}
return pi
}
// SyncAll verifies that the statuses of all tracked Cids (from the shared state)
@ -382,7 +378,7 @@ func (spt *Tracker) Status(ctx context.Context, c cid.Cid) api.PinInfo {
// were updated or have errors. Cids in error states can be recovered
// with Recover().
// An error is returned if we are unable to contact the IPFS daemon.
func (spt *Tracker) SyncAll(ctx context.Context) ([]api.PinInfo, error) {
func (spt *Tracker) SyncAll(ctx context.Context) ([]*api.PinInfo, error) {
ctx, span := trace.StartSpan(ctx, "tracker/stateless/SyncAll")
defer span.End()
@ -409,7 +405,7 @@ func (spt *Tracker) SyncAll(ctx context.Context) ([]api.PinInfo, error) {
}
// Sync returns the updated local status for the given Cid.
func (spt *Tracker) Sync(ctx context.Context, c cid.Cid) (api.PinInfo, error) {
func (spt *Tracker) Sync(ctx context.Context, c cid.Cid) (*api.PinInfo, error) {
ctx, span := trace.StartSpan(ctx, "tracker/stateless/Sync")
defer span.End()
@ -421,18 +417,18 @@ func (spt *Tracker) Sync(ctx context.Context, c cid.Cid) (api.PinInfo, error) {
if oppi.Status == api.TrackerStatusUnpinError {
// check global state to see if cluster should even be caring about
// the provided cid
var gpin api.PinSerial
var gpin api.Pin
err := spt.rpcClient.Call(
"",
"Cluster",
"PinGet",
api.PinCid(c).ToSerial(),
c,
&gpin,
)
if err != nil {
if rpc.IsRPCError(err) {
logger.Error(err)
return api.PinInfo{
return &api.PinInfo{
Cid: c,
Peer: spt.peerID,
Status: api.TrackerStatusClusterError,
@ -442,7 +438,7 @@ func (spt *Tracker) Sync(ctx context.Context, c cid.Cid) (api.PinInfo, error) {
}
// it isn't in the global state
spt.optracker.CleanError(ctx, c)
return api.PinInfo{
return &api.PinInfo{
Cid: c,
Peer: spt.peerID,
Status: api.TrackerStatusUnpinned,
@ -450,9 +446,9 @@ func (spt *Tracker) Sync(ctx context.Context, c cid.Cid) (api.PinInfo, error) {
}, nil
}
// check if pin is a remote pin
if gpin.ToPin().IsRemotePin(spt.peerID) {
if gpin.IsRemotePin(spt.peerID) {
spt.optracker.CleanError(ctx, c)
return api.PinInfo{
return &api.PinInfo{
Cid: c,
Peer: spt.peerID,
Status: api.TrackerStatusRemote,
@ -468,12 +464,12 @@ func (spt *Tracker) Sync(ctx context.Context, c cid.Cid) (api.PinInfo, error) {
"",
"Cluster",
"IPFSPinLsCid",
api.PinCid(c).ToSerial(),
c,
&ips,
)
if err != nil {
logger.Error(err)
return api.PinInfo{
return &api.PinInfo{
Cid: c,
Peer: spt.peerID,
Status: api.TrackerStatusPinError,
@ -483,7 +479,7 @@ func (spt *Tracker) Sync(ctx context.Context, c cid.Cid) (api.PinInfo, error) {
}
if ips.ToTrackerStatus() == api.TrackerStatusPinned {
spt.optracker.CleanError(ctx, c)
pi := api.PinInfo{
pi := &api.PinInfo{
Cid: c,
Peer: spt.peerID,
Status: ips.ToTrackerStatus(),
@ -497,12 +493,12 @@ func (spt *Tracker) Sync(ctx context.Context, c cid.Cid) (api.PinInfo, error) {
}
// RecoverAll attempts to recover all items tracked by this peer.
func (spt *Tracker) RecoverAll(ctx context.Context) ([]api.PinInfo, error) {
func (spt *Tracker) RecoverAll(ctx context.Context) ([]*api.PinInfo, error) {
ctx, span := trace.StartSpan(ctx, "tracker/stateless/RecoverAll")
defer span.End()
statuses := spt.StatusAll(ctx)
resp := make([]api.PinInfo, 0)
resp := make([]*api.PinInfo, 0)
for _, st := range statuses {
r, err := spt.Recover(ctx, st.Cid)
if err != nil {
@ -516,7 +512,7 @@ func (spt *Tracker) RecoverAll(ctx context.Context) ([]api.PinInfo, error) {
// Recover will re-track or re-untrack a Cid in error state,
// possibly retriggering an IPFS pinning operation and returning
// only when it is done.
func (spt *Tracker) Recover(ctx context.Context, c cid.Cid) (api.PinInfo, error) {
func (spt *Tracker) Recover(ctx context.Context, c cid.Cid) (*api.PinInfo, error) {
ctx, span := trace.StartSpan(ctx, "tracker/stateless/Recover")
defer span.End()
@ -540,7 +536,7 @@ func (spt *Tracker) Recover(ctx context.Context, c cid.Cid) (api.PinInfo, error)
return spt.Status(ctx, c), nil
}
func (spt *Tracker) ipfsStatusAll(ctx context.Context) (map[string]api.PinInfo, error) {
func (spt *Tracker) ipfsStatusAll(ctx context.Context) (map[string]*api.PinInfo, error) {
ctx, span := trace.StartSpan(ctx, "tracker/stateless/ipfsStatusAll")
defer span.End()
@ -557,14 +553,14 @@ func (spt *Tracker) ipfsStatusAll(ctx context.Context) (map[string]api.PinInfo,
logger.Error(err)
return nil, err
}
pins := make(map[string]api.PinInfo, 0)
pins := make(map[string]*api.PinInfo, 0)
for cidstr, ips := range ipsMap {
c, err := cid.Decode(cidstr)
if err != nil {
logger.Error(err)
continue
}
p := api.PinInfo{
p := &api.PinInfo{
Cid: c,
Peer: spt.peerID,
Status: ips.ToTrackerStatus(),
@ -578,30 +574,26 @@ func (spt *Tracker) ipfsStatusAll(ctx context.Context) (map[string]api.PinInfo,
// localStatus returns a joint set of consensusState and ipfsStatus
// marking pins which should be meta or remote and leaving any ipfs pins that
// aren't in the consensusState out.
func (spt *Tracker) localStatus(ctx context.Context, incExtra bool) (map[string]api.PinInfo, error) {
func (spt *Tracker) localStatus(ctx context.Context, incExtra bool) (map[string]*api.PinInfo, error) {
ctx, span := trace.StartSpan(ctx, "tracker/stateless/localStatus")
defer span.End()
pininfos := make(map[string]api.PinInfo)
pininfos := make(map[string]*api.PinInfo)
// get shared state
var statePinsSerial []api.PinSerial
var statePins []*api.Pin
err := spt.rpcClient.CallContext(
ctx,
"",
"Cluster",
"Pins",
struct{}{},
&statePinsSerial,
&statePins,
)
if err != nil {
logger.Error(err)
return nil, err
}
var statePins []api.Pin
for _, p := range statePinsSerial {
statePins = append(statePins, p.ToPin())
}
// get statuses from ipfs node first
localpis, err := spt.ipfsStatusAll(ctx)
@ -614,7 +606,7 @@ func (spt *Tracker) localStatus(ctx context.Context, incExtra bool) (map[string]
pCid := p.Cid.String()
if p.Type == api.MetaType && incExtra {
// add pin to pininfos with sharded status
pininfos[pCid] = api.PinInfo{
pininfos[pCid] = &api.PinInfo{
Cid: p.Cid,
Peer: spt.peerID,
Status: api.TrackerStatusSharded,
@ -625,7 +617,7 @@ func (spt *Tracker) localStatus(ctx context.Context, incExtra bool) (map[string]
if p.IsRemotePin(spt.peerID) && incExtra {
// add pin to pininfos with a status of remote
pininfos[pCid] = api.PinInfo{
pininfos[pCid] = &api.PinInfo{
Cid: p.Cid,
Peer: spt.peerID,
Status: api.TrackerStatusRemote,
@ -641,7 +633,7 @@ func (spt *Tracker) localStatus(ctx context.Context, incExtra bool) (map[string]
return pininfos, nil
}
func (spt *Tracker) getErrorsAll(ctx context.Context) []api.PinInfo {
func (spt *Tracker) getErrorsAll(ctx context.Context) []*api.PinInfo {
return spt.optracker.Filter(ctx, optracker.PhaseError)
}
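
One behavioural change worth noting: with pointers, the stateless tracker's Status returns nil when the IPFSPinLsCid call fails, where it previously returned an empty api.PinInfo{}. Callers should guard for that; a small fragment, assuming spt, ctx and c are already in scope:

pi := spt.Status(ctx, c)
if pi == nil {
	// pin-ls against the IPFS daemon failed; the status is unknown
	return fmt.Errorf("could not obtain status for %s", c)
}
fmt.Println(pi.Cid, pi.Status)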

View File

@ -39,9 +39,8 @@ func mockRPCClient(t *testing.T) *rpc.Client {
return c
}
func (mock *mockService) IPFSPin(ctx context.Context, in api.PinSerial, out *struct{}) error {
c := in.ToPin().Cid
switch c.String() {
func (mock *mockService) IPFSPin(ctx context.Context, in *api.Pin, out *struct{}) error {
switch in.Cid.String() {
case test.TestSlowCid1:
time.Sleep(2 * time.Second)
case pinCancelCid:
@ -50,9 +49,8 @@ func (mock *mockService) IPFSPin(ctx context.Context, in api.PinSerial, out *str
return nil
}
func (mock *mockService) IPFSUnpin(ctx context.Context, in api.PinSerial, out *struct{}) error {
c := in.ToPin().Cid
switch c.String() {
func (mock *mockService) IPFSUnpin(ctx context.Context, in cid.Cid, out *struct{}) error {
switch in.String() {
case test.TestSlowCid1:
time.Sleep(2 * time.Second)
case unpinCancelCid:
@ -69,8 +67,8 @@ func (mock *mockService) IPFSPinLs(ctx context.Context, in string, out *map[stri
return nil
}
func (mock *mockService) IPFSPinLsCid(ctx context.Context, in api.PinSerial, out *api.IPFSPinStatus) error {
switch in.Cid {
func (mock *mockService) IPFSPinLsCid(ctx context.Context, in cid.Cid, out *api.IPFSPinStatus) error {
switch in.String() {
case test.TestCid1, test.TestCid2:
*out = api.IPFSPinStatusRecursive
default:
@ -79,20 +77,20 @@ func (mock *mockService) IPFSPinLsCid(ctx context.Context, in api.PinSerial, out
return nil
}
func (mock *mockService) Pins(ctx context.Context, in struct{}, out *[]api.PinSerial) error {
*out = []api.PinSerial{
api.PinWithOpts(test.MustDecodeCid(test.TestCid1), pinOpts).ToSerial(),
api.PinWithOpts(test.MustDecodeCid(test.TestCid3), pinOpts).ToSerial(),
func (mock *mockService) Pins(ctx context.Context, in struct{}, out *[]*api.Pin) error {
*out = []*api.Pin{
api.PinWithOpts(test.MustDecodeCid(test.TestCid1), pinOpts),
api.PinWithOpts(test.MustDecodeCid(test.TestCid3), pinOpts),
}
return nil
}
func (mock *mockService) PinGet(ctx context.Context, in api.PinSerial, out *api.PinSerial) error {
switch in.Cid {
func (mock *mockService) PinGet(ctx context.Context, in cid.Cid, out *api.Pin) error {
switch in.String() {
case test.ErrorCid:
return errors.New("expected error when using ErrorCid")
case test.TestCid1, test.TestCid2:
*out = api.PinWithOpts(test.MustDecodeCid(in.Cid), pinOpts).ToSerial()
*out = *api.PinWithOpts(in, pinOpts)
return nil
default:
return errors.New("not found")
@ -361,7 +359,7 @@ func TestUntrackTrackWithNoCancel(t *testing.T) {
}
}
var sortPinInfoByCid = func(p []api.PinInfo) {
var sortPinInfoByCid = func(p []*api.PinInfo) {
sort.Slice(p, func(i, j int) bool {
return p[i].Cid.String() < p[j].Cid.String()
})
@ -375,7 +373,7 @@ func TestStatelessTracker_SyncAll(t *testing.T) {
tests := []struct {
name string
args args
want []api.PinInfo
want []*api.PinInfo
wantErr bool
}{
{
@ -387,12 +385,12 @@ func TestStatelessTracker_SyncAll(t *testing.T) {
},
testStatelessPinTracker(t),
},
[]api.PinInfo{
api.PinInfo{
[]*api.PinInfo{
{
Cid: test.MustDecodeCid(test.TestCid1),
Status: api.TrackerStatusPinned,
},
api.PinInfo{
{
Cid: test.MustDecodeCid(test.TestCid2),
Status: api.TrackerStatusPinned,
},
@ -408,12 +406,12 @@ func TestStatelessTracker_SyncAll(t *testing.T) {
},
testSlowStatelessPinTracker(t),
},
[]api.PinInfo{
api.PinInfo{
[]*api.PinInfo{
{
Cid: test.MustDecodeCid(test.TestCid1),
Status: api.TrackerStatusPinned,
},
api.PinInfo{
{
Cid: test.MustDecodeCid(test.TestCid2),
Status: api.TrackerStatusPinned,
},

View File

@ -8,7 +8,7 @@ import (
// IsRemotePin determines whether a Pin's ReplicationFactor has
// been met, so as to either pin or unpin it from the peer.
func IsRemotePin(c api.Pin, pid peer.ID) bool {
func IsRemotePin(c *api.Pin, pid peer.ID) bool {
if c.ReplicationFactorMax < 0 {
return false
}

View File

@ -99,18 +99,18 @@ func (pm *Manager) RmPeer(pid peer.ID) error {
// if the peer has dns addresses, return only those, otherwise
// return all. In all cases, encapsulate the peer ID.
func (pm *Manager) filteredPeerAddrs(p peer.ID) []ma.Multiaddr {
func (pm *Manager) filteredPeerAddrs(p peer.ID) []api.Multiaddr {
all := pm.host.Peerstore().Addrs(p)
peerAddrs := []ma.Multiaddr{}
peerDNSAddrs := []ma.Multiaddr{}
peerAddrs := []api.Multiaddr{}
peerDNSAddrs := []api.Multiaddr{}
peerPart, _ := ma.NewMultiaddr(fmt.Sprintf("/ipfs/%s", peer.IDB58Encode(p)))
for _, a := range all {
encAddr := a.Encapsulate(peerPart)
if madns.Matches(encAddr) {
peerDNSAddrs = append(peerDNSAddrs, encAddr)
peerDNSAddrs = append(peerDNSAddrs, api.NewMultiaddrWithValue(encAddr))
} else {
peerAddrs = append(peerAddrs, encAddr)
peerAddrs = append(peerAddrs, api.NewMultiaddrWithValue(encAddr))
}
}
@ -125,7 +125,7 @@ func (pm *Manager) filteredPeerAddrs(p peer.ID) []ma.Multiaddr {
// /ipfs/<peerID> part) for the given set of peers. For peers for which
// we know DNS multiaddresses, we only return those. Otherwise, we return
// all the multiaddresses known for that peer.
func (pm *Manager) PeersAddresses(peers []peer.ID) []ma.Multiaddr {
func (pm *Manager) PeersAddresses(peers []peer.ID) []api.Multiaddr {
if pm.host == nil {
return nil
}
@ -134,7 +134,7 @@ func (pm *Manager) PeersAddresses(peers []peer.ID) []ma.Multiaddr {
return nil
}
var addrs []ma.Multiaddr
var addrs []api.Multiaddr
for _, p := range peers {
if p == pm.host.ID() {
continue
@ -200,7 +200,7 @@ func (pm *Manager) LoadPeerstore() (addrs []ma.Multiaddr) {
// SavePeerstore stores a slice of multiaddresses in the peerstore file, one
// per line.
func (pm *Manager) SavePeerstore(addrs []ma.Multiaddr) {
func (pm *Manager) SavePeerstore(addrs []api.Multiaddr) {
if pm.peerstorePath == "" {
return
}
@ -220,7 +220,7 @@ func (pm *Manager) SavePeerstore(addrs []ma.Multiaddr) {
defer f.Close()
for _, a := range addrs {
f.Write([]byte(fmt.Sprintf("%s\n", a.String())))
f.Write([]byte(fmt.Sprintf("%s\n", a.Value().String())))
}
}
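
A minimal sketch of the api.Multiaddr-based peerstore helpers, assuming the manager type is pstoremgr.Manager and that extra is a raw multiaddress the caller wants persisted alongside the known peer addresses:

import (
	peer "github.com/libp2p/go-libp2p-peer"
	ma "github.com/multiformats/go-multiaddr"

	"github.com/ipfs/ipfs-cluster/api"
	pstoremgr "github.com/ipfs/ipfs-cluster/pstoremgr"
)

func savePeers(pm *pstoremgr.Manager, peers []peer.ID, extra ma.Multiaddr) {
	// DNS addresses are preferred when known; every address already
	// carries the /ipfs/<peerID> encapsulation.
	addrs := pm.PeersAddresses(peers)

	// Raw multiaddrs must be wrapped before they can be stored.
	addrs = append(addrs, api.NewMultiaddrWithValue(extra))

	pm.SavePeerstore(addrs) // one address per line in the peerstore file
}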

View File

@ -7,6 +7,10 @@ import (
"go.opencensus.io/trace"
cid "github.com/ipfs/go-cid"
multiaddr "github.com/multiformats/go-multiaddr"
"github.com/ipfs/ipfs-cluster/api"
)
@ -26,55 +30,47 @@ type RPCAPI struct {
*/
// ID runs Cluster.ID()
func (rpcapi *RPCAPI) ID(ctx context.Context, in struct{}, out *api.IDSerial) error {
id := rpcapi.c.ID(ctx).ToSerial()
*out = id
func (rpcapi *RPCAPI) ID(ctx context.Context, in struct{}, out *api.ID) error {
id := rpcapi.c.ID(ctx)
*out = *id
return nil
}
// Pin runs Cluster.Pin().
func (rpcapi *RPCAPI) Pin(ctx context.Context, in api.PinSerial, out *struct{}) error {
return rpcapi.c.Pin(ctx, in.ToPin())
func (rpcapi *RPCAPI) Pin(ctx context.Context, in *api.Pin, out *struct{}) error {
return rpcapi.c.Pin(ctx, in)
}
// Unpin runs Cluster.Unpin().
func (rpcapi *RPCAPI) Unpin(ctx context.Context, in api.PinSerial, out *struct{}) error {
c := in.DecodeCid()
return rpcapi.c.Unpin(ctx, c)
func (rpcapi *RPCAPI) Unpin(ctx context.Context, in cid.Cid, out *struct{}) error {
return rpcapi.c.Unpin(ctx, in)
}
// PinPath resolves path into a cid and runs Cluster.Pin().
func (rpcapi *RPCAPI) PinPath(ctx context.Context, in api.PinPath, out *api.PinSerial) error {
func (rpcapi *RPCAPI) PinPath(ctx context.Context, in *api.PinPath, out *api.Pin) error {
pin, err := rpcapi.c.PinPath(ctx, in)
*out = pin.ToSerial()
*out = *pin
return err
}
// UnpinPath resolves path into a cid and runs Cluster.Unpin().
func (rpcapi *RPCAPI) UnpinPath(ctx context.Context, in string, out *api.PinSerial) error {
func (rpcapi *RPCAPI) UnpinPath(ctx context.Context, in string, out *api.Pin) error {
pin, err := rpcapi.c.UnpinPath(ctx, in)
*out = pin.ToSerial()
*out = *pin
return err
}
// Pins runs Cluster.Pins().
func (rpcapi *RPCAPI) Pins(ctx context.Context, in struct{}, out *[]api.PinSerial) error {
func (rpcapi *RPCAPI) Pins(ctx context.Context, in struct{}, out *[]*api.Pin) error {
cidList := rpcapi.c.Pins(ctx)
cidSerialList := make([]api.PinSerial, 0, len(cidList))
for _, c := range cidList {
cidSerialList = append(cidSerialList, c.ToSerial())
}
*out = cidSerialList
*out = cidList
return nil
}
// PinGet runs Cluster.PinGet().
func (rpcapi *RPCAPI) PinGet(ctx context.Context, in api.PinSerial, out *api.PinSerial) error {
cidarg := in.ToPin()
pin, err := rpcapi.c.PinGet(ctx, cidarg.Cid)
if err == nil {
*out = pin.ToSerial()
}
func (rpcapi *RPCAPI) PinGet(ctx context.Context, in cid.Cid, out *api.Pin) error {
pin, err := rpcapi.c.PinGet(ctx, in)
*out = *pin
return err
}
@ -87,28 +83,22 @@ func (rpcapi *RPCAPI) Version(ctx context.Context, in struct{}, out *api.Version
}
// Peers runs Cluster.Peers().
func (rpcapi *RPCAPI) Peers(ctx context.Context, in struct{}, out *[]api.IDSerial) error {
peers := rpcapi.c.Peers(ctx)
var sPeers []api.IDSerial
for _, p := range peers {
sPeers = append(sPeers, p.ToSerial())
}
*out = sPeers
func (rpcapi *RPCAPI) Peers(ctx context.Context, in struct{}, out *[]*api.ID) error {
*out = rpcapi.c.Peers(ctx)
return nil
}
// PeerAdd runs Cluster.PeerAdd().
func (rpcapi *RPCAPI) PeerAdd(ctx context.Context, in string, out *api.IDSerial) error {
pid, _ := peer.IDB58Decode(in)
id, err := rpcapi.c.PeerAdd(ctx, pid)
*out = id.ToSerial()
func (rpcapi *RPCAPI) PeerAdd(ctx context.Context, in peer.ID, out *api.ID) error {
id, err := rpcapi.c.PeerAdd(ctx, in)
*out = *id
return err
}
// ConnectGraph runs Cluster.GetConnectGraph().
func (rpcapi *RPCAPI) ConnectGraph(ctx context.Context, in struct{}, out *api.ConnectGraphSerial) error {
func (rpcapi *RPCAPI) ConnectGraph(ctx context.Context, in struct{}, out *api.ConnectGraph) error {
graph, err := rpcapi.c.ConnectGraph()
*out = graph.ToSerial()
*out = graph
return err
}
@ -118,112 +108,104 @@ func (rpcapi *RPCAPI) PeerRemove(ctx context.Context, in peer.ID, out *struct{})
}
// Join runs Cluster.Join().
func (rpcapi *RPCAPI) Join(ctx context.Context, in api.MultiaddrSerial, out *struct{}) error {
addr := in.ToMultiaddr()
err := rpcapi.c.Join(ctx, addr)
func (rpcapi *RPCAPI) Join(ctx context.Context, in multiaddr.Multiaddr, out *struct{}) error {
err := rpcapi.c.Join(ctx, in)
return err
}
// StatusAll runs Cluster.StatusAll().
func (rpcapi *RPCAPI) StatusAll(ctx context.Context, in struct{}, out *[]api.GlobalPinInfoSerial) error {
func (rpcapi *RPCAPI) StatusAll(ctx context.Context, in struct{}, out *[]*api.GlobalPinInfo) error {
pinfos, err := rpcapi.c.StatusAll(ctx)
*out = GlobalPinInfoSliceToSerial(pinfos)
*out = pinfos
return err
}
// StatusAllLocal runs Cluster.StatusAllLocal().
func (rpcapi *RPCAPI) StatusAllLocal(ctx context.Context, in struct{}, out *[]api.PinInfoSerial) error {
func (rpcapi *RPCAPI) StatusAllLocal(ctx context.Context, in struct{}, out *[]*api.PinInfo) error {
pinfos := rpcapi.c.StatusAllLocal(ctx)
*out = pinInfoSliceToSerial(pinfos)
*out = pinfos
return nil
}
// Status runs Cluster.Status().
func (rpcapi *RPCAPI) Status(ctx context.Context, in api.PinSerial, out *api.GlobalPinInfoSerial) error {
c := in.DecodeCid()
pinfo, err := rpcapi.c.Status(ctx, c)
*out = pinfo.ToSerial()
func (rpcapi *RPCAPI) Status(ctx context.Context, in cid.Cid, out *api.GlobalPinInfo) error {
pinfo, err := rpcapi.c.Status(ctx, in)
*out = *pinfo
return err
}
// StatusLocal runs Cluster.StatusLocal().
func (rpcapi *RPCAPI) StatusLocal(ctx context.Context, in api.PinSerial, out *api.PinInfoSerial) error {
c := in.DecodeCid()
pinfo := rpcapi.c.StatusLocal(ctx, c)
*out = pinfo.ToSerial()
func (rpcapi *RPCAPI) StatusLocal(ctx context.Context, in cid.Cid, out *api.PinInfo) error {
pinfo := rpcapi.c.StatusLocal(ctx, in)
*out = *pinfo
return nil
}
// SyncAll runs Cluster.SyncAll().
func (rpcapi *RPCAPI) SyncAll(ctx context.Context, in struct{}, out *[]api.GlobalPinInfoSerial) error {
func (rpcapi *RPCAPI) SyncAll(ctx context.Context, in struct{}, out *[]*api.GlobalPinInfo) error {
pinfos, err := rpcapi.c.SyncAll(ctx)
*out = GlobalPinInfoSliceToSerial(pinfos)
*out = pinfos
return err
}
// SyncAllLocal runs Cluster.SyncAllLocal().
func (rpcapi *RPCAPI) SyncAllLocal(ctx context.Context, in struct{}, out *[]api.PinInfoSerial) error {
func (rpcapi *RPCAPI) SyncAllLocal(ctx context.Context, in struct{}, out *[]*api.PinInfo) error {
pinfos, err := rpcapi.c.SyncAllLocal(ctx)
*out = pinInfoSliceToSerial(pinfos)
*out = pinfos
return err
}
// Sync runs Cluster.Sync().
func (rpcapi *RPCAPI) Sync(ctx context.Context, in api.PinSerial, out *api.GlobalPinInfoSerial) error {
c := in.DecodeCid()
pinfo, err := rpcapi.c.Sync(ctx, c)
*out = pinfo.ToSerial()
func (rpcapi *RPCAPI) Sync(ctx context.Context, in cid.Cid, out *api.GlobalPinInfo) error {
pinfo, err := rpcapi.c.Sync(ctx, in)
*out = *pinfo
return err
}
// SyncLocal runs Cluster.SyncLocal().
func (rpcapi *RPCAPI) SyncLocal(ctx context.Context, in api.PinSerial, out *api.PinInfoSerial) error {
c := in.DecodeCid()
pinfo, err := rpcapi.c.SyncLocal(ctx, c)
*out = pinfo.ToSerial()
func (rpcapi *RPCAPI) SyncLocal(ctx context.Context, in cid.Cid, out *api.PinInfo) error {
pinfo, err := rpcapi.c.SyncLocal(ctx, in)
*out = *pinfo
return err
}
// RecoverAllLocal runs Cluster.RecoverAllLocal().
func (rpcapi *RPCAPI) RecoverAllLocal(ctx context.Context, in struct{}, out *[]api.PinInfoSerial) error {
func (rpcapi *RPCAPI) RecoverAllLocal(ctx context.Context, in struct{}, out *[]*api.PinInfo) error {
pinfos, err := rpcapi.c.RecoverAllLocal(ctx)
*out = pinInfoSliceToSerial(pinfos)
*out = pinfos
return err
}
// Recover runs Cluster.Recover().
func (rpcapi *RPCAPI) Recover(ctx context.Context, in api.PinSerial, out *api.GlobalPinInfoSerial) error {
c := in.DecodeCid()
pinfo, err := rpcapi.c.Recover(ctx, c)
*out = pinfo.ToSerial()
func (rpcapi *RPCAPI) Recover(ctx context.Context, in cid.Cid, out *api.GlobalPinInfo) error {
pinfo, err := rpcapi.c.Recover(ctx, in)
*out = *pinfo
return err
}
// RecoverLocal runs Cluster.RecoverLocal().
func (rpcapi *RPCAPI) RecoverLocal(ctx context.Context, in api.PinSerial, out *api.PinInfoSerial) error {
c := in.DecodeCid()
pinfo, err := rpcapi.c.RecoverLocal(ctx, c)
*out = pinfo.ToSerial()
func (rpcapi *RPCAPI) RecoverLocal(ctx context.Context, in cid.Cid, out *api.PinInfo) error {
pinfo, err := rpcapi.c.RecoverLocal(ctx, in)
*out = *pinfo
return err
}
// BlockAllocate returns allocations for blocks. This is used in the adders.
// It's different from pin allocations when ReplicationFactor < 0.
func (rpcapi *RPCAPI) BlockAllocate(ctx context.Context, in api.PinSerial, out *[]string) error {
pin := in.ToPin()
err := rpcapi.c.setupPin(ctx, &pin)
func (rpcapi *RPCAPI) BlockAllocate(ctx context.Context, in *api.Pin, out *[]peer.ID) error {
err := rpcapi.c.setupPin(ctx, in)
if err != nil {
return err
}
// Return the current peer list.
if pin.ReplicationFactorMin < 0 {
if in.ReplicationFactorMin < 0 {
// Returned metrics are Valid and belong to current
// Cluster peers.
metrics := rpcapi.c.monitor.LatestMetrics(ctx, pingMetricName)
peers := make([]string, len(metrics), len(metrics))
peers := make([]peer.ID, len(metrics), len(metrics))
for i, m := range metrics {
peers[i] = peer.IDB58Encode(m.Peer)
peers[i] = m.Peer
}
*out = peers
@ -232,9 +214,9 @@ func (rpcapi *RPCAPI) BlockAllocate(ctx context.Context, in api.PinSerial, out *
allocs, err := rpcapi.c.allocate(
ctx,
pin.Cid,
pin.ReplicationFactorMin,
pin.ReplicationFactorMax,
in.Cid,
in.ReplicationFactorMin,
in.ReplicationFactorMax,
[]peer.ID{}, // blacklist
[]peer.ID{}, // prio list
)
@ -243,14 +225,14 @@ func (rpcapi *RPCAPI) BlockAllocate(ctx context.Context, in api.PinSerial, out *
return err
}
*out = api.PeersToStrings(allocs)
*out = allocs
return nil
}
// SendInformerMetric runs Cluster.sendInformerMetric().
func (rpcapi *RPCAPI) SendInformerMetric(ctx context.Context, in struct{}, out *api.Metric) error {
m, err := rpcapi.c.sendInformerMetric(ctx)
*out = m
*out = *m
return err
}
@ -259,54 +241,51 @@ func (rpcapi *RPCAPI) SendInformerMetric(ctx context.Context, in struct{}, out *
*/
// Track runs PinTracker.Track().
func (rpcapi *RPCAPI) Track(ctx context.Context, in api.PinSerial, out *struct{}) error {
func (rpcapi *RPCAPI) Track(ctx context.Context, in *api.Pin, out *struct{}) error {
ctx, span := trace.StartSpan(ctx, "rpc/tracker/Track")
defer span.End()
return rpcapi.c.tracker.Track(ctx, in.ToPin())
return rpcapi.c.tracker.Track(ctx, in)
}
// Untrack runs PinTracker.Untrack().
func (rpcapi *RPCAPI) Untrack(ctx context.Context, in api.PinSerial, out *struct{}) error {
func (rpcapi *RPCAPI) Untrack(ctx context.Context, in cid.Cid, out *struct{}) error {
ctx, span := trace.StartSpan(ctx, "rpc/tracker/Untrack")
defer span.End()
c := in.DecodeCid()
return rpcapi.c.tracker.Untrack(ctx, c)
return rpcapi.c.tracker.Untrack(ctx, in)
}
// TrackerStatusAll runs PinTracker.StatusAll().
func (rpcapi *RPCAPI) TrackerStatusAll(ctx context.Context, in struct{}, out *[]api.PinInfoSerial) error {
func (rpcapi *RPCAPI) TrackerStatusAll(ctx context.Context, in struct{}, out *[]*api.PinInfo) error {
ctx, span := trace.StartSpan(ctx, "rpc/tracker/StatusAll")
defer span.End()
*out = pinInfoSliceToSerial(rpcapi.c.tracker.StatusAll(ctx))
*out = rpcapi.c.tracker.StatusAll(ctx)
return nil
}
// TrackerStatus runs PinTracker.Status().
func (rpcapi *RPCAPI) TrackerStatus(ctx context.Context, in api.PinSerial, out *api.PinInfoSerial) error {
func (rpcapi *RPCAPI) TrackerStatus(ctx context.Context, in cid.Cid, out *api.PinInfo) error {
ctx, span := trace.StartSpan(ctx, "rpc/tracker/Status")
defer span.End()
c := in.DecodeCid()
pinfo := rpcapi.c.tracker.Status(ctx, c)
*out = pinfo.ToSerial()
pinfo := rpcapi.c.tracker.Status(ctx, in)
*out = *pinfo
return nil
}
// TrackerRecoverAll runs PinTracker.RecoverAll().f
func (rpcapi *RPCAPI) TrackerRecoverAll(ctx context.Context, in struct{}, out *[]api.PinInfoSerial) error {
func (rpcapi *RPCAPI) TrackerRecoverAll(ctx context.Context, in struct{}, out *[]*api.PinInfo) error {
ctx, span := trace.StartSpan(ctx, "rpc/tracker/RecoverAll")
defer span.End()
pinfos, err := rpcapi.c.tracker.RecoverAll(ctx)
*out = pinInfoSliceToSerial(pinfos)
*out = pinfos
return err
}
// TrackerRecover runs PinTracker.Recover().
func (rpcapi *RPCAPI) TrackerRecover(ctx context.Context, in api.PinSerial, out *api.PinInfoSerial) error {
func (rpcapi *RPCAPI) TrackerRecover(ctx context.Context, in cid.Cid, out *api.PinInfo) error {
ctx, span := trace.StartSpan(ctx, "rpc/tracker/Recover")
defer span.End()
c := in.DecodeCid()
pinfo, err := rpcapi.c.tracker.Recover(ctx, c)
*out = pinfo.ToSerial()
pinfo, err := rpcapi.c.tracker.Recover(ctx, in)
*out = *pinfo
return err
}
@ -315,24 +294,20 @@ func (rpcapi *RPCAPI) TrackerRecover(ctx context.Context, in api.PinSerial, out
*/
// IPFSPin runs IPFSConnector.Pin().
func (rpcapi *RPCAPI) IPFSPin(ctx context.Context, in api.PinSerial, out *struct{}) error {
func (rpcapi *RPCAPI) IPFSPin(ctx context.Context, in *api.Pin, out *struct{}) error {
ctx, span := trace.StartSpan(ctx, "rpc/ipfsconn/IPFSPin")
defer span.End()
c := in.DecodeCid()
depth := in.ToPin().MaxDepth
return rpcapi.c.ipfs.Pin(ctx, c, depth)
return rpcapi.c.ipfs.Pin(ctx, in.Cid, in.MaxDepth)
}
// IPFSUnpin runs IPFSConnector.Unpin().
func (rpcapi *RPCAPI) IPFSUnpin(ctx context.Context, in api.PinSerial, out *struct{}) error {
c := in.DecodeCid()
return rpcapi.c.ipfs.Unpin(ctx, c)
func (rpcapi *RPCAPI) IPFSUnpin(ctx context.Context, in cid.Cid, out *struct{}) error {
return rpcapi.c.ipfs.Unpin(ctx, in)
}
// IPFSPinLsCid runs IPFSConnector.PinLsCid().
func (rpcapi *RPCAPI) IPFSPinLsCid(ctx context.Context, in api.PinSerial, out *api.IPFSPinStatus) error {
c := in.DecodeCid()
b, err := rpcapi.c.ipfs.PinLsCid(ctx, c)
func (rpcapi *RPCAPI) IPFSPinLsCid(ctx context.Context, in cid.Cid, out *api.IPFSPinStatus) error {
b, err := rpcapi.c.ipfs.PinLsCid(ctx, in)
*out = b
return err
}
@ -360,28 +335,36 @@ func (rpcapi *RPCAPI) IPFSConfigKey(ctx context.Context, in string, out *interfa
// IPFSRepoStat runs IPFSConnector.RepoStat().
func (rpcapi *RPCAPI) IPFSRepoStat(ctx context.Context, in struct{}, out *api.IPFSRepoStat) error {
res, err := rpcapi.c.ipfs.RepoStat(ctx)
*out = res
if err != nil {
return err
}
*out = *res
return err
}
// IPFSSwarmPeers runs IPFSConnector.SwarmPeers().
func (rpcapi *RPCAPI) IPFSSwarmPeers(ctx context.Context, in struct{}, out *api.SwarmPeersSerial) error {
func (rpcapi *RPCAPI) IPFSSwarmPeers(ctx context.Context, in struct{}, out *[]peer.ID) error {
res, err := rpcapi.c.ipfs.SwarmPeers(ctx)
*out = res.ToSerial()
return err
if err != nil {
return err
}
*out = res
return nil
}
// IPFSBlockPut runs IPFSConnector.BlockPut().
func (rpcapi *RPCAPI) IPFSBlockPut(ctx context.Context, in api.NodeWithMeta, out *struct{}) error {
func (rpcapi *RPCAPI) IPFSBlockPut(ctx context.Context, in *api.NodeWithMeta, out *struct{}) error {
return rpcapi.c.ipfs.BlockPut(ctx, in)
}
// IPFSBlockGet runs IPFSConnector.BlockGet().
func (rpcapi *RPCAPI) IPFSBlockGet(ctx context.Context, in api.PinSerial, out *[]byte) error {
c := in.DecodeCid()
res, err := rpcapi.c.ipfs.BlockGet(ctx, c)
func (rpcapi *RPCAPI) IPFSBlockGet(ctx context.Context, in cid.Cid, out *[]byte) error {
res, err := rpcapi.c.ipfs.BlockGet(ctx, in)
if err != nil {
return err
}
*out = res
return err
return nil
}
/*
@ -389,19 +372,17 @@ func (rpcapi *RPCAPI) IPFSBlockGet(ctx context.Context, in api.PinSerial, out *[
*/
// ConsensusLogPin runs Consensus.LogPin().
func (rpcapi *RPCAPI) ConsensusLogPin(ctx context.Context, in api.PinSerial, out *struct{}) error {
func (rpcapi *RPCAPI) ConsensusLogPin(ctx context.Context, in *api.Pin, out *struct{}) error {
ctx, span := trace.StartSpan(ctx, "rpc/consensus/LogPin")
defer span.End()
c := in.ToPin()
return rpcapi.c.consensus.LogPin(ctx, c)
return rpcapi.c.consensus.LogPin(ctx, in)
}
// ConsensusLogUnpin runs Consensus.LogUnpin().
func (rpcapi *RPCAPI) ConsensusLogUnpin(ctx context.Context, in api.PinSerial, out *struct{}) error {
func (rpcapi *RPCAPI) ConsensusLogUnpin(ctx context.Context, in *api.Pin, out *struct{}) error {
ctx, span := trace.StartSpan(ctx, "rpc/consensus/LogUnpin")
defer span.End()
c := in.ToPin()
return rpcapi.c.consensus.LogUnpin(ctx, c)
return rpcapi.c.consensus.LogUnpin(ctx, in)
}
// ConsensusAddPeer runs Consensus.AddPeer().
@ -421,8 +402,11 @@ func (rpcapi *RPCAPI) ConsensusRmPeer(ctx context.Context, in peer.ID, out *stru
// ConsensusPeers runs Consensus.Peers().
func (rpcapi *RPCAPI) ConsensusPeers(ctx context.Context, in struct{}, out *[]peer.ID) error {
peers, err := rpcapi.c.consensus.Peers(ctx)
if err != nil {
return err
}
*out = peers
return err
return nil
}
/*
@ -430,13 +414,13 @@ func (rpcapi *RPCAPI) ConsensusPeers(ctx context.Context, in struct{}, out *[]pe
*/
// PeerMonitorLogMetric runs PeerMonitor.LogMetric().
func (rpcapi *RPCAPI) PeerMonitorLogMetric(ctx context.Context, in api.Metric, out *struct{}) error {
func (rpcapi *RPCAPI) PeerMonitorLogMetric(ctx context.Context, in *api.Metric, out *struct{}) error {
rpcapi.c.monitor.LogMetric(ctx, in)
return nil
}
// PeerMonitorLatestMetrics runs PeerMonitor.LatestMetrics().
func (rpcapi *RPCAPI) PeerMonitorLatestMetrics(ctx context.Context, in string, out *[]api.Metric) error {
func (rpcapi *RPCAPI) PeerMonitorLatestMetrics(ctx context.Context, in string, out *[]*api.Metric) error {
*out = rpcapi.c.monitor.LatestMetrics(ctx, in)
return nil
}
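
With the Serial types gone, RPC callers pass the Go types straight through. A hedged sketch of the caller side for two of the endpoints above, assuming a ready *rpc.Client from go-libp2p-gorpc (error handling kept minimal):

import (
	"context"
	"fmt"

	cid "github.com/ipfs/go-cid"
	rpc "github.com/libp2p/go-libp2p-gorpc"

	"github.com/ipfs/ipfs-cluster/api"
)

func pinViaRPC(ctx context.Context, client *rpc.Client, c cid.Cid) error {
	pin := api.PinCid(c) // *api.Pin is sent as-is, no ToSerial() step

	if err := client.CallContext(ctx, "", "Cluster", "Pin", pin, &struct{}{}); err != nil {
		return err
	}

	// cid.Cid and peer.ID are also valid argument/reply types now.
	var pinfo api.PinInfo
	if err := client.CallContext(ctx, "", "Cluster", "TrackerStatus", c, &pinfo); err != nil {
		return err
	}
	fmt.Println(pinfo.Status)
	return nil
}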

View File

@ -68,21 +68,22 @@ func CopyPIDsToIfaces(in []peer.ID) []interface{} {
return ifaces
}
// CopyIDSerialsToIfaces converts an api.IDSerial slice to an empty interface
// CopyIDsToIfaces converts an api.ID slice to an empty interface
// slice using pointers to each element of the original slice.
// Useful to handle gorpc.MultiCall() replies.
func CopyIDSerialsToIfaces(in []api.IDSerial) []interface{} {
func CopyIDsToIfaces(in []*api.ID) []interface{} {
ifaces := make([]interface{}, len(in), len(in))
for i := range in {
ifaces[i] = &in[i]
in[i] = &api.ID{}
ifaces[i] = in[i]
}
return ifaces
}
// CopyIDSerialSliceToIfaces converts an api.IDSerial slice of slices
// CopyIDSliceToIfaces converts an api.ID slice of slices
// to an empty interface slice using pointers to each element of the
// original slice. Useful to handle gorpc.MultiCall() replies.
func CopyIDSerialSliceToIfaces(in [][]api.IDSerial) []interface{} {
func CopyIDSliceToIfaces(in [][]*api.ID) []interface{} {
ifaces := make([]interface{}, len(in), len(in))
for i := range in {
ifaces[i] = &in[i]
@ -90,21 +91,22 @@ func CopyIDSerialSliceToIfaces(in [][]api.IDSerial) []interface{} {
return ifaces
}
// CopyPinInfoSerialToIfaces converts an api.PinInfoSerial slice to
// CopyPinInfoToIfaces converts an api.PinInfo slice to
// an empty interface slice using pointers to each element of
// the original slice. Useful to handle gorpc.MultiCall() replies.
func CopyPinInfoSerialToIfaces(in []api.PinInfoSerial) []interface{} {
func CopyPinInfoToIfaces(in []*api.PinInfo) []interface{} {
ifaces := make([]interface{}, len(in), len(in))
for i := range in {
ifaces[i] = &in[i]
in[i] = &api.PinInfo{}
ifaces[i] = in[i]
}
return ifaces
}
// CopyPinInfoSerialSliceToIfaces converts an api.PinInfoSerial slice of slices
// CopyPinInfoSliceToIfaces converts an api.PinInfo slice of slices
// to an empty interface slice using pointers to each element of the original
// slice. Useful to handle gorpc.MultiCall() replies.
func CopyPinInfoSerialSliceToIfaces(in [][]api.PinInfoSerial) []interface{} {
func CopyPinInfoSliceToIfaces(in [][]*api.PinInfo) []interface{} {
ifaces := make([]interface{}, len(in), len(in))
for i := range in {
ifaces[i] = &in[i]
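
These helpers matter because gorpc.MultiCall needs a distinct, non-nil pointer to decode each peer's reply into; the new versions allocate those &api.PinInfo{} / &api.ID{} slots themselves. A sketch of the intended usage, assuming the usual MultiCall signature from go-libp2p-gorpc:

import (
	"context"

	cid "github.com/ipfs/go-cid"
	rpc "github.com/libp2p/go-libp2p-gorpc"
	peer "github.com/libp2p/go-libp2p-peer"

	"github.com/ipfs/ipfs-cluster/api"
	rpcutil "github.com/ipfs/ipfs-cluster/rpcutil"
)

func statusFromPeers(ctx context.Context, client *rpc.Client, peers []peer.ID, c cid.Cid) ([]*api.PinInfo, []error) {
	// MultiCall wants one context and one reply destination per peer.
	ctxs := make([]context.Context, len(peers), len(peers))
	for i := range ctxs {
		ctxs[i] = ctx
	}

	replies := make([]*api.PinInfo, len(peers), len(peers))
	errs := client.MultiCall(
		ctxs,
		peers,
		"Cluster",
		"TrackerStatus",
		c,
		rpcutil.CopyPinInfoToIfaces(replies), // fills replies with fresh slots
	)
	return replies, errs
}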

View File

@ -64,11 +64,11 @@ func New(dstore ds.Datastore, namespace string, handle codec.Handle) (*State, er
}
// Add adds a new Pin or replaces an existing one.
func (st *State) Add(ctx context.Context, c api.Pin) error {
func (st *State) Add(ctx context.Context, c *api.Pin) error {
_, span := trace.StartSpan(ctx, "state/dsstate/Add")
defer span.End()
ps, err := st.serializePin(&c)
ps, err := st.serializePin(c)
if err != nil {
return err
}
@ -91,7 +91,7 @@ func (st *State) Rm(ctx context.Context, c cid.Cid) error {
// Get returns a Pin from the store and whether it
// was present. When not present, a default pin
// is returned.
func (st *State) Get(ctx context.Context, c cid.Cid) (api.Pin, bool) {
func (st *State) Get(ctx context.Context, c cid.Cid) (*api.Pin, bool) {
_, span := trace.StartSpan(ctx, "state/dsstate/Get")
defer span.End()
@ -103,7 +103,7 @@ func (st *State) Get(ctx context.Context, c cid.Cid) (api.Pin, bool) {
if err != nil {
return api.PinCid(c), false
}
return *p, true
return p, true
}
// Has returns whether a Cid is stored.
@ -120,7 +120,7 @@ func (st *State) Has(ctx context.Context, c cid.Cid) bool {
// List returns the unsorted list of all Pins that have been added to the
// datastore.
func (st *State) List(ctx context.Context) []api.Pin {
func (st *State) List(ctx context.Context) []*api.Pin {
_, span := trace.StartSpan(ctx, "state/dsstate/List")
defer span.End()
@ -130,11 +130,11 @@ func (st *State) List(ctx context.Context) []api.Pin {
results, err := st.ds.Query(q)
if err != nil {
return []api.Pin{}
return []*api.Pin{}
}
defer results.Close()
var pins []api.Pin
var pins []*api.Pin
for r := range results.Next() {
if r.Error != nil {
@ -155,7 +155,7 @@ func (st *State) List(ctx context.Context) []api.Pin {
continue
}
pins = append(pins, *p)
pins = append(pins, p)
}
return pins
}

View File

@ -16,15 +16,15 @@ import (
// which objects are pinned. This component should be thread safe.
type State interface {
// Add adds a pin to the State
Add(context.Context, api.Pin) error
Add(context.Context, *api.Pin) error
// Rm removes a pin from the State
Rm(context.Context, cid.Cid) error
// List lists all the pins in the state
List(context.Context) []api.Pin
List(context.Context) []*api.Pin
// Has returns true if the state is holding information for a Cid
Has(context.Context, cid.Cid) bool
// Get returns the information attached to this pin
Get(context.Context, cid.Cid) (api.Pin, bool)
Get(context.Context, cid.Cid) (*api.Pin, bool)
// Migrate restores the serialized format of an outdated state to the current version
Migrate(ctx context.Context, r io.Reader) error
// Return the version of this state
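Both State implementations (dsstate and mapstate) now take and return *api.Pin, so pins can be shared between components without copying. A minimal sketch of working against this interface after the change (st, c and process are illustrative, not part of this diff):

pin := api.PinCid(c) // *api.Pin with default options
pin.Name = "example" // illustrative

if err := st.Add(ctx, pin); err != nil {
	return err
}

if got, ok := st.Get(ctx, c); ok {
	logger.Infof("%s tracked with replication min %d", got.Cid, got.ReplicationFactorMin)
}

for _, p := range st.List(ctx) { // []*api.Pin: no per-element copies
	process(p)
}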

View File

@ -50,7 +50,7 @@ func NewMapState() state.State {
}
// Add adds a Pin to the internal map.
func (st *MapState) Add(ctx context.Context, c api.Pin) error {
func (st *MapState) Add(ctx context.Context, c *api.Pin) error {
ctx, span := trace.StartSpan(ctx, "state/map/Add")
defer span.End()
return st.dst.Add(ctx, c)
@ -69,7 +69,7 @@ func (st *MapState) Rm(ctx context.Context, c cid.Cid) error {
// fields initialized, regardless of the
// presence of the provided Cid in the state.
// To check the presence, use MapState.Has(cid.Cid).
func (st *MapState) Get(ctx context.Context, c cid.Cid) (api.Pin, bool) {
func (st *MapState) Get(ctx context.Context, c cid.Cid) (*api.Pin, bool) {
ctx, span := trace.StartSpan(ctx, "state/map/Get")
defer span.End()
@ -84,7 +84,7 @@ func (st *MapState) Has(ctx context.Context, c cid.Cid) bool {
}
// List provides the list of tracked Pins.
func (st *MapState) List(ctx context.Context) []api.Pin {
func (st *MapState) List(ctx context.Context) []*api.Pin {
ctx, span := trace.StartSpan(ctx, "state/map/List")
defer span.End()
return st.dst.List(ctx)

View File

@ -16,7 +16,7 @@ import (
var testCid1, _ = cid.Decode("QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmq")
var testPeerID1, _ = peer.IDB58Decode("QmXZrtE5jQwXNqCJMfHUTQkvhQ4ZAnqMnmzFMJfLewuabc")
var c = api.Pin{
var c = &api.Pin{
Cid: testCid1,
Type: api.DataType,
Allocations: []peer.ID{testPeerID1},

View File

@ -10,6 +10,8 @@ import (
"errors"
"io"
cid "github.com/ipfs/go-cid"
"github.com/ipfs/ipfs-cluster/api"
msgpack "github.com/multiformats/go-multicodec/msgpack"
@ -200,18 +202,26 @@ func (st *mapStateV5) next() migrateable {
logger.Infof("migrating %s: %s", k, v.Cid)
// we need to convert because we added codec struct fields
// and thus serialization is not the same.
p := api.PinSerial{}
p.Cid = v.Cid
p.Type = v.Type
p.Allocations = v.Allocations
p := &api.Pin{}
c, err := cid.Decode(v.Cid)
if err != nil {
logger.Error(err)
}
p.Cid = c
p.Type = api.PinType(v.Type)
p.Allocations = api.StringsToPeers(v.Allocations)
p.MaxDepth = v.MaxDepth
p.Reference = v.Reference
r, err := cid.Decode(v.Reference)
if err == nil {
p.Reference = &r
}
p.ReplicationFactorMax = v.ReplicationFactorMax
p.ReplicationFactorMin = v.ReplicationFactorMin
p.Name = v.Name
p.ShardSize = v.ShardSize
v6.Add(context.Background(), p.ToPin())
v6.Add(context.Background(), p)
}
return v6.(*MapState)
}
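The v5 format stored Cids, allocations and the Reference field as plain strings, so the migration decodes them into the new concrete types before re-adding each pin. api.StringsToPeers handles the allocations; a rough equivalent, assuming it simply base58-decodes each entry and drops invalid ones, looks like:

func stringsToPeers(strs []string) []peer.ID {
	peers := make([]peer.ID, 0, len(strs))
	for _, s := range strs {
		pid, err := peer.IDB58Decode(s)
		if err != nil {
			// a bad entry is skipped rather than aborting the migration
			continue
		}
		peers = append(peers, pid)
	}
	return peers
}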

View File

@ -39,86 +39,86 @@ func NewMockRPCClientWithHost(t testing.TB, h host.Host) *rpc.Client {
return c
}
func (mock *mockService) Pin(ctx context.Context, in api.PinSerial, out *struct{}) error {
if in.Cid == ErrorCid {
func (mock *mockService) Pin(ctx context.Context, in *api.Pin, out *struct{}) error {
if in.Cid.String() == ErrorCid {
return ErrBadCid
}
return nil
}
func (mock *mockService) Unpin(ctx context.Context, in api.PinSerial, out *struct{}) error {
if in.Cid == ErrorCid {
func (mock *mockService) Unpin(ctx context.Context, in cid.Cid, out *struct{}) error {
if in.String() == ErrorCid {
return ErrBadCid
}
return nil
}
func (mock *mockService) PinPath(ctx context.Context, in api.PinPath, out *api.PinSerial) error {
func (mock *mockService) PinPath(ctx context.Context, in *api.PinPath, out *api.Pin) error {
_, err := gopath.ParsePath(in.Path)
if err != nil {
return err
}
*out = api.PinWithOpts(MustDecodeCid(TestCidResolved), in.PinOptions).ToSerial()
*out = *api.PinWithOpts(MustDecodeCid(TestCidResolved), in.PinOptions)
return nil
}
func (mock *mockService) UnpinPath(ctx context.Context, in string, out *api.PinSerial) error {
func (mock *mockService) UnpinPath(ctx context.Context, in string, out *api.Pin) error {
_, err := gopath.ParsePath(in)
if err != nil {
return err
}
*out = api.PinCid(MustDecodeCid(TestCidResolved)).ToSerial()
*out = *api.PinCid(MustDecodeCid(TestCidResolved))
return nil
}
func (mock *mockService) Pins(ctx context.Context, in struct{}, out *[]api.PinSerial) error {
func (mock *mockService) Pins(ctx context.Context, in struct{}, out *[]*api.Pin) error {
opts := api.PinOptions{
ReplicationFactorMin: -1,
ReplicationFactorMax: -1,
}
*out = []api.PinSerial{
api.PinWithOpts(MustDecodeCid(TestCid1), opts).ToSerial(),
api.PinCid(MustDecodeCid(TestCid2)).ToSerial(),
api.PinWithOpts(MustDecodeCid(TestCid3), opts).ToSerial(),
*out = []*api.Pin{
api.PinWithOpts(MustDecodeCid(TestCid1), opts),
api.PinCid(MustDecodeCid(TestCid2)),
api.PinWithOpts(MustDecodeCid(TestCid3), opts),
}
return nil
}
func (mock *mockService) PinGet(ctx context.Context, in api.PinSerial, out *api.PinSerial) error {
switch in.Cid {
func (mock *mockService) PinGet(ctx context.Context, in cid.Cid, out *api.Pin) error {
switch in.String() {
case ErrorCid:
return errors.New("this is an expected error when using ErrorCid")
case TestCid1, TestCid3:
p := api.PinCid(MustDecodeCid(in.Cid)).ToSerial()
p := api.PinCid(in)
p.ReplicationFactorMin = -1
p.ReplicationFactorMax = -1
*out = p
*out = *p
return nil
case TestCid2: // This is a remote pin
p := api.PinCid(MustDecodeCid(in.Cid)).ToSerial()
p := api.PinCid(in)
p.ReplicationFactorMin = 1
p.ReplicationFactorMax = 1
*out = p
*out = *p
default:
return errors.New("not found")
}
return nil
}
func (mock *mockService) ID(ctx context.Context, in struct{}, out *api.IDSerial) error {
func (mock *mockService) ID(ctx context.Context, in struct{}, out *api.ID) error {
//_, pubkey, _ := crypto.GenerateKeyPair(
// DefaultConfigCrypto,
// DefaultConfigKeyLength)
*out = api.IDSerial{
ID: TestPeerID1.Pretty(),
addr, _ := api.NewMultiaddr("/ip4/127.0.0.1/tcp/4001/ipfs/" + TestPeerID1.Pretty())
*out = api.ID{
ID: TestPeerID1,
//PublicKey: pubkey,
Version: "0.0.mock",
IPFS: api.IPFSIDSerial{
ID: TestPeerID1.Pretty(),
Addresses: api.MultiaddrsSerial{
api.MultiaddrSerial("/ip4/127.0.0.1/tcp/4001/ipfs/" + TestPeerID1.Pretty()),
},
IPFS: api.IPFSID{
ID: TestPeerID1,
Addresses: []api.Multiaddr{addr},
},
}
return nil
@ -131,16 +131,16 @@ func (mock *mockService) Version(ctx context.Context, in struct{}, out *api.Vers
return nil
}
func (mock *mockService) Peers(ctx context.Context, in struct{}, out *[]api.IDSerial) error {
id := api.IDSerial{}
mock.ID(ctx, in, &id)
func (mock *mockService) Peers(ctx context.Context, in struct{}, out *[]*api.ID) error {
id := &api.ID{}
mock.ID(ctx, in, id)
*out = []api.IDSerial{id}
*out = []*api.ID{id}
return nil
}
func (mock *mockService) PeerAdd(ctx context.Context, in string, out *api.IDSerial) error {
id := api.IDSerial{}
func (mock *mockService) PeerAdd(ctx context.Context, in peer.ID, out *api.ID) error {
id := api.ID{}
mock.ID(ctx, struct{}{}, &id)
*out = id
return nil
@ -150,37 +150,38 @@ func (mock *mockService) PeerRemove(ctx context.Context, in peer.ID, out *struct
return nil
}
func (mock *mockService) ConnectGraph(ctx context.Context, in struct{}, out *api.ConnectGraphSerial) error {
*out = api.ConnectGraphSerial{
ClusterID: TestPeerID1.Pretty(),
IPFSLinks: map[string][]string{
TestPeerID4.Pretty(): []string{TestPeerID5.Pretty(), TestPeerID6.Pretty()},
TestPeerID5.Pretty(): []string{TestPeerID4.Pretty(), TestPeerID6.Pretty()},
TestPeerID6.Pretty(): []string{TestPeerID4.Pretty(), TestPeerID5.Pretty()},
func (mock *mockService) ConnectGraph(ctx context.Context, in struct{}, out *api.ConnectGraph) error {
*out = api.ConnectGraph{
ClusterID: TestPeerID1,
IPFSLinks: map[string][]peer.ID{
peer.IDB58Encode(TestPeerID4): []peer.ID{TestPeerID5, TestPeerID6},
peer.IDB58Encode(TestPeerID5): []peer.ID{TestPeerID4, TestPeerID6},
peer.IDB58Encode(TestPeerID6): []peer.ID{TestPeerID4, TestPeerID5},
},
ClusterLinks: map[string][]string{
TestPeerID1.Pretty(): []string{TestPeerID2.Pretty(), TestPeerID3.Pretty()},
TestPeerID2.Pretty(): []string{TestPeerID1.Pretty(), TestPeerID3.Pretty()},
TestPeerID3.Pretty(): []string{TestPeerID1.Pretty(), TestPeerID2.Pretty()},
ClusterLinks: map[string][]peer.ID{
peer.IDB58Encode(TestPeerID1): []peer.ID{TestPeerID2, TestPeerID3},
peer.IDB58Encode(TestPeerID2): []peer.ID{TestPeerID1, TestPeerID3},
peer.IDB58Encode(TestPeerID3): []peer.ID{TestPeerID1, TestPeerID2},
},
ClustertoIPFS: map[string]string{
TestPeerID1.Pretty(): TestPeerID4.Pretty(),
TestPeerID2.Pretty(): TestPeerID5.Pretty(),
TestPeerID3.Pretty(): TestPeerID6.Pretty(),
ClustertoIPFS: map[string]peer.ID{
peer.IDB58Encode(TestPeerID1): TestPeerID4,
peer.IDB58Encode(TestPeerID2): TestPeerID5,
peer.IDB58Encode(TestPeerID3): TestPeerID6,
},
}
return nil
}
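Note that the link maps are keyed by the base58 form of the peer IDs (peer.IDB58Encode) rather than by peer.ID itself, presumably so the JSON output of ConnectGraph keeps readable keys instead of raw multihash bytes. Consumers can decode the keys back when they need the typed ID; a small illustrative loop (graph is an api.ConnectGraph):

for k, ipfsPeers := range graph.IPFSLinks {
	clusterPeer, err := peer.IDB58Decode(k) // keys round-trip into peer.IDs
	if err != nil {
		continue
	}
	fmt.Printf("%s sees %d ipfs peers\n", clusterPeer.Pretty(), len(ipfsPeers))
}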
func (mock *mockService) StatusAll(ctx context.Context, in struct{}, out *[]api.GlobalPinInfoSerial) error {
func (mock *mockService) StatusAll(ctx context.Context, in struct{}, out *[]*api.GlobalPinInfo) error {
c1, _ := cid.Decode(TestCid1)
c2, _ := cid.Decode(TestCid2)
c3, _ := cid.Decode(TestCid3)
*out = globalPinInfoSliceToSerial([]api.GlobalPinInfo{
pid := peer.IDB58Encode(TestPeerID1)
*out = []*api.GlobalPinInfo{
{
Cid: c1,
PeerMap: map[peer.ID]api.PinInfo{
TestPeerID1: {
PeerMap: map[string]*api.PinInfo{
pid: {
Cid: c1,
Peer: TestPeerID1,
Status: api.TrackerStatusPinned,
@ -190,8 +191,8 @@ func (mock *mockService) StatusAll(ctx context.Context, in struct{}, out *[]api.
},
{
Cid: c2,
PeerMap: map[peer.ID]api.PinInfo{
TestPeerID1: {
PeerMap: map[string]*api.PinInfo{
pid: {
Cid: c2,
Peer: TestPeerID1,
Status: api.TrackerStatusPinning,
@ -201,8 +202,8 @@ func (mock *mockService) StatusAll(ctx context.Context, in struct{}, out *[]api.
},
{
Cid: c3,
PeerMap: map[peer.ID]api.PinInfo{
TestPeerID1: {
PeerMap: map[string]*api.PinInfo{
pid: {
Cid: c3,
Peer: TestPeerID1,
Status: api.TrackerStatusPinError,
@ -210,70 +211,69 @@ func (mock *mockService) StatusAll(ctx context.Context, in struct{}, out *[]api.
},
},
},
})
}
return nil
}
func (mock *mockService) StatusAllLocal(ctx context.Context, in struct{}, out *[]api.PinInfoSerial) error {
func (mock *mockService) StatusAllLocal(ctx context.Context, in struct{}, out *[]*api.PinInfo) error {
return mock.TrackerStatusAll(ctx, in, out)
}
func (mock *mockService) Status(ctx context.Context, in api.PinSerial, out *api.GlobalPinInfoSerial) error {
if in.Cid == ErrorCid {
func (mock *mockService) Status(ctx context.Context, in cid.Cid, out *api.GlobalPinInfo) error {
if in.String() == ErrorCid {
return ErrBadCid
}
c1, _ := cid.Decode(TestCid1)
*out = api.GlobalPinInfo{
Cid: c1,
PeerMap: map[peer.ID]api.PinInfo{
TestPeerID1: {
Cid: c1,
Cid: in,
PeerMap: map[string]*api.PinInfo{
peer.IDB58Encode(TestPeerID1): {
Cid: in,
Peer: TestPeerID1,
Status: api.TrackerStatusPinned,
TS: time.Now(),
},
},
}.ToSerial()
}
return nil
}
func (mock *mockService) StatusLocal(ctx context.Context, in api.PinSerial, out *api.PinInfoSerial) error {
func (mock *mockService) StatusLocal(ctx context.Context, in cid.Cid, out *api.PinInfo) error {
return mock.TrackerStatus(ctx, in, out)
}
func (mock *mockService) SyncAll(ctx context.Context, in struct{}, out *[]api.GlobalPinInfoSerial) error {
func (mock *mockService) SyncAll(ctx context.Context, in struct{}, out *[]*api.GlobalPinInfo) error {
return mock.StatusAll(ctx, in, out)
}
func (mock *mockService) SyncAllLocal(ctx context.Context, in struct{}, out *[]api.PinInfoSerial) error {
func (mock *mockService) SyncAllLocal(ctx context.Context, in struct{}, out *[]*api.PinInfo) error {
return mock.StatusAllLocal(ctx, in, out)
}
func (mock *mockService) Sync(ctx context.Context, in api.PinSerial, out *api.GlobalPinInfoSerial) error {
func (mock *mockService) Sync(ctx context.Context, in cid.Cid, out *api.GlobalPinInfo) error {
return mock.Status(ctx, in, out)
}
func (mock *mockService) SyncLocal(ctx context.Context, in api.PinSerial, out *api.PinInfoSerial) error {
func (mock *mockService) SyncLocal(ctx context.Context, in cid.Cid, out *api.PinInfo) error {
return mock.StatusLocal(ctx, in, out)
}
func (mock *mockService) RecoverAllLocal(ctx context.Context, in struct{}, out *[]api.PinInfoSerial) error {
func (mock *mockService) RecoverAllLocal(ctx context.Context, in struct{}, out *[]*api.PinInfo) error {
return mock.TrackerRecoverAll(ctx, in, out)
}
func (mock *mockService) Recover(ctx context.Context, in api.PinSerial, out *api.GlobalPinInfoSerial) error {
func (mock *mockService) Recover(ctx context.Context, in cid.Cid, out *api.GlobalPinInfo) error {
return mock.Status(ctx, in, out)
}
func (mock *mockService) RecoverLocal(ctx context.Context, in api.PinSerial, out *api.PinInfoSerial) error {
func (mock *mockService) RecoverLocal(ctx context.Context, in cid.Cid, out *api.PinInfo) error {
return mock.TrackerRecover(ctx, in, out)
}
func (mock *mockService) BlockAllocate(ctx context.Context, in api.PinSerial, out *[]string) error {
func (mock *mockService) BlockAllocate(ctx context.Context, in *api.Pin, out *[]peer.ID) error {
if in.ReplicationFactorMin > 1 {
return errors.New("replMin too high: can only mock-allocate to 1")
}
*out = []string{""} // local peer
*out = in.Allocations
return nil
}
@ -283,19 +283,19 @@ func (mock *mockService) SendInformerMetric(ctx context.Context, in struct{}, ou
/* Tracker methods */
func (mock *mockService) Track(ctx context.Context, in api.PinSerial, out *struct{}) error {
func (mock *mockService) Track(ctx context.Context, in *api.Pin, out *struct{}) error {
return nil
}
func (mock *mockService) Untrack(ctx context.Context, in api.PinSerial, out *struct{}) error {
func (mock *mockService) Untrack(ctx context.Context, in cid.Cid, out *struct{}) error {
return nil
}
func (mock *mockService) TrackerStatusAll(ctx context.Context, in struct{}, out *[]api.PinInfoSerial) error {
func (mock *mockService) TrackerStatusAll(ctx context.Context, in struct{}, out *[]*api.PinInfo) error {
c1, _ := cid.Decode(TestCid1)
c3, _ := cid.Decode(TestCid3)
*out = pinInfoSliceToSerial([]api.PinInfo{
*out = []*api.PinInfo{
{
Cid: c1,
Peer: TestPeerID1,
@ -308,74 +308,72 @@ func (mock *mockService) TrackerStatusAll(ctx context.Context, in struct{}, out
Status: api.TrackerStatusPinError,
TS: time.Now(),
},
})
}
return nil
}
func (mock *mockService) TrackerStatus(ctx context.Context, in api.PinSerial, out *api.PinInfoSerial) error {
if in.Cid == ErrorCid {
func (mock *mockService) TrackerStatus(ctx context.Context, in cid.Cid, out *api.PinInfo) error {
if in.String() == ErrorCid {
return ErrBadCid
}
c1, _ := cid.Decode(TestCid1)
*out = api.PinInfo{
Cid: c1,
Cid: in,
Peer: TestPeerID2,
Status: api.TrackerStatusPinned,
TS: time.Now(),
}.ToSerial()
}
return nil
}
func (mock *mockService) TrackerRecoverAll(ctx context.Context, in struct{}, out *[]api.PinInfoSerial) error {
*out = make([]api.PinInfoSerial, 0, 0)
func (mock *mockService) TrackerRecoverAll(ctx context.Context, in struct{}, out *[]*api.PinInfo) error {
*out = make([]*api.PinInfo, 0, 0)
return nil
}
func (mock *mockService) TrackerRecover(ctx context.Context, in api.PinSerial, out *api.PinInfoSerial) error {
in2 := in.ToPin()
func (mock *mockService) TrackerRecover(ctx context.Context, in cid.Cid, out *api.PinInfo) error {
*out = api.PinInfo{
Cid: in2.Cid,
Cid: in,
Peer: TestPeerID1,
Status: api.TrackerStatusPinned,
TS: time.Now(),
}.ToSerial()
}
return nil
}
/* PeerMonitor methods */
// PeerMonitorLogMetric runs PeerMonitor.LogMetric().
func (mock *mockService) PeerMonitorLogMetric(ctx context.Context, in api.Metric, out *struct{}) error {
func (mock *mockService) PeerMonitorLogMetric(ctx context.Context, in *api.Metric, out *struct{}) error {
return nil
}
// PeerMonitorLatestMetrics runs PeerMonitor.LatestMetrics().
func (mock *mockService) PeerMonitorLatestMetrics(ctx context.Context, in string, out *[]api.Metric) error {
m := api.Metric{
func (mock *mockService) PeerMonitorLatestMetrics(ctx context.Context, in string, out *[]*api.Metric) error {
m := &api.Metric{
Name: "test",
Peer: TestPeerID1,
Value: "0",
Valid: true,
}
m.SetTTL(2 * time.Second)
last := []api.Metric{m}
last := []*api.Metric{m}
*out = last
return nil
}
/* IPFSConnector methods */
func (mock *mockService) IPFSPin(ctx context.Context, in api.PinSerial, out *struct{}) error {
func (mock *mockService) IPFSPin(ctx context.Context, in *api.Pin, out *struct{}) error {
return nil
}
func (mock *mockService) IPFSUnpin(ctx context.Context, in api.PinSerial, out *struct{}) error {
func (mock *mockService) IPFSUnpin(ctx context.Context, in cid.Cid, out *struct{}) error {
return nil
}
func (mock *mockService) IPFSPinLsCid(ctx context.Context, in api.PinSerial, out *api.IPFSPinStatus) error {
if in.Cid == TestCid1 || in.Cid == TestCid3 {
func (mock *mockService) IPFSPinLsCid(ctx context.Context, in cid.Cid, out *api.IPFSPinStatus) error {
if in.String() == TestCid1 || in.String() == TestCid3 {
*out = api.IPFSPinStatusRecursive
} else {
*out = api.IPFSPinStatusUnpinned
@ -396,8 +394,8 @@ func (mock *mockService) IPFSConnectSwarms(ctx context.Context, in struct{}, out
return nil
}
func (mock *mockService) IPFSSwarmPeers(ctx context.Context, in struct{}, out *api.SwarmPeersSerial) error {
*out = []string{TestPeerID2.Pretty(), TestPeerID3.Pretty()}
func (mock *mockService) IPFSSwarmPeers(ctx context.Context, in struct{}, out *[]peer.ID) error {
*out = []peer.ID{TestPeerID2, TestPeerID3}
return nil
}
@ -421,7 +419,7 @@ func (mock *mockService) IPFSRepoStat(ctx context.Context, in struct{}, out *api
return nil
}
func (mock *mockService) IPFSBlockPut(ctx context.Context, in api.NodeWithMeta, out *struct{}) error {
func (mock *mockService) IPFSBlockPut(ctx context.Context, in *api.NodeWithMeta, out *struct{}) error {
return nil
}
@ -437,21 +435,3 @@ func (mock *mockService) ConsensusPeers(ctx context.Context, in struct{}, out *[
*out = []peer.ID{TestPeerID1, TestPeerID2, TestPeerID3}
return nil
}
// FIXME: dup from util.go
func globalPinInfoSliceToSerial(gpi []api.GlobalPinInfo) []api.GlobalPinInfoSerial {
gpis := make([]api.GlobalPinInfoSerial, len(gpi), len(gpi))
for i, v := range gpi {
gpis[i] = v.ToSerial()
}
return gpis
}
// FIXME: dup from util.go
func pinInfoSliceToSerial(pi []api.PinInfo) []api.PinInfoSerial {
pis := make([]api.PinInfoSerial, len(pi), len(pi))
for i, v := range pi {
pis[i] = v.ToSerial()
}
return pis
}
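With the Serial duplicates removed, the mock service speaks the same pointer types as the real RPC API, so tests can call it with cid.Cid, *api.Pin and friends directly. A short illustrative use of the mock client from a test, assuming the NewMockRPCClient helper defined alongside NewMockRPCClientWithHost (variable names are not taken from this change):

ctx := context.Background()
mockClient := NewMockRPCClient(t)

var pin api.Pin
err := mockClient.CallContext(
	ctx,
	"", // local peer
	"Cluster",
	"PinGet",
	MustDecodeCid(TestCid1),
	&pin,
)
if err != nil {
	t.Fatal(err)
}
if !pin.Cid.Equals(MustDecodeCid(TestCid1)) {
	t.Error("unexpected pin returned")
}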

29
util.go
View File

@ -7,7 +7,6 @@ import (
"github.com/ipfs/ipfs-cluster/api"
cid "github.com/ipfs/go-cid"
host "github.com/libp2p/go-libp2p-host"
peer "github.com/libp2p/go-libp2p-peer"
ma "github.com/multiformats/go-multiaddr"
)
@ -52,34 +51,6 @@ func PeersFromMultiaddrs(addrs []ma.Multiaddr) []peer.ID {
// return addrs
// }
// If we have connections open to that PID and they are using a different addr
// then we return the one we are using, otherwise the one provided
func getRemoteMultiaddr(h host.Host, pid peer.ID, addr ma.Multiaddr) ma.Multiaddr {
conns := h.Network().ConnsToPeer(pid)
if len(conns) > 0 {
return api.MustLibp2pMultiaddrJoin(conns[0].RemoteMultiaddr(), pid)
}
return api.MustLibp2pMultiaddrJoin(addr, pid)
}
func pinInfoSliceToSerial(pi []api.PinInfo) []api.PinInfoSerial {
pis := make([]api.PinInfoSerial, len(pi), len(pi))
for i, v := range pi {
pis[i] = v.ToSerial()
}
return pis
}
// GlobalPinInfoSliceToSerial is a helper function for serializing a slice of
// api.GlobalPinInfos.
func GlobalPinInfoSliceToSerial(gpi []api.GlobalPinInfo) []api.GlobalPinInfoSerial {
gpis := make([]api.GlobalPinInfoSerial, len(gpi), len(gpi))
for i, v := range gpi {
gpis[i] = v.ToSerial()
}
return gpis
}
func logError(fmtstr string, args ...interface{}) error {
msg := fmt.Sprintf(fmtstr, args...)
logger.Error(msg)