Merge pull request #688 from ipfs/feat/remove-serial

Remove *Serial types. Use pointers for all types.
Hector Sanjuan, 2019-03-01 14:33:25 +00:00 (committed by GitHub)
commit 121660aa2f
84 changed files with 2043 additions and 2659 deletions
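
Every file below applies the same mechanical change: the flat, string-based *Serial intermediates (api.PinSerial, api.IDSerial, api.GlobalPinInfoSerial, ...) are deleted, RPC and REST endpoints exchange pointers to the api types directly, and cid.Cid / peer.ID replace their string encodings. A minimal before/after sketch of the pattern, modelled on the testRPC.Pin stub changed in the adder tests further down; the surrounding package and imports are illustrative.

package example

import (
    "context"
    "sync"

    "github.com/ipfs/ipfs-cluster/api"
)

type testRPC struct {
    pins sync.Map
}

// Before this commit the argument was a value of the flat serializable
// type and its Cid field was a plain string:
//
//    func (rpcs *testRPC) Pin(ctx context.Context, in api.PinSerial, out *struct{}) error {
//        rpcs.pins.Store(in.Cid, in)
//        return nil
//    }

// After: a *api.Pin is passed over RPC directly; Cid is a cid.Cid, so the
// map key comes from String().
func (rpcs *testRPC) Pin(ctx context.Context, in *api.Pin, out *struct{}) error {
    rpcs.pins.Store(in.Cid.String(), in)
    return nil
}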

View File

@ -55,7 +55,6 @@ func TestAddPeerDown(t *testing.T) {
defer shutdownClusters(t, clusters, mock)
sth := test.NewShardingTestHelper()
defer sth.Clean(t)
err := clusters[0].Shutdown(ctx)
if err != nil {
t.Fatal(err)
@ -84,7 +83,6 @@ func TestAddPeerDown(t *testing.T) {
if c.id == clusters[0].id {
return
}
pin := c.StatusLocal(ctx, ci)
if pin.Error != "" {
t.Error(pin.Error)

View File

@ -94,7 +94,7 @@ func (a *Adder) FromMultipart(ctx context.Context, r *multipart.Reader) (cid.Cid
// FromFiles adds content from a files.Directory. The adder will no longer
// be usable after calling this method.
func (a *Adder) FromFiles(ctx context.Context, f files.Directory) (cid.Cid, error) {
logger.Debugf("adding from files")
logger.Debug("adding from files")
a.setContext(ctx)
if a.ctx.Err() != nil { // don't allow running twice

View File

@ -57,7 +57,7 @@ func (dgs *DAGService) Add(ctx context.Context, node ipld.Node) error {
return err
}
nodeSerial := &api.NodeWithMeta{
Cid: node.Cid().String(),
Cid: node.Cid(),
Data: node.RawData(),
CumSize: size,
}
@ -78,7 +78,7 @@ func (dgs *DAGService) Finalize(ctx context.Context, root cid.Cid) (cid.Cid, err
"",
"Cluster",
"Pin",
rootPin.ToSerial(),
rootPin,
&struct{}{},
)
}
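
With NodeWithMeta.Cid now a cid.Cid, Add wraps the ipld node without converting to a string, as the hunk above shows. A small sketch of that construction; the helper name and package are illustrative and CumSize is assumed to be a uint64.

package example

import (
    ipld "github.com/ipfs/go-ipld-format"
    "github.com/ipfs/ipfs-cluster/api"
)

// wrapNode builds the RPC payload for a block. Cid is stored as a cid.Cid
// now, not as its string form. CumSize is assumed to be a uint64.
func wrapNode(n ipld.Node, cumSize uint64) *api.NodeWithMeta {
    return &api.NodeWithMeta{
        Cid:     n.Cid(),
        Data:    n.RawData(),
        CumSize: cumSize,
    }
}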

View File

@ -7,6 +7,8 @@ import (
"sync"
"testing"
peer "github.com/libp2p/go-libp2p-peer"
adder "github.com/ipfs/ipfs-cluster/adder"
"github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/test"
@ -19,21 +21,23 @@ type testRPC struct {
pins sync.Map
}
func (rpcs *testRPC) IPFSBlockPut(ctx context.Context, in api.NodeWithMeta, out *struct{}) error {
rpcs.blocks.Store(in.Cid, in)
func (rpcs *testRPC) IPFSBlockPut(ctx context.Context, in *api.NodeWithMeta, out *struct{}) error {
rpcs.blocks.Store(in.Cid.String(), in)
return nil
}
func (rpcs *testRPC) Pin(ctx context.Context, in api.PinSerial, out *struct{}) error {
rpcs.pins.Store(in.Cid, in)
func (rpcs *testRPC) Pin(ctx context.Context, in *api.Pin, out *struct{}) error {
rpcs.pins.Store(in.Cid.String(), in)
return nil
}
func (rpcs *testRPC) BlockAllocate(ctx context.Context, in api.PinSerial, out *[]string) error {
func (rpcs *testRPC) BlockAllocate(ctx context.Context, in *api.Pin, out *[]peer.ID) error {
if in.ReplicationFactorMin > 1 {
return errors.New("we can only replicate to 1 peer")
}
*out = []string{""}
// it does not matter since we use host == nil for RPC, so it uses the
// local one in all cases.
*out = []peer.ID{test.PeerID1}
return nil
}

View File

@ -126,8 +126,8 @@ func makeDAG(ctx context.Context, dagObj map[string]cid.Cid) ([]ipld.Node, error
func putDAG(ctx context.Context, rpcC *rpc.Client, nodes []ipld.Node, dests []peer.ID) error {
for _, n := range nodes {
//logger.Debugf("The dag cbor Node Links: %+v", n.Links())
b := api.NodeWithMeta{
Cid: n.Cid().String(), // Tests depend on this.
b := &api.NodeWithMeta{
Cid: n.Cid(), // Tests depend on this.
Data: n.RawData(),
Format: "cbor",
}

View File

@ -76,7 +76,7 @@ func (dgs *DAGService) Add(ctx context.Context, node ipld.Node) error {
return err
}
nodeSerial := &api.NodeWithMeta{
Cid: node.Cid().String(),
Cid: node.Cid(),
Data: node.RawData(),
CumSize: size,
}
@ -122,7 +122,7 @@ func (dgs *DAGService) Finalize(ctx context.Context, dataRoot cid.Cid) (cid.Cid,
clusterDAGPin.MaxDepth = 0 // pin direct
clusterDAGPin.Name = fmt.Sprintf("%s-clusterDAG", dgs.pinOpts.Name)
clusterDAGPin.Type = api.ClusterDAGType
clusterDAGPin.Reference = dataRoot
clusterDAGPin.Reference = &dataRoot
err = adder.Pin(ctx, dgs.rpcClient, clusterDAGPin)
if err != nil {
return dataRoot, err
@ -131,7 +131,7 @@ func (dgs *DAGService) Finalize(ctx context.Context, dataRoot cid.Cid) (cid.Cid,
// Pin the META pin
metaPin := api.PinWithOpts(dataRoot, dgs.pinOpts)
metaPin.Type = api.MetaType
metaPin.Reference = clusterDAG
metaPin.Reference = &clusterDAG
metaPin.MaxDepth = 0 // irrelevant. Meta-pins are not pinned
err = adder.Pin(ctx, dgs.rpcClient, metaPin)
if err != nil {
@ -180,14 +180,9 @@ func (dgs *DAGService) ingestBlock(ctx context.Context, n *api.NodeWithMeta) err
logger.Debugf("ingesting block %s in shard %d (%s)", n.Cid, len(dgs.shards), dgs.pinOpts.Name)
c, err := cid.Decode(n.Cid)
if err != nil {
return err
}
// add the block to it if it fits and return
if shard.Size()+n.Size() < shard.Limit() {
shard.AddLink(ctx, c, n.Size())
shard.AddLink(ctx, n.Cid, n.Size())
return adder.PutBlock(ctx, dgs.rpcClient, n, shard.Allocations())
}
@ -207,7 +202,7 @@ func (dgs *DAGService) ingestBlock(ctx context.Context, n *api.NodeWithMeta) err
return errors.New("block doesn't fit in empty shard: shard size too small?")
}
_, err = dgs.flushCurrentShard(ctx)
_, err := dgs.flushCurrentShard(ctx)
if err != nil {
return err
}

View File

@ -10,6 +10,7 @@ import (
adder "github.com/ipfs/ipfs-cluster/adder"
"github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/test"
peer "github.com/libp2p/go-libp2p-peer"
cid "github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log"
@ -26,30 +27,32 @@ type testRPC struct {
pins sync.Map
}
func (rpcs *testRPC) IPFSBlockPut(ctx context.Context, in api.NodeWithMeta, out *struct{}) error {
rpcs.blocks.Store(in.Cid, in.Data)
func (rpcs *testRPC) IPFSBlockPut(ctx context.Context, in *api.NodeWithMeta, out *struct{}) error {
rpcs.blocks.Store(in.Cid.String(), in.Data)
return nil
}
func (rpcs *testRPC) Pin(ctx context.Context, in api.PinSerial, out *struct{}) error {
rpcs.pins.Store(in.Cid, in)
func (rpcs *testRPC) Pin(ctx context.Context, in *api.Pin, out *struct{}) error {
rpcs.pins.Store(in.Cid.String(), in)
return nil
}
func (rpcs *testRPC) BlockAllocate(ctx context.Context, in api.PinSerial, out *[]string) error {
func (rpcs *testRPC) BlockAllocate(ctx context.Context, in *api.Pin, out *[]peer.ID) error {
if in.ReplicationFactorMin > 1 {
return errors.New("we can only replicate to 1 peer")
}
*out = []string{""}
// it does not matter since we use host == nil for RPC, so it uses the
// local one in all cases
*out = []peer.ID{test.PeerID1}
return nil
}
func (rpcs *testRPC) PinGet(ctx context.Context, c cid.Cid) (api.Pin, error) {
func (rpcs *testRPC) PinGet(ctx context.Context, c cid.Cid) (*api.Pin, error) {
pI, ok := rpcs.pins.Load(c.String())
if !ok {
return api.Pin{}, errors.New("not found")
return nil, errors.New("not found")
}
return pI.(api.PinSerial).ToPin(), nil
return pI.(*api.Pin), nil
}
func (rpcs *testRPC) BlockGet(ctx context.Context, c cid.Cid) ([]byte, error) {
@ -110,7 +113,7 @@ func TestFromMultipart(t *testing.T) {
// Print all pins
// rpcObj.pins.Range(func(k, v interface{}) bool {
// p := v.(api.PinSerial)
// p := v.(*api.Pin)
// j, _ := config.DefaultJSONMarshal(p)
// fmt.Printf("%s", j)
// return true

View File

@ -92,7 +92,7 @@ func (sh *shard) Flush(ctx context.Context, shardN int, prev cid.Cid) (cid.Cid,
// this sets allocations as priority allocation
pin.Allocations = sh.allocations
pin.Type = api.ShardType
pin.Reference = prev
pin.Reference = &prev
pin.MaxDepth = 1
pin.ShardSize = sh.Size() // use current size, not the limit
if len(nodes) > len(sh.dagNode)+1 { // using an indirect graph

View File

@ -2,6 +2,7 @@ package sharding
import (
"context"
"errors"
"fmt"
"testing"
@ -13,7 +14,7 @@ import (
// MockPinStore is used in VerifyShards
type MockPinStore interface {
// Gets a pin
PinGet(context.Context, cid.Cid) (api.Pin, error)
PinGet(context.Context, cid.Cid) (*api.Pin, error)
}
// MockBlockStore is used in VerifyShards
@ -36,7 +37,11 @@ func VerifyShards(t *testing.T, rootCid cid.Cid, pins MockPinStore, ipfs MockBlo
return nil, fmt.Errorf("bad MetaPin type")
}
clusterPin, err := pins.PinGet(ctx, metaPin.Reference)
if metaPin.Reference == nil {
return nil, errors.New("metaPin.Reference is unset")
}
clusterPin, err := pins.PinGet(ctx, *metaPin.Reference)
if err != nil {
return nil, fmt.Errorf("cluster pin was not pinned: %s", err)
}
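
Pin.Reference is now a *cid.Cid rather than a value, which is why the sharded DAGService sets Reference = &dataRoot earlier in this diff and VerifyShards nil-checks it before dereferencing. A compact sketch of handling the new field; the helper names are illustrative.

package example

import (
    cid "github.com/ipfs/go-cid"
    "github.com/ipfs/ipfs-cluster/api"
)

// setReference stores prev in the pin; Reference is a *cid.Cid now, so
// the address is taken (mirrors Finalize above).
func setReference(pin *api.Pin, prev cid.Cid) {
    pin.Reference = &prev
}

// reference reads it back, guarding against an unset pointer the same
// way VerifyShards does.
func reference(pin *api.Pin) (cid.Cid, bool) {
    if pin.Reference == nil {
        return cid.Undef, false
    }
    return *pin.Reference, true
}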

View File

@ -16,17 +16,12 @@ import (
// PutBlock sends a NodeWithMeta to the given destinations.
func PutBlock(ctx context.Context, rpc *rpc.Client, n *api.NodeWithMeta, dests []peer.ID) error {
c, err := cid.Decode(n.Cid)
if err != nil {
return err
}
format, ok := cid.CodecToStr[c.Type()]
format, ok := cid.CodecToStr[n.Cid.Type()]
if !ok {
format = ""
logger.Warning("unsupported cid type, treating as v0")
}
if c.Prefix().Version == 0 {
if n.Cid.Prefix().Version == 0 {
format = "v0"
}
n.Format = format
@ -40,7 +35,7 @@ func PutBlock(ctx context.Context, rpc *rpc.Client, n *api.NodeWithMeta, dests [
dests,
"Cluster",
"IPFSBlockPut",
*n,
n,
rpcutil.RPCDiscardReplies(len(dests)),
)
return rpcutil.CheckErrs(errs)
@ -49,20 +44,20 @@ func PutBlock(ctx context.Context, rpc *rpc.Client, n *api.NodeWithMeta, dests [
// BlockAllocate helps allocating blocks to peers.
func BlockAllocate(ctx context.Context, rpc *rpc.Client, pinOpts api.PinOptions) ([]peer.ID, error) {
// Find where to allocate this file
var allocsStr []string
var allocsStr []peer.ID
err := rpc.CallContext(
ctx,
"",
"Cluster",
"BlockAllocate",
api.PinWithOpts(cid.Undef, pinOpts).ToSerial(),
api.PinWithOpts(cid.Undef, pinOpts),
&allocsStr,
)
return api.StringsToPeers(allocsStr), err
return allocsStr, err
}
// Pin helps sending local RPC pin requests.
func Pin(ctx context.Context, rpc *rpc.Client, pin api.Pin) error {
func Pin(ctx context.Context, rpc *rpc.Client, pin *api.Pin) error {
if pin.ReplicationFactorMin < 0 {
pin.Allocations = []peer.ID{}
}
@ -72,7 +67,7 @@ func Pin(ctx context.Context, rpc *rpc.Client, pin api.Pin) error {
"", // use ourself to pin
"Cluster",
"Pin",
pin.ToSerial(),
pin,
&struct{}{},
)
}
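
Because NodeWithMeta carries a real cid.Cid, PutBlock no longer round-trips through cid.Decode and reads the block format straight off the CID. A standalone sketch of that detection, reusing the cid.CodecToStr lookup shown above; the wrapper function is illustrative.

package example

import (
    cid "github.com/ipfs/go-cid"
)

// blockFormat mirrors PutBlock's detection: map the codec to its string
// name, fall back to "" for unknown codecs, and force "v0" for CIDv0.
func blockFormat(c cid.Cid) string {
    format, ok := cid.CodecToStr[c.Type()]
    if !ok {
        format = "" // unsupported codec; the receiver treats it as v0
    }
    if c.Prefix().Version == 0 {
        format = "v0"
    }
    return format
}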

View File

@ -7,6 +7,7 @@ import (
cid "github.com/ipfs/go-cid"
peer "github.com/libp2p/go-libp2p-peer"
"go.opencensus.io/trace"
"github.com/ipfs/ipfs-cluster/api"
@ -59,13 +60,16 @@ func (c *Cluster) allocate(ctx context.Context, hash cid.Cid, rplMin, rplMax int
}
// Figure out who is holding the CID
currentPin, _ := c.PinGet(ctx, hash)
currentAllocs := currentPin.Allocations
var currentAllocs []peer.ID
currentPin, err := c.PinGet(ctx, hash)
if err == nil {
currentAllocs = currentPin.Allocations
}
metrics := c.monitor.LatestMetrics(ctx, c.informer.Name())
currentMetrics := make(map[peer.ID]api.Metric)
candidatesMetrics := make(map[peer.ID]api.Metric)
priorityMetrics := make(map[peer.ID]api.Metric)
currentMetrics := make(map[peer.ID]*api.Metric)
candidatesMetrics := make(map[peer.ID]*api.Metric)
priorityMetrics := make(map[peer.ID]*api.Metric)
// Divide metrics between current and candidates.
// All metrics in metrics are valid (at least the
@ -123,9 +127,9 @@ func (c *Cluster) obtainAllocations(
ctx context.Context,
hash cid.Cid,
rplMin, rplMax int,
currentValidMetrics map[peer.ID]api.Metric,
candidatesMetrics map[peer.ID]api.Metric,
priorityMetrics map[peer.ID]api.Metric,
currentValidMetrics map[peer.ID]*api.Metric,
candidatesMetrics map[peer.ID]*api.Metric,
priorityMetrics map[peer.ID]*api.Metric,
) ([]peer.ID, error) {
ctx, span := trace.StartSpan(ctx, "cluster/obtainAllocations")
defer span.End()

View File

@ -39,7 +39,7 @@ func (alloc AscendAllocator) Shutdown(_ context.Context) error { return nil }
func (alloc AscendAllocator) Allocate(
ctx context.Context,
c cid.Cid,
current, candidates, priority map[peer.ID]api.Metric,
current, candidates, priority map[peer.ID]*api.Metric,
) ([]peer.ID, error) {
// sort our metrics
first := util.SortNumeric(priority, false)

View File

@ -12,8 +12,8 @@ import (
)
type testcase struct {
candidates map[peer.ID]api.Metric
current map[peer.ID]api.Metric
candidates map[peer.ID]*api.Metric
current map[peer.ID]*api.Metric
expected []peer.ID
}
@ -29,7 +29,7 @@ var inAMinute = time.Now().Add(time.Minute).UnixNano()
var testCases = []testcase{
{ // regular sort
candidates: map[peer.ID]api.Metric{
candidates: map[peer.ID]*api.Metric{
peer0: {
Name: "some-metric",
Value: "5",
@ -55,11 +55,11 @@ var testCases = []testcase{
Valid: true,
},
},
current: map[peer.ID]api.Metric{},
current: map[peer.ID]*api.Metric{},
expected: []peer.ID{peer1, peer3, peer2, peer0},
},
{ // filter invalid
candidates: map[peer.ID]api.Metric{
candidates: map[peer.ID]*api.Metric{
peer0: {
Name: "some-metric",
Value: "1",
@ -73,11 +73,11 @@ var testCases = []testcase{
Valid: true,
},
},
current: map[peer.ID]api.Metric{},
current: map[peer.ID]*api.Metric{},
expected: []peer.ID{peer1},
},
{ // filter bad value
candidates: map[peer.ID]api.Metric{
candidates: map[peer.ID]*api.Metric{
peer0: {
Name: "some-metric",
Value: "abc",
@ -91,7 +91,7 @@ var testCases = []testcase{
Valid: true,
},
},
current: map[peer.ID]api.Metric{},
current: map[peer.ID]*api.Metric{},
expected: []peer.ID{peer1},
},
}

View File

@ -36,7 +36,7 @@ func (alloc DescendAllocator) Shutdown(_ context.Context) error { return nil }
// carry a numeric value such as "used disk". We do not pay attention to
// the metrics of the currently allocated peers and we just sort the
// candidates based on their metric values (largest to smallest).
func (alloc DescendAllocator) Allocate(ctx context.Context, c cid.Cid, current, candidates, priority map[peer.ID]api.Metric) ([]peer.ID, error) {
func (alloc DescendAllocator) Allocate(ctx context.Context, c cid.Cid, current, candidates, priority map[peer.ID]*api.Metric) ([]peer.ID, error) {
// sort our metrics
first := util.SortNumeric(priority, true)
last := util.SortNumeric(candidates, true)

View File

@ -12,8 +12,8 @@ import (
)
type testcase struct {
candidates map[peer.ID]api.Metric
current map[peer.ID]api.Metric
candidates map[peer.ID]*api.Metric
current map[peer.ID]*api.Metric
expected []peer.ID
}
@ -29,7 +29,7 @@ var inAMinute = time.Now().Add(time.Minute).UnixNano()
var testCases = []testcase{
{ // regular sort
candidates: map[peer.ID]api.Metric{
candidates: map[peer.ID]*api.Metric{
peer0: {
Name: "some-metric",
Value: "5",
@ -55,11 +55,11 @@ var testCases = []testcase{
Valid: true,
},
},
current: map[peer.ID]api.Metric{},
current: map[peer.ID]*api.Metric{},
expected: []peer.ID{peer1, peer3, peer2, peer0},
},
{ // filter invalid
candidates: map[peer.ID]api.Metric{
candidates: map[peer.ID]*api.Metric{
peer0: {
Name: "some-metric",
Value: "1",
@ -73,11 +73,11 @@ var testCases = []testcase{
Valid: true,
},
},
current: map[peer.ID]api.Metric{},
current: map[peer.ID]*api.Metric{},
expected: []peer.ID{peer1},
},
{ // filter bad value
candidates: map[peer.ID]api.Metric{
candidates: map[peer.ID]*api.Metric{
peer0: {
Name: "some-metric",
Value: "abc",
@ -91,7 +91,7 @@ var testCases = []testcase{
Valid: true,
},
},
current: map[peer.ID]api.Metric{},
current: map[peer.ID]*api.Metric{},
expected: []peer.ID{peer1},
},
}

View File

@ -16,7 +16,7 @@ import (
// SortNumeric returns a list of peers sorted by their metric values. If reverse
// is false (true), peers will be sorted from smallest to largest (largest to
// smallest) metric
func SortNumeric(candidates map[peer.ID]api.Metric, reverse bool) []peer.ID {
func SortNumeric(candidates map[peer.ID]*api.Metric, reverse bool) []peer.ID {
vMap := make(map[peer.ID]uint64)
peers := make([]peer.ID, 0, len(candidates))
for k, v := range candidates {
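
Allocator inputs now travel as map[peer.ID]*api.Metric end to end, so the test fixtures above and SortNumeric share the same pointer map. A hedged sketch of building such a map and sorting it ascending; the allocator/util import path and the Expire field are assumed from the surrounding code.

package example

import (
    "time"

    "github.com/ipfs/ipfs-cluster/allocator/util"
    "github.com/ipfs/ipfs-cluster/api"
    peer "github.com/libp2p/go-libp2p-peer"
)

// sortCandidates builds the pointer-based metric map used everywhere now
// and sorts it smallest-value-first, as the ascend allocator does.
func sortCandidates(p1, p2 peer.ID) []peer.ID {
    expire := time.Now().Add(time.Minute).UnixNano() // assumed Expire field, as in the tests' inAMinute
    candidates := map[peer.ID]*api.Metric{
        p1: {Name: "some-metric", Value: "5", Valid: true, Expire: expire},
        p2: {Name: "some-metric", Value: "1", Valid: true, Expire: expire},
    }
    return util.SortNumeric(candidates, false) // reverse=false: ascending
}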

View File

@ -304,7 +304,7 @@ func (proxy *Server) pinOpHandler(op string, w http.ResponseWriter, r *http.Requ
"",
"Cluster",
op,
api.PinCid(c).ToSerial(),
api.PinCid(c),
&struct{}{},
)
if err != nil {
@ -342,23 +342,23 @@ func (proxy *Server) pinLsHandler(w http.ResponseWriter, r *http.Request) {
ipfsErrorResponder(w, err.Error())
return
}
var pin api.PinSerial
var pin api.Pin
err = proxy.rpcClient.Call(
"",
"Cluster",
"PinGet",
api.PinCid(c).ToSerial(),
c,
&pin,
)
if err != nil {
ipfsErrorResponder(w, fmt.Sprintf("Error: path '%s' is not pinned", arg))
return
}
pinLs.Keys[pin.Cid] = ipfsPinType{
pinLs.Keys[pin.Cid.String()] = ipfsPinType{
Type: "recursive",
}
} else {
pins := make([]api.PinSerial, 0)
pins := make([]*api.Pin, 0)
err := proxy.rpcClient.Call(
"",
"Cluster",
@ -372,7 +372,7 @@ func (proxy *Server) pinLsHandler(w http.ResponseWriter, r *http.Request) {
}
for _, pin := range pins {
pinLs.Keys[pin.Cid] = ipfsPinType{
pinLs.Keys[pin.Cid.String()] = ipfsPinType{
Type: "recursive",
}
}
@ -450,7 +450,7 @@ func (proxy *Server) addHandler(w http.ResponseWriter, r *http.Request) {
"",
"Cluster",
"Unpin",
api.PinCid(root).ToSerial(),
root,
&struct{}{},
)
if err != nil {
@ -478,10 +478,11 @@ func (proxy *Server) repoStatHandler(w http.ResponseWriter, r *http.Request) {
ctxs, cancels := rpcutil.CtxsWithCancel(proxy.ctx, len(peers))
defer rpcutil.MultiCancel(cancels)
repoStats := make([]api.IPFSRepoStat, len(peers), len(peers))
repoStats := make([]*api.IPFSRepoStat, len(peers), len(peers))
repoStatsIfaces := make([]interface{}, len(repoStats), len(repoStats))
for i := range repoStats {
repoStatsIfaces[i] = &repoStats[i]
repoStats[i] = &api.IPFSRepoStat{}
repoStatsIfaces[i] = repoStats[i]
}
errs := proxy.rpcClient.MultiCall(

View File

@ -11,6 +11,8 @@ import (
"testing"
"time"
cid "github.com/ipfs/go-cid"
"github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/test"
@ -85,33 +87,33 @@ func TestIPFSProxyPin(t *testing.T) {
type args struct {
urlPath string
testCid string
testCid cid.Cid
statusCode int
}
tests := []struct {
name string
args args
want string
want cid.Cid
wantErr bool
}{
{
"pin good cid query arg",
args{
"/pin/add?arg=",
test.TestCid1,
test.Cid1,
http.StatusOK,
},
test.TestCid1,
test.Cid1,
false,
},
{
"pin good cid url arg",
args{
"/pin/add/",
test.TestCid1,
test.Cid1,
http.StatusOK,
},
test.TestCid1,
test.Cid1,
false,
},
{
@ -121,7 +123,7 @@ func TestIPFSProxyPin(t *testing.T) {
test.ErrorCid,
http.StatusInternalServerError,
},
"",
cid.Undef,
true,
},
{
@ -131,13 +133,18 @@ func TestIPFSProxyPin(t *testing.T) {
test.ErrorCid,
http.StatusInternalServerError,
},
"",
cid.Undef,
true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
u := fmt.Sprintf("%s%s%s", proxyURL(proxy), tt.args.urlPath, tt.args.testCid)
u := fmt.Sprintf(
"%s%s%s",
proxyURL(proxy),
tt.args.urlPath,
tt.args.testCid,
)
res, err := http.Post(u, "", nil)
if err != nil {
t.Fatal("should have succeeded: ", err)
@ -162,7 +169,7 @@ func TestIPFSProxyPin(t *testing.T) {
t.Fatalf("wrong number of pins: got = %d, want %d", len(resp.Pins), 1)
}
if resp.Pins[0] != tt.want {
if resp.Pins[0] != tt.want.String() {
t.Errorf("wrong pin cid: got = %s, want = %s", resp.Pins[0], tt.want)
}
case true:
@ -188,33 +195,33 @@ func TestIPFSProxyUnpin(t *testing.T) {
type args struct {
urlPath string
testCid string
testCid cid.Cid
statusCode int
}
tests := []struct {
name string
args args
want string
want cid.Cid
wantErr bool
}{
{
"unpin good cid query arg",
args{
"/pin/rm?arg=",
test.TestCid1,
test.Cid1,
http.StatusOK,
},
test.TestCid1,
test.Cid1,
false,
},
{
"unpin good cid url arg",
args{
"/pin/rm/",
test.TestCid1,
test.Cid1,
http.StatusOK,
},
test.TestCid1,
test.Cid1,
false,
},
{
@ -224,7 +231,7 @@ func TestIPFSProxyUnpin(t *testing.T) {
test.ErrorCid,
http.StatusInternalServerError,
},
"",
cid.Undef,
true,
},
{
@ -234,7 +241,7 @@ func TestIPFSProxyUnpin(t *testing.T) {
test.ErrorCid,
http.StatusInternalServerError,
},
"",
cid.Undef,
true,
},
}
@ -265,7 +272,7 @@ func TestIPFSProxyUnpin(t *testing.T) {
t.Fatalf("wrong number of pins: got = %d, want %d", len(resp.Pins), 1)
}
if resp.Pins[0] != tt.want {
if resp.Pins[0] != tt.want.String() {
t.Errorf("wrong pin cid: got = %s, want = %s", resp.Pins[0], tt.want)
}
case true:
@ -290,7 +297,7 @@ func TestIPFSProxyPinLs(t *testing.T) {
defer proxy.Shutdown(ctx)
t.Run("pin/ls query arg", func(t *testing.T) {
res, err := http.Post(fmt.Sprintf("%s/pin/ls?arg=%s", proxyURL(proxy), test.TestCid1), "", nil)
res, err := http.Post(fmt.Sprintf("%s/pin/ls?arg=%s", proxyURL(proxy), test.Cid1), "", nil)
if err != nil {
t.Fatal("should have succeeded: ", err)
}
@ -306,14 +313,14 @@ func TestIPFSProxyPinLs(t *testing.T) {
t.Fatal(err)
}
_, ok := resp.Keys[test.TestCid1]
_, ok := resp.Keys[test.Cid1.String()]
if len(resp.Keys) != 1 || !ok {
t.Error("wrong response")
}
})
t.Run("pin/ls url arg", func(t *testing.T) {
res, err := http.Post(fmt.Sprintf("%s/pin/ls/%s", proxyURL(proxy), test.TestCid1), "", nil)
res, err := http.Post(fmt.Sprintf("%s/pin/ls/%s", proxyURL(proxy), test.Cid1), "", nil)
if err != nil {
t.Fatal("should have succeeded: ", err)
}
@ -329,9 +336,7 @@ func TestIPFSProxyPinLs(t *testing.T) {
t.Fatal(err)
}
fmt.Println(string(resBytes))
_, ok := resp.Keys[test.TestCid1]
_, ok := resp.Keys[test.Cid1.String()]
if len(resp.Keys) != 1 || !ok {
t.Error("wrong response")
}

View File

@ -47,12 +47,12 @@ var logger = logging.Logger(loggingFacility)
// metrics and tracing of requests through the API.
type Client interface {
// ID returns information about the cluster Peer.
ID(context.Context) (api.ID, error)
ID(context.Context) (*api.ID, error)
// Peers requests ID information for all cluster peers.
Peers(context.Context) ([]api.ID, error)
Peers(context.Context) ([]*api.ID, error)
// PeerAdd adds a new peer to the cluster.
PeerAdd(ctx context.Context, pid peer.ID) (api.ID, error)
PeerAdd(ctx context.Context, pid peer.ID) (*api.ID, error)
// PeerRm removes a current peer from the cluster
PeerRm(ctx context.Context, pid peer.ID) error
@ -68,58 +68,57 @@ type Client interface {
Unpin(ctx context.Context, ci cid.Cid) error
// PinPath resolves given path into a cid and performs the pin operation.
PinPath(ctx context.Context, path string, opts api.PinOptions) (api.Pin, error)
PinPath(ctx context.Context, path string, opts api.PinOptions) (*api.Pin, error)
// UnpinPath resolves given path into a cid and performs the unpin operation.
// It returns api.Pin of the given cid before it is unpinned.
UnpinPath(ctx context.Context, path string) (api.Pin, error)
UnpinPath(ctx context.Context, path string) (*api.Pin, error)
// Allocations returns the consensus state listing all tracked items
// and the peers that should be pinning them.
Allocations(ctx context.Context, filter api.PinType) ([]api.Pin, error)
Allocations(ctx context.Context, filter api.PinType) ([]*api.Pin, error)
// Allocation returns the current allocations for a given Cid.
Allocation(ctx context.Context, ci cid.Cid) (api.Pin, error)
Allocation(ctx context.Context, ci cid.Cid) (*api.Pin, error)
// Status returns the current ipfs state for a given Cid. If local is true,
// the information affects only the current peer, otherwise the information
// is fetched from all cluster peers.
Status(ctx context.Context, ci cid.Cid, local bool) (api.GlobalPinInfo, error)
Status(ctx context.Context, ci cid.Cid, local bool) (*api.GlobalPinInfo, error)
// StatusAll gathers Status() for all tracked items.
StatusAll(ctx context.Context, filter api.TrackerStatus, local bool) ([]api.GlobalPinInfo, error)
StatusAll(ctx context.Context, filter api.TrackerStatus, local bool) ([]*api.GlobalPinInfo, error)
// Sync makes sure the state of a Cid corresponds to the state reported
// by the ipfs daemon, and returns it. If local is true, this operation
// only happens on the current peer, otherwise it happens on every
// cluster peer.
Sync(ctx context.Context, ci cid.Cid, local bool) (api.GlobalPinInfo, error)
Sync(ctx context.Context, ci cid.Cid, local bool) (*api.GlobalPinInfo, error)
// SyncAll triggers Sync() operations for all tracked items. It only
// returns informations for items that were de-synced or have an error
// state. If local is true, the operation is limited to the current
// peer. Otherwise it happens on every cluster peer.
SyncAll(ctx context.Context, local bool) ([]api.GlobalPinInfo, error)
SyncAll(ctx context.Context, local bool) ([]*api.GlobalPinInfo, error)
// Recover retriggers pin or unpin ipfs operations for a Cid in error
// state. If local is true, the operation is limited to the current
// peer, otherwise it happens on every cluster peer.
Recover(ctx context.Context, ci cid.Cid, local bool) (api.GlobalPinInfo, error)
Recover(ctx context.Context, ci cid.Cid, local bool) (*api.GlobalPinInfo, error)
// RecoverAll triggers Recover() operations on all tracked items. If
// local is true, the operation is limited to the current peer.
// Otherwise, it happens everywhere.
RecoverAll(ctx context.Context, local bool) ([]api.GlobalPinInfo, error)
RecoverAll(ctx context.Context, local bool) ([]*api.GlobalPinInfo, error)
// Version returns the ipfs-cluster peer's version.
Version(context.Context) (api.Version, error)
Version(context.Context) (*api.Version, error)
// IPFS returns an instance of go-ipfs-api's Shell, pointing to a
// Cluster's IPFS proxy endpoint.
IPFS(context.Context) *shell.Shell
// GetConnectGraph returns an ipfs-cluster connection graph. The
// serialized version, strings instead of pids, is returned
GetConnectGraph(context.Context) (api.ConnectGraphSerial, error)
// GetConnectGraph returns an ipfs-cluster connection graph.
GetConnectGraph(context.Context) (*api.ConnectGraph, error)
// Metrics returns a map with the latest metrics of matching name
// for the current cluster peers.
Metrics(ctx context.Context, name string) ([]api.Metric, error)
Metrics(ctx context.Context, name string) ([]*api.Metric, error)
}
// Config allows to configure the parameters to connect
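
All Client methods that previously returned structs now return pointers and a nil result on error, so callers check the error before dereferencing. A usage sketch against this interface; the function name is illustrative.

package example

import (
    "context"
    "fmt"

    cid "github.com/ipfs/go-cid"
    "github.com/ipfs/ipfs-cluster/api/rest/client"
)

// printAllocation uses the pointer-returning API: on error the *api.Pin
// is nil, so the error is checked before touching the pin.
func printAllocation(ctx context.Context, c client.Client, ci cid.Cid) error {
    pin, err := c.Allocation(ctx, ci)
    if err != nil {
        return err // pin is nil here
    }
    fmt.Printf("%s is allocated to %d peers\n", pin.Cid, len(pin.Allocations))
    return nil
}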

View File

@ -289,7 +289,7 @@ func TestIPFS(t *testing.T) {
dc := c.(*defaultClient)
ipfs := dc.IPFS(ctx)
err = ipfs.Pin(test.TestCid1)
err = ipfs.Pin(test.Cid1.String())
if err != nil {
t.Error(err)
}
@ -299,7 +299,7 @@ func TestIPFS(t *testing.T) {
t.Error(err)
}
pin, ok := pins[test.TestCid1]
pin, ok := pins[test.Cid1.String()]
if !ok {
t.Error("pin should be in pin list")
}

View File

@ -24,27 +24,23 @@ import (
)
// ID returns information about the cluster Peer.
func (c *defaultClient) ID(ctx context.Context) (api.ID, error) {
func (c *defaultClient) ID(ctx context.Context) (*api.ID, error) {
ctx, span := trace.StartSpan(ctx, "client/ID")
defer span.End()
var id api.IDSerial
var id api.ID
err := c.do(ctx, "GET", "/id", nil, nil, &id)
return id.ToID(), err
return &id, err
}
// Peers requests ID information for all cluster peers.
func (c *defaultClient) Peers(ctx context.Context) ([]api.ID, error) {
func (c *defaultClient) Peers(ctx context.Context) ([]*api.ID, error) {
ctx, span := trace.StartSpan(ctx, "client/Peers")
defer span.End()
var ids []api.IDSerial
var ids []*api.ID
err := c.do(ctx, "GET", "/peers", nil, nil, &ids)
result := make([]api.ID, len(ids))
for i, id := range ids {
result[i] = id.ToID()
}
return result, err
return ids, err
}
type peerAddBody struct {
@ -52,7 +48,7 @@ type peerAddBody struct {
}
// PeerAdd adds a new peer to the cluster.
func (c *defaultClient) PeerAdd(ctx context.Context, pid peer.ID) (api.ID, error) {
func (c *defaultClient) PeerAdd(ctx context.Context, pid peer.ID) (*api.ID, error) {
ctx, span := trace.StartSpan(ctx, "client/PeerAdd")
defer span.End()
@ -63,9 +59,9 @@ func (c *defaultClient) PeerAdd(ctx context.Context, pid peer.ID) (api.ID, error
enc := json.NewEncoder(&buf)
enc.Encode(body)
var id api.IDSerial
var id api.ID
err := c.do(ctx, "POST", "/peers", nil, &buf, &id)
return id.ToID(), err
return &id, err
}
// PeerRm removes a current peer from the cluster
@ -105,14 +101,14 @@ func (c *defaultClient) Unpin(ctx context.Context, ci cid.Cid) error {
}
// PinPath allows to pin an element by the given IPFS path.
func (c *defaultClient) PinPath(ctx context.Context, path string, opts api.PinOptions) (api.Pin, error) {
func (c *defaultClient) PinPath(ctx context.Context, path string, opts api.PinOptions) (*api.Pin, error) {
ctx, span := trace.StartSpan(ctx, "client/PinPath")
defer span.End()
var pin api.PinSerial
var pin api.Pin
ipfspath, err := gopath.ParsePath(path)
if err != nil {
return api.Pin{}, err
return nil, err
}
err = c.do(
@ -128,32 +124,32 @@ func (c *defaultClient) PinPath(ctx context.Context, path string, opts api.PinOp
&pin,
)
return pin.ToPin(), err
return &pin, err
}
// UnpinPath allows to unpin an item by providing its IPFS path.
// It returns the unpinned api.Pin information of the resolved Cid.
func (c *defaultClient) UnpinPath(ctx context.Context, p string) (api.Pin, error) {
func (c *defaultClient) UnpinPath(ctx context.Context, p string) (*api.Pin, error) {
ctx, span := trace.StartSpan(ctx, "client/UnpinPath")
defer span.End()
var pin api.PinSerial
var pin api.Pin
ipfspath, err := gopath.ParsePath(p)
if err != nil {
return api.Pin{}, err
return nil, err
}
err = c.do(ctx, "DELETE", fmt.Sprintf("/pins%s", ipfspath.String()), nil, nil, &pin)
return pin.ToPin(), err
return &pin, err
}
// Allocations returns the consensus state listing all tracked items and
// the peers that should be pinning them.
func (c *defaultClient) Allocations(ctx context.Context, filter api.PinType) ([]api.Pin, error) {
func (c *defaultClient) Allocations(ctx context.Context, filter api.PinType) ([]*api.Pin, error) {
ctx, span := trace.StartSpan(ctx, "client/Allocations")
defer span.End()
var pins []api.PinSerial
var pins []*api.Pin
types := []api.PinType{
api.DataType,
@ -176,33 +172,36 @@ func (c *defaultClient) Allocations(ctx context.Context, filter api.PinType) ([]
f := url.QueryEscape(strings.Join(strFilter, ","))
err := c.do(ctx, "GET", fmt.Sprintf("/allocations?filter=%s", f), nil, nil, &pins)
result := make([]api.Pin, len(pins))
for i, p := range pins {
result[i] = p.ToPin()
}
return result, err
return pins, err
}
// Allocation returns the current allocations for a given Cid.
func (c *defaultClient) Allocation(ctx context.Context, ci cid.Cid) (api.Pin, error) {
func (c *defaultClient) Allocation(ctx context.Context, ci cid.Cid) (*api.Pin, error) {
ctx, span := trace.StartSpan(ctx, "client/Allocation")
defer span.End()
var pin api.PinSerial
var pin api.Pin
err := c.do(ctx, "GET", fmt.Sprintf("/allocations/%s", ci.String()), nil, nil, &pin)
return pin.ToPin(), err
return &pin, err
}
// Status returns the current ipfs state for a given Cid. If local is true,
// the information affects only the current peer, otherwise the information
// is fetched from all cluster peers.
func (c *defaultClient) Status(ctx context.Context, ci cid.Cid, local bool) (api.GlobalPinInfo, error) {
func (c *defaultClient) Status(ctx context.Context, ci cid.Cid, local bool) (*api.GlobalPinInfo, error) {
ctx, span := trace.StartSpan(ctx, "client/Status")
defer span.End()
var gpi api.GlobalPinInfoSerial
err := c.do(ctx, "GET", fmt.Sprintf("/pins/%s?local=%t", ci.String(), local), nil, nil, &gpi)
return gpi.ToGlobalPinInfo(), err
var gpi api.GlobalPinInfo
err := c.do(
ctx,
"GET",
fmt.Sprintf("/pins/%s?local=%t", ci.String(), local),
nil,
nil,
&gpi,
)
return &gpi, err
}
// StatusAll gathers Status() for all tracked items. If a filter is
@ -210,11 +209,11 @@ func (c *defaultClient) Status(ctx context.Context, ci cid.Cid, local bool) (api
// will be returned. A filter can be built by merging TrackerStatuses with
// a bitwise OR operation (st1 | st2 | ...). A "0" filter value (or
// api.TrackerStatusUndefined), means all.
func (c *defaultClient) StatusAll(ctx context.Context, filter api.TrackerStatus, local bool) ([]api.GlobalPinInfo, error) {
func (c *defaultClient) StatusAll(ctx context.Context, filter api.TrackerStatus, local bool) ([]*api.GlobalPinInfo, error) {
ctx, span := trace.StartSpan(ctx, "client/StatusAll")
defer span.End()
var gpis []api.GlobalPinInfoSerial
var gpis []*api.GlobalPinInfo
filterStr := ""
if filter != api.TrackerStatusUndefined { // undefined filter means "all"
@ -224,102 +223,104 @@ func (c *defaultClient) StatusAll(ctx context.Context, filter api.TrackerStatus,
}
}
err := c.do(ctx, "GET", fmt.Sprintf("/pins?local=%t&filter=%s", local, url.QueryEscape(filterStr)), nil, nil, &gpis)
result := make([]api.GlobalPinInfo, len(gpis))
for i, p := range gpis {
result[i] = p.ToGlobalPinInfo()
}
return result, err
err := c.do(
ctx,
"GET",
fmt.Sprintf("/pins?local=%t&filter=%s", local, url.QueryEscape(filterStr)),
nil,
nil,
&gpis,
)
return gpis, err
}
// Sync makes sure the state of a Cid corresponds to the state reported by
// the ipfs daemon, and returns it. If local is true, this operation only
// happens on the current peer, otherwise it happens on every cluster peer.
func (c *defaultClient) Sync(ctx context.Context, ci cid.Cid, local bool) (api.GlobalPinInfo, error) {
func (c *defaultClient) Sync(ctx context.Context, ci cid.Cid, local bool) (*api.GlobalPinInfo, error) {
ctx, span := trace.StartSpan(ctx, "client/Sync")
defer span.End()
var gpi api.GlobalPinInfoSerial
err := c.do(ctx, "POST", fmt.Sprintf("/pins/%s/sync?local=%t", ci.String(), local), nil, nil, &gpi)
return gpi.ToGlobalPinInfo(), err
var gpi api.GlobalPinInfo
err := c.do(
ctx,
"POST",
fmt.Sprintf("/pins/%s/sync?local=%t", ci.String(), local),
nil,
nil,
&gpi,
)
return &gpi, err
}
// SyncAll triggers Sync() operations for all tracked items. It only returns
// informations for items that were de-synced or have an error state. If
// local is true, the operation is limited to the current peer. Otherwise
// it happens on every cluster peer.
func (c *defaultClient) SyncAll(ctx context.Context, local bool) ([]api.GlobalPinInfo, error) {
func (c *defaultClient) SyncAll(ctx context.Context, local bool) ([]*api.GlobalPinInfo, error) {
ctx, span := trace.StartSpan(ctx, "client/SyncAll")
defer span.End()
var gpis []api.GlobalPinInfoSerial
var gpis []*api.GlobalPinInfo
err := c.do(ctx, "POST", fmt.Sprintf("/pins/sync?local=%t", local), nil, nil, &gpis)
result := make([]api.GlobalPinInfo, len(gpis))
for i, p := range gpis {
result[i] = p.ToGlobalPinInfo()
}
return result, err
return gpis, err
}
// Recover retriggers pin or unpin ipfs operations for a Cid in error state.
// If local is true, the operation is limited to the current peer, otherwise
// it happens on every cluster peer.
func (c *defaultClient) Recover(ctx context.Context, ci cid.Cid, local bool) (api.GlobalPinInfo, error) {
func (c *defaultClient) Recover(ctx context.Context, ci cid.Cid, local bool) (*api.GlobalPinInfo, error) {
ctx, span := trace.StartSpan(ctx, "client/Recover")
defer span.End()
var gpi api.GlobalPinInfoSerial
var gpi api.GlobalPinInfo
err := c.do(ctx, "POST", fmt.Sprintf("/pins/%s/recover?local=%t", ci.String(), local), nil, nil, &gpi)
return gpi.ToGlobalPinInfo(), err
return &gpi, err
}
// RecoverAll triggers Recover() operations on all tracked items. If local is
// true, the operation is limited to the current peer. Otherwise, it happens
// everywhere.
func (c *defaultClient) RecoverAll(ctx context.Context, local bool) ([]api.GlobalPinInfo, error) {
func (c *defaultClient) RecoverAll(ctx context.Context, local bool) ([]*api.GlobalPinInfo, error) {
ctx, span := trace.StartSpan(ctx, "client/RecoverAll")
defer span.End()
var gpis []api.GlobalPinInfoSerial
var gpis []*api.GlobalPinInfo
err := c.do(ctx, "POST", fmt.Sprintf("/pins/recover?local=%t", local), nil, nil, &gpis)
result := make([]api.GlobalPinInfo, len(gpis))
for i, p := range gpis {
result[i] = p.ToGlobalPinInfo()
}
return result, err
return gpis, err
}
// Version returns the ipfs-cluster peer's version.
func (c *defaultClient) Version(ctx context.Context) (api.Version, error) {
func (c *defaultClient) Version(ctx context.Context) (*api.Version, error) {
ctx, span := trace.StartSpan(ctx, "client/Version")
defer span.End()
var ver api.Version
err := c.do(ctx, "GET", "/version", nil, nil, &ver)
return ver, err
return &ver, err
}
// GetConnectGraph returns an ipfs-cluster connection graph.
// The serialized version, strings instead of pids, is returned
func (c *defaultClient) GetConnectGraph(ctx context.Context) (api.ConnectGraphSerial, error) {
func (c *defaultClient) GetConnectGraph(ctx context.Context) (*api.ConnectGraph, error) {
ctx, span := trace.StartSpan(ctx, "client/GetConnectGraph")
defer span.End()
var graphS api.ConnectGraphSerial
err := c.do(ctx, "GET", "/health/graph", nil, nil, &graphS)
return graphS, err
var graph api.ConnectGraph
err := c.do(ctx, "GET", "/health/graph", nil, nil, &graph)
return &graph, err
}
// Metrics returns a map with the latest valid metrics of the given name
// for the current cluster peers.
func (c *defaultClient) Metrics(ctx context.Context, name string) ([]api.Metric, error) {
func (c *defaultClient) Metrics(ctx context.Context, name string) ([]*api.Metric, error) {
ctx, span := trace.StartSpan(ctx, "client/Metrics")
defer span.End()
if name == "" {
return nil, errors.New("bad metric name")
}
var metrics []api.Metric
var metrics []*api.Metric
err := c.do(ctx, "GET", fmt.Sprintf("/monitor/metrics/%s", name), nil, nil, &metrics)
return metrics, err
}
@ -332,7 +333,7 @@ func (c *defaultClient) Metrics(ctx context.Context, name string) ([]api.Metric,
// peers have transitioned to the target TrackerStatus or are Remote.
// If an error of some type happens, WaitFor returns immediately with an
// empty GlobalPinInfo.
func WaitFor(ctx context.Context, c Client, fp StatusFilterParams) (api.GlobalPinInfo, error) {
func WaitFor(ctx context.Context, c Client, fp StatusFilterParams) (*api.GlobalPinInfo, error) {
ctx, span := trace.StartSpan(ctx, "client/WaitFor")
defer span.End()
@ -344,14 +345,14 @@ func WaitFor(ctx context.Context, c Client, fp StatusFilterParams) (api.GlobalPi
go sf.pollStatus(ctx, c, fp)
go sf.filter(ctx, fp)
var status api.GlobalPinInfo
var status *api.GlobalPinInfo
for {
select {
case <-ctx.Done():
return api.GlobalPinInfo{}, ctx.Err()
return nil, ctx.Err()
case err := <-sf.Err:
return api.GlobalPinInfo{}, err
return nil, err
case st, ok := <-sf.Out:
if !ok { // channel closed
return status, nil
@ -371,15 +372,15 @@ type StatusFilterParams struct {
}
type statusFilter struct {
In, Out chan api.GlobalPinInfo
In, Out chan *api.GlobalPinInfo
Done chan struct{}
Err chan error
}
func newStatusFilter() *statusFilter {
return &statusFilter{
In: make(chan api.GlobalPinInfo),
Out: make(chan api.GlobalPinInfo),
In: make(chan *api.GlobalPinInfo),
Out: make(chan *api.GlobalPinInfo),
Done: make(chan struct{}),
Err: make(chan error),
}
@ -437,7 +438,7 @@ func (sf *statusFilter) pollStatus(ctx context.Context, c Client, fp StatusFilte
}
}
func statusReached(target api.TrackerStatus, gblPinInfo api.GlobalPinInfo) (bool, error) {
func statusReached(target api.TrackerStatus, gblPinInfo *api.GlobalPinInfo) (bool, error) {
for _, pinInfo := range gblPinInfo.PeerMap {
switch pinInfo.Status {
case target:

View File

@ -96,7 +96,7 @@ func TestPeersWithError(t *testing.T) {
if err == nil {
t.Fatal("expected error")
}
if ids == nil || len(ids) != 0 {
if ids != nil {
t.Fatal("expected no ids")
}
}
@ -110,11 +110,11 @@ func TestPeerAdd(t *testing.T) {
defer shutdown(api)
testF := func(t *testing.T, c Client) {
id, err := c.PeerAdd(ctx, test.TestPeerID1)
id, err := c.PeerAdd(ctx, test.PeerID1)
if err != nil {
t.Fatal(err)
}
if id.ID != test.TestPeerID1 {
if id.ID != test.PeerID1 {
t.Error("bad peer")
}
}
@ -128,7 +128,7 @@ func TestPeerRm(t *testing.T) {
defer shutdown(api)
testF := func(t *testing.T, c Client) {
err := c.PeerRm(ctx, test.TestPeerID1)
err := c.PeerRm(ctx, test.PeerID1)
if err != nil {
t.Fatal(err)
}
@ -143,13 +143,12 @@ func TestPin(t *testing.T) {
defer shutdown(api)
testF := func(t *testing.T, c Client) {
ci, _ := cid.Decode(test.TestCid1)
opts := types.PinOptions{
ReplicationFactorMin: 6,
ReplicationFactorMax: 7,
Name: "hello there",
}
err := c.Pin(ctx, ci, opts)
err := c.Pin(ctx, test.Cid1, opts)
if err != nil {
t.Fatal(err)
}
@ -164,8 +163,7 @@ func TestUnpin(t *testing.T) {
defer shutdown(api)
testF := func(t *testing.T, c Client) {
ci, _ := cid.Decode(test.TestCid1)
err := c.Unpin(ctx, ci)
err := c.Unpin(ctx, test.Cid1)
if err != nil {
t.Fatal(err)
}
@ -181,27 +179,27 @@ type pathCase struct {
var pathTestCases = []pathCase{
{
test.TestCidResolved,
test.CidResolved.String(),
false,
},
{
test.TestPathIPFS1,
test.PathIPFS1,
false,
},
{
test.TestPathIPFS2,
test.PathIPFS2,
false,
},
{
test.TestPathIPNS1,
test.PathIPNS1,
false,
},
{
test.TestPathIPLD1,
test.PathIPLD1,
false,
},
{
test.TestInvalidPath1,
test.InvalidPath1,
true,
},
}
@ -218,7 +216,7 @@ func TestPinPath(t *testing.T) {
UserAllocations: []string{"QmWPKsvv9VCXmnmX4YGNaYUmB4MbwKyyLsVDYxTQXkNdxt", "QmWPKsvv9VCVTomX4YbNaTUmJ4MbwgyyVsVDtxXQXkNdxt"},
}
resultantPin := types.PinWithOpts(test.MustDecodeCid(test.TestCidResolved), opts)
resultantPin := types.PinWithOpts(test.CidResolved, opts)
testF := func(t *testing.T, c Client) {
@ -234,8 +232,8 @@ func TestPinPath(t *testing.T) {
if !pin.Equals(resultantPin) {
t.Errorf("expected different pin: %s", p)
t.Errorf("expected: %+v", resultantPin.ToSerial())
t.Errorf("actual: %+v", pin.ToSerial())
t.Errorf("expected: %+v", resultantPin)
t.Errorf("actual: %+v", pin)
}
}
@ -260,7 +258,7 @@ func TestUnpinPath(t *testing.T) {
t.Fatalf("unepected error %s: %s", p, err)
}
if pin.Cid.String() != test.TestCidResolved {
if !pin.Cid.Equals(test.CidResolved) {
t.Errorf("bad resolved Cid: %s, %s", p, pin.Cid)
}
}
@ -293,12 +291,11 @@ func TestAllocation(t *testing.T) {
defer shutdown(api)
testF := func(t *testing.T, c Client) {
ci, _ := cid.Decode(test.TestCid1)
pin, err := c.Allocation(ctx, ci)
pin, err := c.Allocation(ctx, test.Cid1)
if err != nil {
t.Fatal(err)
}
if pin.Cid.String() != test.TestCid1 {
if !pin.Cid.Equals(test.Cid1) {
t.Error("should be same pin")
}
}
@ -312,12 +309,11 @@ func TestStatus(t *testing.T) {
defer shutdown(api)
testF := func(t *testing.T, c Client) {
ci, _ := cid.Decode(test.TestCid1)
pin, err := c.Status(ctx, ci, false)
pin, err := c.Status(ctx, test.Cid1, false)
if err != nil {
t.Fatal(err)
}
if pin.Cid.String() != test.TestCid1 {
if !pin.Cid.Equals(test.Cid1) {
t.Error("should be same pin")
}
}
@ -381,12 +377,11 @@ func TestSync(t *testing.T) {
defer shutdown(api)
testF := func(t *testing.T, c Client) {
ci, _ := cid.Decode(test.TestCid1)
pin, err := c.Sync(ctx, ci, false)
pin, err := c.Sync(ctx, test.Cid1, false)
if err != nil {
t.Fatal(err)
}
if pin.Cid.String() != test.TestCid1 {
if !pin.Cid.Equals(test.Cid1) {
t.Error("should be same pin")
}
}
@ -419,12 +414,11 @@ func TestRecover(t *testing.T) {
defer shutdown(api)
testF := func(t *testing.T, c Client) {
ci, _ := cid.Decode(test.TestCid1)
pin, err := c.Recover(ctx, ci, false)
pin, err := c.Recover(ctx, test.Cid1, false)
if err != nil {
t.Fatal(err)
}
if pin.Cid.String() != test.TestCid1 {
if !pin.Cid.Equals(test.Cid1) {
t.Error("should be same pin")
}
}
@ -490,53 +484,52 @@ type waitService struct {
pinStart time.Time
}
func (wait *waitService) Pin(ctx context.Context, in api.PinSerial, out *struct{}) error {
func (wait *waitService) Pin(ctx context.Context, in *api.Pin, out *struct{}) error {
wait.l.Lock()
defer wait.l.Unlock()
wait.pinStart = time.Now()
return nil
}
func (wait *waitService) Status(ctx context.Context, in api.PinSerial, out *api.GlobalPinInfoSerial) error {
func (wait *waitService) Status(ctx context.Context, in cid.Cid, out *api.GlobalPinInfo) error {
wait.l.Lock()
defer wait.l.Unlock()
c1, _ := cid.Decode(in.Cid)
if time.Now().After(wait.pinStart.Add(5 * time.Second)) { //pinned
*out = api.GlobalPinInfo{
Cid: c1,
PeerMap: map[peer.ID]api.PinInfo{
test.TestPeerID1: {
Cid: c1,
Peer: test.TestPeerID1,
Cid: in,
PeerMap: map[string]*api.PinInfo{
peer.IDB58Encode(test.PeerID1): {
Cid: in,
Peer: test.PeerID1,
Status: api.TrackerStatusPinned,
TS: wait.pinStart,
},
test.TestPeerID2: {
Cid: c1,
Peer: test.TestPeerID2,
peer.IDB58Encode(test.PeerID2): {
Cid: in,
Peer: test.PeerID2,
Status: api.TrackerStatusPinned,
TS: wait.pinStart,
},
},
}.ToSerial()
}
} else { // pinning
*out = api.GlobalPinInfo{
Cid: c1,
PeerMap: map[peer.ID]api.PinInfo{
test.TestPeerID1: {
Cid: c1,
Peer: test.TestPeerID1,
Cid: in,
PeerMap: map[string]*api.PinInfo{
peer.IDB58Encode(test.PeerID1): {
Cid: in,
Peer: test.PeerID1,
Status: api.TrackerStatusPinning,
TS: wait.pinStart,
},
test.TestPeerID2: {
Cid: c1,
Peer: test.TestPeerID2,
peer.IDB58Encode(test.PeerID2): {
Cid: in,
Peer: test.PeerID2,
Status: api.TrackerStatusPinned,
TS: wait.pinStart,
},
},
}.ToSerial()
}
}
return nil
@ -557,7 +550,6 @@ func TestWaitFor(t *testing.T) {
tapi.SetClient(rpcC)
testF := func(t *testing.T, c Client) {
ci, _ := cid.Decode(test.TestCid1)
var wg sync.WaitGroup
wg.Add(1)
@ -567,7 +559,7 @@ func TestWaitFor(t *testing.T) {
defer cancel()
fp := StatusFilterParams{
Cid: ci,
Cid: test.Cid1,
Local: false,
Target: api.TrackerStatusPinned,
CheckFreq: time.Second,
@ -588,7 +580,7 @@ func TestWaitFor(t *testing.T) {
}
}
}()
err := c.Pin(ctx, ci, types.PinOptions{ReplicationFactorMin: 0, ReplicationFactorMax: 0, Name: "test", ShardSize: 0})
err := c.Pin(ctx, test.Cid1, types.PinOptions{ReplicationFactorMin: 0, ReplicationFactorMax: 0, Name: "test", ShardSize: 0})
if err != nil {
t.Fatal(err)
}

View File

@ -301,7 +301,7 @@ func basicAuthHandler(credentials map[string]string, h http.Handler) http.Handle
}
func unauthorizedResp() (string, error) {
apiError := types.Error{
apiError := &types.Error{
Code: 401,
Message: "Unauthorized",
}
@ -526,17 +526,17 @@ func (api *API) SetClient(c *rpc.Client) {
}
func (api *API) idHandler(w http.ResponseWriter, r *http.Request) {
idSerial := types.IDSerial{}
var id types.ID
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"ID",
struct{}{},
&idSerial,
&id,
)
api.sendResponse(w, autoStatus, err, idSerial)
api.sendResponse(w, autoStatus, err, &id)
}
func (api *API) versionHandler(w http.ResponseWriter, r *http.Request) {
@ -554,7 +554,7 @@ func (api *API) versionHandler(w http.ResponseWriter, r *http.Request) {
}
func (api *API) graphHandler(w http.ResponseWriter, r *http.Request) {
var graph types.ConnectGraphSerial
var graph types.ConnectGraph
err := api.rpcClient.CallContext(
r.Context(),
"",
@ -570,7 +570,7 @@ func (api *API) metricsHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name := vars["name"]
var metrics []types.Metric
var metrics []*types.Metric
err := api.rpcClient.CallContext(
r.Context(),
"",
@ -611,17 +611,17 @@ func (api *API) addHandler(w http.ResponseWriter, r *http.Request) {
}
func (api *API) peerListHandler(w http.ResponseWriter, r *http.Request) {
var peersSerial []types.IDSerial
var peers []*types.ID
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"Peers",
struct{}{},
&peersSerial,
&peers,
)
api.sendResponse(w, autoStatus, err, peersSerial)
api.sendResponse(w, autoStatus, err, peers)
}
func (api *API) peerAddHandler(w http.ResponseWriter, r *http.Request) {
@ -635,22 +635,22 @@ func (api *API) peerAddHandler(w http.ResponseWriter, r *http.Request) {
return
}
_, err = peer.IDB58Decode(addInfo.PeerID)
pid, err := peer.IDB58Decode(addInfo.PeerID)
if err != nil {
api.sendResponse(w, http.StatusBadRequest, errors.New("error decoding peer_id"), nil)
return
}
var ids types.IDSerial
var id types.ID
err = api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"PeerAdd",
addInfo.PeerID,
&ids,
pid,
&id,
)
api.sendResponse(w, autoStatus, err, ids)
api.sendResponse(w, autoStatus, err, &id)
}
func (api *API) peerRemoveHandler(w http.ResponseWriter, r *http.Request) {
@ -668,15 +668,15 @@ func (api *API) peerRemoveHandler(w http.ResponseWriter, r *http.Request) {
}
func (api *API) pinHandler(w http.ResponseWriter, r *http.Request) {
if ps := api.parseCidOrError(w, r); ps.Cid != "" {
logger.Debugf("rest api pinHandler: %s", ps.Cid)
// span.AddAttributes(trace.StringAttribute("cid", ps.Cid))
if pin := api.parseCidOrError(w, r); pin != nil {
logger.Debugf("rest api pinHandler: %s", pin.Cid)
// span.AddAttributes(trace.StringAttribute("cid", pin.Cid))
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"Pin",
ps,
pin,
&struct{}{},
)
api.sendResponse(w, http.StatusAccepted, err, nil)
@ -685,15 +685,15 @@ func (api *API) pinHandler(w http.ResponseWriter, r *http.Request) {
}
func (api *API) unpinHandler(w http.ResponseWriter, r *http.Request) {
if ps := api.parseCidOrError(w, r); ps.Cid != "" {
logger.Debugf("rest api unpinHandler: %s", ps.Cid)
// span.AddAttributes(trace.StringAttribute("cid", ps.Cid))
if pin := api.parseCidOrError(w, r); pin != nil {
logger.Debugf("rest api unpinHandler: %s", pin.Cid)
// span.AddAttributes(trace.StringAttribute("cid", pin.Cid))
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"Unpin",
ps,
pin,
&struct{}{},
)
api.sendResponse(w, http.StatusAccepted, err, nil)
@ -702,8 +702,8 @@ func (api *API) unpinHandler(w http.ResponseWriter, r *http.Request) {
}
func (api *API) pinPathHandler(w http.ResponseWriter, r *http.Request) {
var pin types.PinSerial
if pinpath := api.parsePinPathOrError(w, r); pinpath.Path != "" {
var pin types.Pin
if pinpath := api.parsePinPathOrError(w, r); pinpath != nil {
logger.Debugf("rest api pinPathHandler: %s", pinpath.Path)
err := api.rpcClient.CallContext(
r.Context(),
@ -720,8 +720,8 @@ func (api *API) pinPathHandler(w http.ResponseWriter, r *http.Request) {
}
func (api *API) unpinPathHandler(w http.ResponseWriter, r *http.Request) {
var pin types.PinSerial
if pinpath := api.parsePinPathOrError(w, r); pinpath.Path != "" {
var pin types.Pin
if pinpath := api.parsePinPathOrError(w, r); pinpath != nil {
logger.Debugf("rest api unpinPathHandler: %s", pinpath.Path)
err := api.rpcClient.CallContext(
r.Context(),
@ -743,7 +743,7 @@ func (api *API) allocationsHandler(w http.ResponseWriter, r *http.Request) {
for _, f := range strings.Split(filterStr, ",") {
filter |= types.PinTypeFromString(f)
}
var pins []types.PinSerial
var pins []*types.Pin
err := api.rpcClient.CallContext(
r.Context(),
"",
@ -752,50 +752,50 @@ func (api *API) allocationsHandler(w http.ResponseWriter, r *http.Request) {
struct{}{},
&pins,
)
outPins := make([]types.PinSerial, 0)
for _, pinS := range pins {
if uint64(filter)&pinS.Type > 0 {
outPins := make([]*types.Pin, 0)
for _, pin := range pins {
if filter&pin.Type > 0 {
// add this pin to output
outPins = append(outPins, pinS)
outPins = append(outPins, pin)
}
}
api.sendResponse(w, autoStatus, err, outPins)
}
func (api *API) allocationHandler(w http.ResponseWriter, r *http.Request) {
if ps := api.parseCidOrError(w, r); ps.Cid != "" {
var pin types.PinSerial
if pin := api.parseCidOrError(w, r); pin != nil {
var pinResp types.Pin
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"PinGet",
ps,
&pin,
pin.Cid,
&pinResp,
)
if err != nil { // errors here are 404s
api.sendResponse(w, http.StatusNotFound, err, nil)
return
}
api.sendResponse(w, autoStatus, nil, pin)
api.sendResponse(w, autoStatus, nil, pinResp)
}
}
// filterGlobalPinInfos takes a GlobalPinInfo slice and discards
// any item in it which does not carry a PinInfo matching the
// filter (OR-wise).
func filterGlobalPinInfos(globalPinInfos []types.GlobalPinInfoSerial, filter types.TrackerStatus) []types.GlobalPinInfoSerial {
func filterGlobalPinInfos(globalPinInfos []*types.GlobalPinInfo, filter types.TrackerStatus) []*types.GlobalPinInfo {
if filter == types.TrackerStatusUndefined {
return globalPinInfos
}
var filteredGlobalPinInfos []types.GlobalPinInfoSerial
var filteredGlobalPinInfos []*types.GlobalPinInfo
for _, globalPinInfo := range globalPinInfos {
for _, pinInfo := range globalPinInfo.PeerMap {
st := types.TrackerStatusFromString(pinInfo.Status)
// silenced the error because we should have detected earlier if filters were invalid
if st.Match(filter) {
// silenced the error because we should have detected
// earlier if filters were invalid
if pinInfo.Status.Match(filter) {
filteredGlobalPinInfos = append(filteredGlobalPinInfos, globalPinInfo)
break
}
@ -809,7 +809,7 @@ func (api *API) statusAllHandler(w http.ResponseWriter, r *http.Request) {
queryValues := r.URL.Query()
local := queryValues.Get("local")
var globalPinInfos []types.GlobalPinInfoSerial
var globalPinInfos []*types.GlobalPinInfo
filterStr := queryValues.Get("filter")
filter := types.TrackerStatusFromString(filterStr)
@ -819,7 +819,7 @@ func (api *API) statusAllHandler(w http.ResponseWriter, r *http.Request) {
}
if local == "true" {
var pinInfos []types.PinInfoSerial
var pinInfos []*types.PinInfo
err := api.rpcClient.CallContext(
r.Context(),
@ -858,26 +858,26 @@ func (api *API) statusHandler(w http.ResponseWriter, r *http.Request) {
queryValues := r.URL.Query()
local := queryValues.Get("local")
if ps := api.parseCidOrError(w, r); ps.Cid != "" {
if pin := api.parseCidOrError(w, r); pin != nil {
if local == "true" {
var pinInfo types.PinInfoSerial
var pinInfo types.PinInfo
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"StatusLocal",
ps,
pin.Cid,
&pinInfo,
)
api.sendResponse(w, autoStatus, err, pinInfoToGlobal(pinInfo))
api.sendResponse(w, autoStatus, err, pinInfoToGlobal(&pinInfo))
} else {
var pinInfo types.GlobalPinInfoSerial
var pinInfo types.GlobalPinInfo
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"Status",
ps,
pin.Cid,
&pinInfo,
)
api.sendResponse(w, autoStatus, err, pinInfo)
@ -890,7 +890,7 @@ func (api *API) syncAllHandler(w http.ResponseWriter, r *http.Request) {
local := queryValues.Get("local")
if local == "true" {
var pinInfos []types.PinInfoSerial
var pinInfos []*types.PinInfo
err := api.rpcClient.CallContext(
r.Context(),
"",
@ -901,7 +901,7 @@ func (api *API) syncAllHandler(w http.ResponseWriter, r *http.Request) {
)
api.sendResponse(w, autoStatus, err, pinInfosToGlobal(pinInfos))
} else {
var pinInfos []types.GlobalPinInfoSerial
var pinInfos []*types.GlobalPinInfo
err := api.rpcClient.CallContext(
r.Context(),
"",
@ -918,26 +918,26 @@ func (api *API) syncHandler(w http.ResponseWriter, r *http.Request) {
queryValues := r.URL.Query()
local := queryValues.Get("local")
if ps := api.parseCidOrError(w, r); ps.Cid != "" {
if pin := api.parseCidOrError(w, r); pin != nil {
if local == "true" {
var pinInfo types.PinInfoSerial
var pinInfo types.PinInfo
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"SyncLocal",
ps,
pin.Cid,
&pinInfo,
)
api.sendResponse(w, autoStatus, err, pinInfoToGlobal(pinInfo))
api.sendResponse(w, autoStatus, err, pinInfoToGlobal(&pinInfo))
} else {
var pinInfo types.GlobalPinInfoSerial
var pinInfo types.GlobalPinInfo
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"Sync",
ps,
pin.Cid,
&pinInfo,
)
api.sendResponse(w, autoStatus, err, pinInfo)
@ -949,7 +949,7 @@ func (api *API) recoverAllHandler(w http.ResponseWriter, r *http.Request) {
queryValues := r.URL.Query()
local := queryValues.Get("local")
if local == "true" {
var pinInfos []types.PinInfoSerial
var pinInfos []*types.PinInfo
err := api.rpcClient.CallContext(
r.Context(),
"",
@ -968,26 +968,26 @@ func (api *API) recoverHandler(w http.ResponseWriter, r *http.Request) {
queryValues := r.URL.Query()
local := queryValues.Get("local")
if ps := api.parseCidOrError(w, r); ps.Cid != "" {
if pin := api.parseCidOrError(w, r); pin != nil {
if local == "true" {
var pinInfo types.PinInfoSerial
var pinInfo types.PinInfo
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"RecoverLocal",
ps,
pin.Cid,
&pinInfo,
)
api.sendResponse(w, autoStatus, err, pinInfoToGlobal(pinInfo))
api.sendResponse(w, autoStatus, err, pinInfoToGlobal(&pinInfo))
} else {
var pinInfo types.GlobalPinInfoSerial
var pinInfo types.GlobalPinInfo
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"Recover",
ps,
pin.Cid,
&pinInfo,
)
api.sendResponse(w, autoStatus, err, pinInfo)
@ -995,37 +995,34 @@ func (api *API) recoverHandler(w http.ResponseWriter, r *http.Request) {
}
}
func (api *API) parsePinPathOrError(w http.ResponseWriter, r *http.Request) types.PinPath {
func (api *API) parsePinPathOrError(w http.ResponseWriter, r *http.Request) *types.PinPath {
vars := mux.Vars(r)
urlpath := "/" + vars["keyType"] + "/" + strings.TrimSuffix(vars["path"], "/")
path, err := gopath.ParsePath(urlpath)
if err != nil {
api.sendResponse(w, http.StatusBadRequest, errors.New("error parsing path: "+err.Error()), nil)
return types.PinPath{}
return nil
}
pinPath := types.PinPath{Path: path.String()}
pinPath := &types.PinPath{Path: path.String()}
pinPath.PinOptions.FromQuery(r.URL.Query())
return pinPath
}
func (api *API) parseCidOrError(w http.ResponseWriter, r *http.Request) types.PinSerial {
func (api *API) parseCidOrError(w http.ResponseWriter, r *http.Request) *types.Pin {
vars := mux.Vars(r)
hash := vars["hash"]
_, err := cid.Decode(hash)
c, err := cid.Decode(hash)
if err != nil {
api.sendResponse(w, http.StatusBadRequest, errors.New("error decoding Cid: "+err.Error()), nil)
return types.PinSerial{Cid: ""}
return nil
}
pin := types.PinSerial{
Cid: hash,
Type: uint64(types.DataType),
}
pin.PinOptions.FromQuery(r.URL.Query())
opts := types.PinOptions{}
opts.FromQuery(r.URL.Query())
pin := types.PinWithOpts(c, opts)
pin.MaxDepth = -1 // For now, all pins are recursive
return pin
}
@ -1041,17 +1038,17 @@ func (api *API) parsePidOrError(w http.ResponseWriter, r *http.Request) peer.ID
return pid
}
func pinInfoToGlobal(pInfo types.PinInfoSerial) types.GlobalPinInfoSerial {
return types.GlobalPinInfoSerial{
func pinInfoToGlobal(pInfo *types.PinInfo) *types.GlobalPinInfo {
return &types.GlobalPinInfo{
Cid: pInfo.Cid,
PeerMap: map[string]types.PinInfoSerial{
pInfo.Peer: pInfo,
PeerMap: map[string]*types.PinInfo{
peer.IDB58Encode(pInfo.Peer): pInfo,
},
}
}
func pinInfosToGlobal(pInfos []types.PinInfoSerial) []types.GlobalPinInfoSerial {
gPInfos := make([]types.GlobalPinInfoSerial, len(pInfos), len(pInfos))
func pinInfosToGlobal(pInfos []*types.PinInfo) []*types.GlobalPinInfo {
gPInfos := make([]*types.GlobalPinInfo, len(pInfos), len(pInfos))
for i, p := range pInfos {
gPInfos[i] = pinInfoToGlobal(p)
}

View File

@ -98,7 +98,6 @@ func processResp(t *testing.T, httpResp *http.Response, err error, resp interfac
if err != nil {
t.Fatal("error reading body: ", err)
}
if len(body) != 0 {
err = json.Unmarshal(body, resp)
if err != nil {
@ -307,17 +306,17 @@ func TestRestAPIIDEndpoint(t *testing.T) {
defer httpsrest.Shutdown(ctx)
tf := func(t *testing.T, url urlF) {
id := api.IDSerial{}
id := api.ID{}
makeGet(t, rest, url(rest)+"/id", &id)
if id.ID != test.TestPeerID1.Pretty() {
if id.ID.Pretty() != test.PeerID1.Pretty() {
t.Error("expected correct id")
}
}
httpstf := func(t *testing.T, url urlF) {
id := api.IDSerial{}
id := api.ID{}
makeGet(t, httpsrest, url(httpsrest)+"/id", &id)
if id.ID != test.TestPeerID1.Pretty() {
if id.ID.Pretty() != test.PeerID1.Pretty() {
t.Error("expected correct id")
}
}
@ -348,12 +347,12 @@ func TestAPIPeerstEndpoint(t *testing.T) {
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url urlF) {
var list []api.IDSerial
var list []*api.ID
makeGet(t, rest, url(rest)+"/peers", &list)
if len(list) != 1 {
t.Fatal("expected 1 element")
}
if list[0].ID != test.TestPeerID1.Pretty() {
if list[0].ID.Pretty() != test.PeerID1.Pretty() {
t.Error("expected a different peer id list: ", list)
}
}
@ -367,13 +366,12 @@ func TestAPIPeerAddEndpoint(t *testing.T) {
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url urlF) {
id := api.IDSerial{}
id := api.ID{}
// post with valid body
body := fmt.Sprintf("{\"peer_id\":\"%s\"}", test.TestPeerID1.Pretty())
body := fmt.Sprintf("{\"peer_id\":\"%s\"}", test.PeerID1.Pretty())
t.Log(body)
makePost(t, rest, url(rest)+"/peers", []byte(body), &id)
if id.ID != test.TestPeerID1.Pretty() {
if id.ID.Pretty() != test.PeerID1.Pretty() {
t.Error("expected correct ID")
}
if id.Error != "" {
@ -520,7 +518,7 @@ func TestAPIPeerRemoveEndpoint(t *testing.T) {
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url urlF) {
makeDelete(t, rest, url(rest)+"/peers/"+test.TestPeerID1.Pretty(), &struct{}{})
makeDelete(t, rest, url(rest)+"/peers/"+test.PeerID1.Pretty(), &struct{}{})
}
testBothEndpoints(t, tf)
@ -532,9 +530,9 @@ func TestConnectGraphEndpoint(t *testing.T) {
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url urlF) {
var cg api.ConnectGraphSerial
var cg api.ConnectGraph
makeGet(t, rest, url(rest)+"/health/graph", &cg)
if cg.ClusterID != test.TestPeerID1.Pretty() {
if cg.ClusterID.Pretty() != test.PeerID1.Pretty() {
t.Error("unexpected cluster id")
}
if len(cg.IPFSLinks) != 3 {
@ -547,12 +545,12 @@ func TestConnectGraphEndpoint(t *testing.T) {
t.Error("unexpected number of cluster to ipfs links")
}
// test a few link values
pid1 := test.TestPeerID1.Pretty()
pid4 := test.TestPeerID4.Pretty()
if _, ok := cg.ClustertoIPFS[pid1]; !ok {
pid1 := test.PeerID1
pid4 := test.PeerID4
if _, ok := cg.ClustertoIPFS[peer.IDB58Encode(pid1)]; !ok {
t.Fatal("missing cluster peer 1 from cluster to peer links map")
}
if cg.ClustertoIPFS[pid1] != pid4 {
if cg.ClustertoIPFS[peer.IDB58Encode(pid1)] != pid4 {
t.Error("unexpected ipfs peer mapped to cluster peer 1 in graph")
}
}
@ -567,10 +565,10 @@ func TestAPIPinEndpoint(t *testing.T) {
tf := func(t *testing.T, url urlF) {
// test regular post
makePost(t, rest, url(rest)+"/pins/"+test.TestCid1, []byte{}, &struct{}{})
makePost(t, rest, url(rest)+"/pins/"+test.Cid1.String(), []byte{}, &struct{}{})
errResp := api.Error{}
makePost(t, rest, url(rest)+"/pins/"+test.ErrorCid, []byte{}, &errResp)
makePost(t, rest, url(rest)+"/pins/"+test.ErrorCid.String(), []byte{}, &errResp)
if errResp.Message != test.ErrBadCid.Error() {
t.Error("expected different error: ", errResp.Message)
}
@ -622,7 +620,7 @@ var pathTestCases = []pathCase{
http.StatusBadRequest,
},
// TODO: Test StatusNotFound and a case with trailing slash with paths
// test.TestPathIPNS2, test.TestPathIPLD2, test.TestInvalidPath1
// test.PathIPNS2, test.PathIPLD2, test.InvalidPath1
}
func TestAPIPinEndpointWithPath(t *testing.T) {
@ -631,7 +629,7 @@ func TestAPIPinEndpointWithPath(t *testing.T) {
defer rest.Shutdown(ctx)
resultantPin := api.PinWithOpts(
test.MustDecodeCid(test.TestCidResolved),
test.CidResolved,
testPinOpts,
)
@ -645,10 +643,10 @@ func TestAPIPinEndpointWithPath(t *testing.T) {
}
continue
}
pin := api.PinSerial{}
pin := api.Pin{}
makePost(t, rest, url(rest)+"/pins"+testCase.WithQuery(), []byte{}, &pin)
if !pin.ToPin().Equals(resultantPin) {
t.Errorf("expected different pin,\n expected: %+v,\n actual: %+v,\n path: %s\n", resultantPin.ToSerial(), pin, testCase.path)
if !pin.Equals(resultantPin) {
t.Errorf("expected different pin,\n expected: %+v,\n actual: %+v,\n path: %s\n", resultantPin, pin, testCase.path)
}
}
}
@ -663,10 +661,10 @@ func TestAPIUnpinEndpoint(t *testing.T) {
tf := func(t *testing.T, url urlF) {
// test regular delete
makeDelete(t, rest, url(rest)+"/pins/"+test.TestCid1, &struct{}{})
makeDelete(t, rest, url(rest)+"/pins/"+test.Cid1.String(), &struct{}{})
errResp := api.Error{}
makeDelete(t, rest, url(rest)+"/pins/"+test.ErrorCid, &errResp)
makeDelete(t, rest, url(rest)+"/pins/"+test.ErrorCid.String(), &errResp)
if errResp.Message != test.ErrBadCid.Error() {
t.Error("expected different error: ", errResp.Message)
}
@ -695,10 +693,10 @@ func TestAPIUnpinEndpointWithPath(t *testing.T) {
}
continue
}
pin := api.PinSerial{}
pin := api.Pin{}
makeDelete(t, rest, url(rest)+"/pins"+testCase.path, &pin)
if pin.Cid != test.TestCidResolved {
t.Errorf("expected different cid, expected: %s, actual: %s, path: %s\n", test.TestCidResolved, pin.Cid, testCase.path)
if !pin.Cid.Equals(test.CidResolved) {
t.Errorf("expected different cid, expected: %s, actual: %s, path: %s\n", test.CidResolved, pin.Cid, testCase.path)
}
}
}
@ -712,11 +710,11 @@ func TestAPIAllocationsEndpoint(t *testing.T) {
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url urlF) {
var resp []api.PinSerial
var resp []*api.Pin
makeGet(t, rest, url(rest)+"/allocations?filter=pin,meta-pin", &resp)
if len(resp) != 3 ||
resp[0].Cid != test.TestCid1 || resp[1].Cid != test.TestCid2 ||
resp[2].Cid != test.TestCid3 {
!resp[0].Cid.Equals(test.Cid1) || !resp[1].Cid.Equals(test.Cid2) ||
!resp[2].Cid.Equals(test.Cid3) {
t.Error("unexpected pin list: ", resp)
}
}
@ -730,14 +728,14 @@ func TestAPIAllocationEndpoint(t *testing.T) {
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url urlF) {
var resp api.PinSerial
makeGet(t, rest, url(rest)+"/allocations/"+test.TestCid1, &resp)
if resp.Cid != test.TestCid1 {
t.Error("cid should be the same")
var resp api.Pin
makeGet(t, rest, url(rest)+"/allocations/"+test.Cid1.String(), &resp)
if !resp.Cid.Equals(test.Cid1) {
t.Errorf("cid should be the same: %s %s", resp.Cid, test.Cid1)
}
errResp := api.Error{}
makeGet(t, rest, url(rest)+"/allocations/"+test.ErrorCid, &errResp)
makeGet(t, rest, url(rest)+"/allocations/"+test.ErrorCid.String(), &errResp)
if errResp.Code != 404 {
t.Error("a non-pinned cid should 404")
}
@ -752,7 +750,7 @@ func TestAPIMetricsEndpoint(t *testing.T) {
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url urlF) {
var resp []api.MetricSerial
var resp []*api.Metric
makeGet(t, rest, url(rest)+"/monitor/metrics/somemetricstype", &resp)
if len(resp) == 0 {
t.Fatal("No metrics found")
@ -761,7 +759,7 @@ func TestAPIMetricsEndpoint(t *testing.T) {
if m.Name != "test" {
t.Error("Unexpected metric name: ", m.Name)
}
if m.Peer != test.TestPeerID1.Pretty() {
if m.Peer.Pretty() != test.PeerID1.Pretty() {
t.Error("Unexpected peer id: ", m.Peer)
}
}
@ -776,47 +774,48 @@ func TestAPIStatusAllEndpoint(t *testing.T) {
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url urlF) {
var resp []api.GlobalPinInfoSerial
var resp []*api.GlobalPinInfo
makeGet(t, rest, url(rest)+"/pins", &resp)
if len(resp) != 3 ||
resp[0].Cid != test.TestCid1 ||
resp[1].PeerMap[test.TestPeerID1.Pretty()].Status != "pinning" {
t.Errorf("unexpected statusAll resp:\n %+v", resp)
!resp[0].Cid.Equals(test.Cid1) ||
resp[1].PeerMap[peer.IDB58Encode(test.PeerID1)].Status.String() != "pinning" {
t.Errorf("unexpected statusAll resp")
}
// Test local=true
var resp2 []api.GlobalPinInfoSerial
var resp2 []*api.GlobalPinInfo
makeGet(t, rest, url(rest)+"/pins?local=true", &resp2)
if len(resp2) != 2 {
t.Errorf("unexpected statusAll+local resp:\n %+v", resp2)
}
// Test with filter
var resp3 []api.GlobalPinInfoSerial
var resp3 []*api.GlobalPinInfo
makeGet(t, rest, url(rest)+"/pins?filter=queued", &resp3)
if len(resp3) != 0 {
t.Errorf("unexpected statusAll+filter=queued resp:\n %+v", resp3)
}
var resp4 []api.GlobalPinInfoSerial
var resp4 []*api.GlobalPinInfo
makeGet(t, rest, url(rest)+"/pins?filter=pinned", &resp4)
if len(resp4) != 1 {
t.Errorf("unexpected statusAll+filter=pinned resp:\n %+v", resp4)
}
var resp5 []api.GlobalPinInfoSerial
var resp5 []*api.GlobalPinInfo
makeGet(t, rest, url(rest)+"/pins?filter=pin_error", &resp5)
if len(resp5) != 1 {
t.Errorf("unexpected statusAll+filter=pin_error resp:\n %+v", resp5)
}
var resp6 []api.GlobalPinInfoSerial
var resp6 []*api.GlobalPinInfo
makeGet(t, rest, url(rest)+"/pins?filter=error", &resp6)
if len(resp6) != 1 {
t.Errorf("unexpected statusAll+filter=error resp:\n %+v", resp6)
}
var resp7 []api.GlobalPinInfoSerial
var resp7 []*api.GlobalPinInfo
makeGet(t, rest, url(rest)+"/pins?filter=error,pinned", &resp7)
if len(resp7) != 2 {
t.Errorf("unexpected statusAll+filter=error,pinned resp:\n %+v", resp7)
@ -832,32 +831,32 @@ func TestAPIStatusEndpoint(t *testing.T) {
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url urlF) {
var resp api.GlobalPinInfoSerial
makeGet(t, rest, url(rest)+"/pins/"+test.TestCid1, &resp)
var resp api.GlobalPinInfo
makeGet(t, rest, url(rest)+"/pins/"+test.Cid1.String(), &resp)
if resp.Cid != test.TestCid1 {
if !resp.Cid.Equals(test.Cid1) {
t.Error("expected the same cid")
}
info, ok := resp.PeerMap[test.TestPeerID1.Pretty()]
info, ok := resp.PeerMap[peer.IDB58Encode(test.PeerID1)]
if !ok {
t.Fatal("expected info for test.TestPeerID1")
t.Fatal("expected info for test.PeerID1")
}
if info.Status != "pinned" {
if info.Status.String() != "pinned" {
t.Error("expected different status")
}
// Test local=true
var resp2 api.GlobalPinInfoSerial
makeGet(t, rest, url(rest)+"/pins/"+test.TestCid1+"?local=true", &resp2)
var resp2 api.GlobalPinInfo
makeGet(t, rest, url(rest)+"/pins/"+test.Cid1.String()+"?local=true", &resp2)
if resp2.Cid != test.TestCid1 {
if !resp2.Cid.Equals(test.Cid1) {
t.Error("expected the same cid")
}
info, ok = resp2.PeerMap[test.TestPeerID2.Pretty()]
info, ok = resp2.PeerMap[peer.IDB58Encode(test.PeerID2)]
if !ok {
t.Fatal("expected info for test.TestPeerID2")
t.Fatal("expected info for test.PeerID2")
}
if info.Status != "pinned" {
if info.Status.String() != "pinned" {
t.Error("expected different status")
}
}
@ -871,17 +870,17 @@ func TestAPISyncAllEndpoint(t *testing.T) {
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url urlF) {
var resp []api.GlobalPinInfoSerial
var resp []*api.GlobalPinInfo
makePost(t, rest, url(rest)+"/pins/sync", []byte{}, &resp)
if len(resp) != 3 ||
resp[0].Cid != test.TestCid1 ||
resp[1].PeerMap[test.TestPeerID1.Pretty()].Status != "pinning" {
!resp[0].Cid.Equals(test.Cid1) ||
resp[1].PeerMap[peer.IDB58Encode(test.PeerID1)].Status.String() != "pinning" {
t.Errorf("unexpected syncAll resp:\n %+v", resp)
}
// Test local=true
var resp2 []api.GlobalPinInfoSerial
var resp2 []*api.GlobalPinInfo
makePost(t, rest, url(rest)+"/pins/sync?local=true", []byte{}, &resp2)
if len(resp2) != 2 {
@ -898,32 +897,32 @@ func TestAPISyncEndpoint(t *testing.T) {
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url urlF) {
var resp api.GlobalPinInfoSerial
makePost(t, rest, url(rest)+"/pins/"+test.TestCid1+"/sync", []byte{}, &resp)
var resp api.GlobalPinInfo
makePost(t, rest, url(rest)+"/pins/"+test.Cid1.String()+"/sync", []byte{}, &resp)
if resp.Cid != test.TestCid1 {
if !resp.Cid.Equals(test.Cid1) {
t.Error("expected the same cid")
}
info, ok := resp.PeerMap[test.TestPeerID1.Pretty()]
info, ok := resp.PeerMap[peer.IDB58Encode(test.PeerID1)]
if !ok {
t.Fatal("expected info for test.TestPeerID1")
t.Fatal("expected info for test.PeerID1")
}
if info.Status != "pinned" {
if info.Status.String() != "pinned" {
t.Error("expected different status")
}
// Test local=true
var resp2 api.GlobalPinInfoSerial
makePost(t, rest, url(rest)+"/pins/"+test.TestCid1+"/sync?local=true", []byte{}, &resp2)
var resp2 api.GlobalPinInfo
makePost(t, rest, url(rest)+"/pins/"+test.Cid1.String()+"/sync?local=true", []byte{}, &resp2)
if resp2.Cid != test.TestCid1 {
if !resp2.Cid.Equals(test.Cid1) {
t.Error("expected the same cid")
}
info, ok = resp2.PeerMap[test.TestPeerID2.Pretty()]
info, ok = resp2.PeerMap[peer.IDB58Encode(test.PeerID2)]
if !ok {
t.Fatal("expected info for test.TestPeerID2")
t.Fatal("expected info for test.PeerID2")
}
if info.Status != "pinned" {
if info.Status.String() != "pinned" {
t.Error("expected different status")
}
}
@ -937,17 +936,17 @@ func TestAPIRecoverEndpoint(t *testing.T) {
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url urlF) {
var resp api.GlobalPinInfoSerial
makePost(t, rest, url(rest)+"/pins/"+test.TestCid1+"/recover", []byte{}, &resp)
var resp api.GlobalPinInfo
makePost(t, rest, url(rest)+"/pins/"+test.Cid1.String()+"/recover", []byte{}, &resp)
if resp.Cid != test.TestCid1 {
if !resp.Cid.Equals(test.Cid1) {
t.Error("expected the same cid")
}
info, ok := resp.PeerMap[test.TestPeerID1.Pretty()]
info, ok := resp.PeerMap[peer.IDB58Encode(test.PeerID1)]
if !ok {
t.Fatal("expected info for test.TestPeerID1")
t.Fatal("expected info for test.PeerID1")
}
if info.Status != "pinned" {
if info.Status.String() != "pinned" {
t.Error("expected different status")
}
}
@ -961,7 +960,7 @@ func TestAPIRecoverAllEndpoint(t *testing.T) {
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url urlF) {
var resp []api.GlobalPinInfoSerial
var resp []*api.GlobalPinInfo
makePost(t, rest, url(rest)+"/pins/recover?local=true", []byte{}, &resp)
if len(resp) != 0 {

View File

@ -9,7 +9,6 @@
package api
import (
"bytes"
"encoding/json"
"fmt"
"net/url"
@ -26,7 +25,7 @@ import (
logging "github.com/ipfs/go-log"
peer "github.com/libp2p/go-libp2p-peer"
protocol "github.com/libp2p/go-libp2p-protocol"
ma "github.com/multiformats/go-multiaddr"
multiaddr "github.com/multiformats/go-multiaddr"
// needed to parse /ws multiaddresses
_ "github.com/libp2p/go-ws-transport"
@ -130,6 +129,23 @@ func (st TrackerStatus) Match(filter TrackerStatus) bool {
return filter == 0 || st&filter > 0
}
// MarshalJSON uses the string representation of TrackerStatus for JSON
// encoding.
func (st TrackerStatus) MarshalJSON() ([]byte, error) {
return json.Marshal(st.String())
}
// UnmarshalJSON sets a tracker status from its JSON representation.
func (st *TrackerStatus) UnmarshalJSON(data []byte) error {
var v string
err := json.Unmarshal(data, &v)
if err != nil {
return err
}
*st = TrackerStatusFromString(v)
return nil
}
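A minimal sketch of the round-trip these methods enable (illustrative, not part of the commit; assumes encoding/json is imported and that TrackerStatusPinned stringifies to "pinned"):

st := TrackerStatusPinned
j, _ := json.Marshal(st) // encodes as the string form: "pinned"
var st2 TrackerStatus
_ = json.Unmarshal(j, &st2) // st2 == TrackerStatusPinned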
// TrackerStatusFromString parses a string and returns the matching
// TrackerStatus value. The string can be a comma-separated list
// representing a TrackerStatus filter. Unknown status names are
@ -225,166 +241,35 @@ var ipfsPinStatus2TrackerStatusMap = map[IPFSPinStatus]TrackerStatus{
// GlobalPinInfo contains cluster-wide status information about a tracked Cid,
// indexed by cluster peer.
type GlobalPinInfo struct {
Cid cid.Cid
PeerMap map[peer.ID]PinInfo
}
// GlobalPinInfoSerial is the serializable version of GlobalPinInfo.
type GlobalPinInfoSerial struct {
Cid string `json:"cid"`
PeerMap map[string]PinInfoSerial `json:"peer_map"`
}
// ToSerial converts a GlobalPinInfo to its serializable version.
func (gpi GlobalPinInfo) ToSerial() GlobalPinInfoSerial {
s := GlobalPinInfoSerial{}
if gpi.Cid.Defined() {
s.Cid = gpi.Cid.String()
}
s.PeerMap = make(map[string]PinInfoSerial)
for k, v := range gpi.PeerMap {
s.PeerMap[peer.IDB58Encode(k)] = v.ToSerial()
}
return s
}
// ToGlobalPinInfo converts a GlobalPinInfoSerial to its native version.
func (gpis GlobalPinInfoSerial) ToGlobalPinInfo() GlobalPinInfo {
c, err := cid.Decode(gpis.Cid)
if err != nil {
logger.Debug(gpis.Cid, err)
}
gpi := GlobalPinInfo{
Cid: c,
PeerMap: make(map[peer.ID]PinInfo),
}
for k, v := range gpis.PeerMap {
p, err := peer.IDB58Decode(k)
if err != nil {
logger.Error(k, err)
}
gpi.PeerMap[p] = v.ToPinInfo()
}
return gpi
Cid cid.Cid `json:"cid" codec:"c"`
// https://github.com/golang/go/issues/28827
// Peer IDs are of string Kind(). We can't use peer IDs here
// as Go ignores TextMarshaler.
PeerMap map[string]*PinInfo `json:"peer_map" codec:"pm,omitempty"`
}
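For illustration only (c, p and info are hypothetical cid.Cid, peer.ID and *PinInfo values), PeerMap is now keyed by the B58-encoded peer ID rather than by peer.ID:

gpi := &GlobalPinInfo{
	Cid:     c,
	PeerMap: map[string]*PinInfo{peer.IDB58Encode(p): info},
}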
// PinInfo holds information about local pins.
type PinInfo struct {
Cid cid.Cid
Peer peer.ID
PeerName string
Status TrackerStatus
TS time.Time
Error string
}
// PinInfoSerial is a serializable version of PinInfo.
// information is marked as
type PinInfoSerial struct {
Cid string `json:"cid"`
Peer string `json:"peer"`
PeerName string `json:"peername"`
Status string `json:"status"`
TS string `json:"timestamp"`
Error string `json:"error"`
}
// ToSerial converts a PinInfo to its serializable version.
func (pi PinInfo) ToSerial() PinInfoSerial {
c := ""
if pi.Cid.Defined() {
c = pi.Cid.String()
}
p := ""
if pi.Peer != "" {
p = peer.IDB58Encode(pi.Peer)
}
return PinInfoSerial{
Cid: c,
Peer: p,
PeerName: pi.PeerName,
Status: pi.Status.String(),
TS: pi.TS.UTC().Format(time.RFC3339),
Error: pi.Error,
}
}
// ToPinInfo converts a PinInfoSerial to its native version.
func (pis PinInfoSerial) ToPinInfo() PinInfo {
c, err := cid.Decode(pis.Cid)
if err != nil {
logger.Debug(pis.Cid, err)
}
p, err := peer.IDB58Decode(pis.Peer)
if err != nil {
logger.Debug(pis.Peer, err)
}
ts, err := time.Parse(time.RFC3339, pis.TS)
if err != nil {
logger.Debug(pis.TS, err)
}
return PinInfo{
Cid: c,
Peer: p,
PeerName: pis.PeerName,
Status: TrackerStatusFromString(pis.Status),
TS: ts,
Error: pis.Error,
}
Cid cid.Cid `json:"cid" codec:"c"`
Peer peer.ID `json:"peer" codec:"p,omitempty"`
PeerName string `json:"peername" codec:"pn,omitempty"`
Status TrackerStatus `json:"status" codec:"st,omitempty"`
TS time.Time `json:"timestamp" codec:"ts,omitempty"`
Error string `json:"error" codec:"e,omitempty"`
}
// Version holds version information
type Version struct {
Version string `json:"Version"`
Version string `json:"Version" codec:"v"`
}
// IPFSID is used to store information about the underlying IPFS daemon
type IPFSID struct {
ID peer.ID
Addresses []ma.Multiaddr
Error string
}
// IPFSIDSerial is the serializable IPFSID for RPC requests
type IPFSIDSerial struct {
ID string `json:"id"`
Addresses MultiaddrsSerial `json:"addresses"`
Error string `json:"error"`
}
// ToSerial converts IPFSID to a go serializable object
func (id *IPFSID) ToSerial() IPFSIDSerial {
p := ""
if id.ID != "" {
p = peer.IDB58Encode(id.ID)
}
return IPFSIDSerial{
ID: p,
Addresses: MultiaddrsToSerial(id.Addresses),
Error: id.Error,
}
}
// ToIPFSID converts an IPFSIDSerial to IPFSID
func (ids *IPFSIDSerial) ToIPFSID() IPFSID {
id := IPFSID{}
if pID, err := peer.IDB58Decode(ids.ID); err == nil {
id.ID = pID
}
id.Addresses = ids.Addresses.ToMultiaddrs()
id.Error = ids.Error
return id
}
// ConnectGraph holds information about the connectivity of the cluster
// To read, traverse the keys of ClusterLinks. Each such id is one of
// the peers of the "ClusterID" peer running the query. ClusterLinks[id]
// in turn lists the ids that peer "id" sees itself connected to. It is
// possible that id is a peer of ClusterID, but ClusterID can not reach id
// over rpc, in which case ClusterLinks[id] == [], as id's view of its
// connectivity can not be retrieved.
// ConnectGraph holds information about the connectivity of the cluster. To
// read, traverse the keys of ClusterLinks. Each such id is one of the peers
// of the "ClusterID" peer running the query. ClusterLinks[id] in turn lists
// the ids that peer "id" sees itself connected to. It is possible that id is
// a peer of ClusterID, but ClusterID can not reach id over rpc, in which case
// ClusterLinks[id] == [], as id's view of its connectivity can not be
// retrieved.
//
// Iff there was an error reading the IPFSID of the peer then id will not be a
// key of ClustertoIPFS or IPFSLinks. Finally iff id is a key of ClustertoIPFS
@ -392,235 +277,83 @@ func (ids *IPFSIDSerial) ToIPFSID() IPFSID {
// IPFSLinks[id] == [].
type ConnectGraph struct {
ClusterID peer.ID
IPFSLinks map[peer.ID][]peer.ID // ipfs to ipfs links
ClusterLinks map[peer.ID][]peer.ID // cluster to cluster links
ClustertoIPFS map[peer.ID]peer.ID // cluster to ipfs links
// ipfs to ipfs links
IPFSLinks map[string][]peer.ID `json:"ipfs_links" codec:"il,omitempty"`
// cluster to cluster links
ClusterLinks map[string][]peer.ID `json:"cluster_links" codec:"cl,omitempty"`
// cluster to ipfs links
ClustertoIPFS map[string]peer.ID `json:"cluster_to_ipfs" codec:"ci,omitempty"`
}
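A hedged traversal sketch following the comment above (cg is a hypothetical ConnectGraph; assumes fmt is imported):

for id, seen := range cg.ClusterLinks {
	ipfsPeer, ok := cg.ClustertoIPFS[id]
	if !ok {
		continue // this peer's IPFS ID could not be read
	}
	fmt.Printf("%s (ipfs %s) sees %d cluster peers\n", id, ipfsPeer.Pretty(), len(seen))
}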
// ConnectGraphSerial is the serializable ConnectGraph counterpart for RPC requests
type ConnectGraphSerial struct {
ClusterID string
IPFSLinks map[string][]string `json:"ipfs_links"`
ClusterLinks map[string][]string `json:"cluster_links"`
ClustertoIPFS map[string]string `json:"cluster_to_ipfs"`
// Multiaddr is a concrete type to wrap a Multiaddress so that it knows how to
// serialize and deserialize itself.
type Multiaddr struct {
multiaddr.Multiaddr
}
// ToSerial converts a ConnectGraph to its Go-serializable version
func (cg ConnectGraph) ToSerial() ConnectGraphSerial {
IPFSLinksSerial := serializeLinkMap(cg.IPFSLinks)
ClusterLinksSerial := serializeLinkMap(cg.ClusterLinks)
ClustertoIPFSSerial := make(map[string]string)
for k, v := range cg.ClustertoIPFS {
ClustertoIPFSSerial[peer.IDB58Encode(k)] = peer.IDB58Encode(v)
}
return ConnectGraphSerial{
ClusterID: peer.IDB58Encode(cg.ClusterID),
IPFSLinks: IPFSLinksSerial,
ClusterLinks: ClusterLinksSerial,
ClustertoIPFS: ClustertoIPFSSerial,
}
// NewMultiaddr returns a cluster Multiaddr wrapper, creating the underlying
// multiaddr.Multiaddr from the given string.
func NewMultiaddr(mstr string) (Multiaddr, error) {
m, err := multiaddr.NewMultiaddr(mstr)
return Multiaddr{Multiaddr: m}, err
}
// ToConnectGraph converts a ConnectGraphSerial to a ConnectGraph
func (cgs ConnectGraphSerial) ToConnectGraph() ConnectGraph {
ClustertoIPFS := make(map[peer.ID]peer.ID)
for k, v := range cgs.ClustertoIPFS {
pid1, _ := peer.IDB58Decode(k)
pid2, _ := peer.IDB58Decode(v)
ClustertoIPFS[pid1] = pid2
}
pid, _ := peer.IDB58Decode(cgs.ClusterID)
return ConnectGraph{
ClusterID: pid,
IPFSLinks: deserializeLinkMap(cgs.IPFSLinks),
ClusterLinks: deserializeLinkMap(cgs.ClusterLinks),
ClustertoIPFS: ClustertoIPFS,
}
// NewMultiaddrWithValue returns a new cluster Multiaddr wrapper using the
// given multiaddr.Multiaddr.
func NewMultiaddrWithValue(ma multiaddr.Multiaddr) Multiaddr {
return Multiaddr{Multiaddr: ma}
}
func serializeLinkMap(Links map[peer.ID][]peer.ID) map[string][]string {
LinksSerial := make(map[string][]string)
for k, v := range Links {
kS := peer.IDB58Encode(k)
LinksSerial[kS] = PeersToStrings(v)
}
return LinksSerial
// MarshalJSON returns a JSON-formatted multiaddress.
func (maddr Multiaddr) MarshalJSON() ([]byte, error) {
return maddr.Multiaddr.MarshalJSON()
}
func deserializeLinkMap(LinksSerial map[string][]string) map[peer.ID][]peer.ID {
Links := make(map[peer.ID][]peer.ID)
for k, v := range LinksSerial {
pid, _ := peer.IDB58Decode(k)
Links[pid] = StringsToPeers(v)
}
return Links
// UnmarshalJSON parses a cluster Multiaddr from the JSON representation.
func (maddr *Multiaddr) UnmarshalJSON(data []byte) error {
maddr.Multiaddr, _ = multiaddr.NewMultiaddr("")
return maddr.Multiaddr.UnmarshalJSON(data)
}
// SwarmPeers lists an ipfs daemon's peers
type SwarmPeers []peer.ID
// SwarmPeersSerial is the serialized form of SwarmPeers for RPC use
type SwarmPeersSerial []string
// ToSerial converts SwarmPeers to its Go-serializeable version
func (swarm SwarmPeers) ToSerial() SwarmPeersSerial {
return PeersToStrings(swarm)
// MarshalBinary returns the bytes of the wrapped multiaddress.
func (maddr Multiaddr) MarshalBinary() ([]byte, error) {
return maddr.Multiaddr.MarshalBinary()
}
// ToSwarmPeers converts a SwarmPeersSerial object to SwarmPeers.
func (swarmS SwarmPeersSerial) ToSwarmPeers() SwarmPeers {
return StringsToPeers(swarmS)
// UnmarshalBinary casts some bytes as a multiaddress and wraps it with
// the given cluster Multiaddr.
func (maddr *Multiaddr) UnmarshalBinary(data []byte) error {
datacopy := make([]byte, len(data)) // copy: the decoder may reuse the input buffer
copy(datacopy, data)
maddr.Multiaddr, _ = multiaddr.NewMultiaddr("")
return maddr.Multiaddr.UnmarshalBinary(datacopy)
}
// Value returns the wrapped multiaddr.Multiaddr.
func (maddr Multiaddr) Value() multiaddr.Multiaddr {
return maddr.Multiaddr
}
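An illustrative JSON round-trip of the wrapper, using only the methods defined above (error handling omitted):

m, _ := NewMultiaddr("/ip4/1.2.3.4/tcp/9096")
j, _ := m.MarshalJSON()
var m2 Multiaddr
_ = m2.UnmarshalJSON(j)
// m2.Value().Equal(m.Value()) == true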
// ID holds information about the Cluster peer
type ID struct {
ID peer.ID
Addresses []ma.Multiaddr
ClusterPeers []peer.ID
ClusterPeersAddresses []ma.Multiaddr
Version string
Commit string
RPCProtocolVersion protocol.ID
Error string
IPFS IPFSID
Peername string
ID peer.ID `json:"id" codec:"i,omitempty"`
Addresses []Multiaddr `json:"addresses" codec:"a,omitempty"`
ClusterPeers []peer.ID `json:"cluster_peers" codec:"cp,omitempty"`
ClusterPeersAddresses []Multiaddr `json:"cluster_peers_addresses" codec:"cpa,omitempty"`
Version string `json:"version" codec:"v,omitempty"`
Commit string `json:"commit" codec:"c,omitempty"`
RPCProtocolVersion protocol.ID `json:"rpc_protocol_version" codec:"rv,omitempty"`
Error string `json:"error" codec:"e,omitempty"`
IPFS IPFSID `json:"ipfs" codec:"ip,omitempty"`
Peername string `json:"peername" codec:"pn,omitempty"`
//PublicKey crypto.PubKey
}
// IDSerial is the serializable ID counterpart for RPC requests
type IDSerial struct {
ID string `json:"id"`
Addresses MultiaddrsSerial `json:"addresses"`
ClusterPeers []string `json:"cluster_peers"`
ClusterPeersAddresses MultiaddrsSerial `json:"cluster_peers_addresses"`
Version string `json:"version"`
Commit string `json:"commit"`
RPCProtocolVersion string `json:"rpc_protocol_version"`
Error string `json:"error"`
IPFS IPFSIDSerial `json:"ipfs"`
Peername string `json:"peername"`
//PublicKey []byte
}
// ToSerial converts an ID to its Go-serializable version
func (id ID) ToSerial() IDSerial {
//var pkey []byte
//if id.PublicKey != nil {
// pkey, _ = id.PublicKey.Bytes()
//}
p := ""
if id.ID != "" {
p = peer.IDB58Encode(id.ID)
}
return IDSerial{
ID: p,
Addresses: MultiaddrsToSerial(id.Addresses),
ClusterPeers: PeersToStrings(id.ClusterPeers),
ClusterPeersAddresses: MultiaddrsToSerial(id.ClusterPeersAddresses),
Version: id.Version,
Commit: id.Commit,
RPCProtocolVersion: string(id.RPCProtocolVersion),
Error: id.Error,
IPFS: id.IPFS.ToSerial(),
Peername: id.Peername,
//PublicKey: pkey,
}
}
// ToID converts an IDSerial object to ID.
// It will ignore any errors when parsing the fields.
func (ids IDSerial) ToID() ID {
id := ID{}
p, err := peer.IDB58Decode(ids.ID)
if err != nil {
logger.Debug(ids.ID, err)
}
id.ID = p
//if pkey, err := crypto.UnmarshalPublicKey(ids.PublicKey); err == nil {
// id.PublicKey = pkey
//}
id.Addresses = ids.Addresses.ToMultiaddrs()
id.ClusterPeers = StringsToPeers(ids.ClusterPeers)
id.ClusterPeersAddresses = ids.ClusterPeersAddresses.ToMultiaddrs()
id.Version = ids.Version
id.Commit = ids.Commit
id.RPCProtocolVersion = protocol.ID(ids.RPCProtocolVersion)
id.Error = ids.Error
id.IPFS = ids.IPFS.ToIPFSID()
id.Peername = ids.Peername
return id
}
// MultiaddrSerial is a Multiaddress in a serializable form
type MultiaddrSerial string
// MultiaddrsSerial is an array of Multiaddresses in serializable form
type MultiaddrsSerial []MultiaddrSerial
// MultiaddrToSerial converts a Multiaddress to its serializable form
func MultiaddrToSerial(addr ma.Multiaddr) MultiaddrSerial {
if addr != nil {
return MultiaddrSerial(addr.String())
}
return ""
}
// ToMultiaddr converts a serializable Multiaddress to its original type.
// All errors are ignored.
func (addrS MultiaddrSerial) ToMultiaddr() ma.Multiaddr {
str := string(addrS)
a, err := ma.NewMultiaddr(str)
if err != nil {
logger.Error(str, err)
}
return a
}
// MultiaddrsToSerial converts a slice of Multiaddresses to its
// serializable form.
func MultiaddrsToSerial(addrs []ma.Multiaddr) MultiaddrsSerial {
addrsS := make([]MultiaddrSerial, len(addrs), len(addrs))
for i, a := range addrs {
if a != nil {
addrsS[i] = MultiaddrToSerial(a)
}
}
return addrsS
}
// ToMultiaddrs converts MultiaddrsSerial back to a slice of Multiaddresses
func (addrsS MultiaddrsSerial) ToMultiaddrs() []ma.Multiaddr {
addrs := make([]ma.Multiaddr, len(addrsS), len(addrsS))
for i, addrS := range addrsS {
addrs[i] = addrS.ToMultiaddr()
}
return addrs
}
// CidsToStrings encodes cid.Cids to strings.
func CidsToStrings(cids []cid.Cid) []string {
strs := make([]string, len(cids))
for i, c := range cids {
strs[i] = c.String()
}
return strs
}
// StringsToCidSet decodes cid.Cids from strings.
func StringsToCidSet(strs []string) *cid.Set {
cids := cid.NewSet()
for _, str := range strs {
c, err := cid.Decode(str)
if err != nil {
logger.Error(str, err)
}
cids.Add(c)
}
return cids
// IPFSID is used to store information about the underlying IPFS daemon
type IPFSID struct {
ID peer.ID `json:"id" codec:"i,omitempty"`
Addresses []Multiaddr `json:"addresses" codec:"a,omitempty"`
Error string `json:"error" codec:"e,omitempty"`
}
// PinType specifies which sort of Pin object we are dealing with.
@ -819,23 +552,24 @@ func (po *PinOptions) FromQuery(q url.Values) {
type Pin struct {
PinOptions
Cid cid.Cid
Cid cid.Cid `json:"cid" codec:"c"`
// See PinType comments
Type PinType
Type PinType `json:"type" codec:"t,omitempty"`
// The peers to which this pin is allocated
Allocations []peer.ID
Allocations []peer.ID `json:"allocations" codec:"a,omitempty"`
// MaxDepth associated to this pin. -1 means
// recursive.
MaxDepth int
MaxDepth int `json:"max_depth" codec:"d,omitempty"`
// We carry a reference CID to this pin. For
// ClusterDAGs, it is the MetaPin CID. For the
// MetaPin it is the ClusterDAG CID. For Shards,
// it is the previous shard CID.
Reference cid.Cid
// When not needed the pointer is nil
Reference *cid.Cid `json:"reference" codec:"r,omitempty"`
}
// PinPath is a wrapper for holding pin options and path of the content.
@ -846,8 +580,8 @@ type PinPath struct {
// PinCid is a shortcut to create a Pin only with a Cid. Default is for pin to
// be recursive and the pin to be of DataType.
func PinCid(c cid.Cid) Pin {
return Pin{
func PinCid(c cid.Cid) *Pin {
return &Pin{
Cid: c,
Type: DataType,
Allocations: []peer.ID{},
@ -857,46 +591,12 @@ func PinCid(c cid.Cid) Pin {
// PinWithOpts creates a new Pin calling PinCid(c) and then sets
// its PinOptions fields with the given options.
func PinWithOpts(c cid.Cid, opts PinOptions) Pin {
func PinWithOpts(c cid.Cid, opts PinOptions) *Pin {
p := PinCid(c)
p.PinOptions = opts
return p
}
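A usage sketch (c is a hypothetical cid.Cid): both constructors now return a *Pin, recursive and of DataType by default:

opts := PinOptions{
	ReplicationFactorMin: -1,
	ReplicationFactorMax: -1,
	Name:                 "example",
}
p := PinWithOpts(c, opts) // p is a *Pin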
// PinSerial is a serializable version of Pin
type PinSerial struct {
PinOptions
Cid string `json:"cid" codec:"c,omitempty"`
Type uint64 `json:"type" codec:"t,omitempty"`
Allocations []string `json:"allocations" codec:"a,omitempty"`
MaxDepth int `json:"max_depth" codec:"d,omitempty"`
Reference string `json:"reference" codec:"r,omitempty"`
}
// ToSerial converts a Pin to PinSerial.
func (pin Pin) ToSerial() PinSerial {
c := ""
if pin.Cid.Defined() {
c = pin.Cid.String()
}
ref := ""
if pin.Reference.Defined() {
ref = pin.Reference.String()
}
allocs := PeersToStrings(pin.Allocations)
return PinSerial{
Cid: c,
Allocations: allocs,
Type: uint64(pin.Type),
MaxDepth: pin.MaxDepth,
Reference: ref,
PinOptions: pin.PinOptions,
}
}
func convertPinType(t PinType) pb.Pin_PinType {
var i pb.Pin_PinType
for t != 1 {
@ -934,9 +634,11 @@ func (pin *Pin) ProtoMarshal() ([]byte, error) {
Type: convertPinType(pin.Type),
Allocations: allocs,
MaxDepth: int32(pin.MaxDepth),
Reference: pin.Reference.Bytes(),
Options: opts,
}
if ref := pin.Reference; ref != nil {
pbPin.Reference = ref.Bytes()
}
return proto.Marshal(pbPin)
}
@ -971,12 +673,11 @@ func (pin *Pin) ProtoUnmarshal(data []byte) error {
pin.MaxDepth = int(pbPin.GetMaxDepth())
ref, err := cid.Cast(pbPin.GetReference())
if err != nil {
pin.Reference = cid.Undef
pin.Reference = nil
} else {
pin.Reference = ref
pin.Reference = &ref
}
pin.Reference = ref
opts := pbPin.GetOptions()
pin.ReplicationFactorMin = int(opts.GetReplicationFactorMin())
@ -991,34 +692,43 @@ func (pin *Pin) ProtoUnmarshal(data []byte) error {
// Equals checks if two pins are the same (with the same allocations).
// If allocations are the same but in different order, they are still
// considered equivalent.
func (pin Pin) Equals(pin2 Pin) bool {
pin1s := pin.ToSerial()
pin2s := pin2.ToSerial()
if pin1s.Cid != pin2s.Cid {
func (pin *Pin) Equals(pin2 *Pin) bool {
if pin == nil && pin2 != nil || pin2 == nil && pin != nil {
return false
}
if pin1s.Name != pin2s.Name {
if !pin.Cid.Equals(pin2.Cid) {
return false
}
if pin1s.Type != pin2s.Type {
if pin.Name != pin2.Name {
return false
}
if pin1s.MaxDepth != pin2s.MaxDepth {
if pin.Type != pin2.Type {
return false
}
if pin1s.Reference != pin2s.Reference {
if pin.MaxDepth != pin2.MaxDepth {
return false
}
sort.Strings(pin1s.Allocations)
sort.Strings(pin2s.Allocations)
if pin.Reference != nil && pin2.Reference == nil ||
pin.Reference == nil && pin2.Reference != nil {
return false
}
if strings.Join(pin1s.Allocations, ",") != strings.Join(pin2s.Allocations, ",") {
if pin.Reference != nil && pin2.Reference != nil &&
!pin.Reference.Equals(*pin2.Reference) {
return false
}
allocs1 := PeersToStrings(pin.Allocations)
sort.Strings(allocs1)
allocs2 := PeersToStrings(pin2.Allocations)
sort.Strings(allocs2)
if strings.Join(allocs1, ",") != strings.Join(allocs2, ",") {
return false
}
@ -1027,7 +737,7 @@ func (pin Pin) Equals(pin2 Pin) bool {
// IsRemotePin determines whether a Pin's ReplicationFactor has
// been met, so as to either pin or unpin it from the peer.
func (pin Pin) IsRemotePin(pid peer.ID) bool {
func (pin *Pin) IsRemotePin(pid peer.ID) bool {
if pin.ReplicationFactorMax < 0 || pin.ReplicationFactorMin < 0 {
return false
}
@ -1040,56 +750,13 @@ func (pin Pin) IsRemotePin(pid peer.ID) bool {
return true
}
// ToPin converts a PinSerial to its native form.
func (pins PinSerial) ToPin() Pin {
c, err := cid.Decode(pins.Cid)
if err != nil {
logger.Debug(pins.Cid, err)
}
var ref cid.Cid
if pins.Reference != "" {
ref, err = cid.Decode(pins.Reference)
if err != nil {
logger.Warning(pins.Reference, err)
}
}
return Pin{
Cid: c,
Allocations: StringsToPeers(pins.Allocations),
Type: PinType(pins.Type),
MaxDepth: pins.MaxDepth,
Reference: ref,
PinOptions: pins.PinOptions,
}
}
// Clone returns a deep copy of the PinSerial.
func (pins PinSerial) Clone() PinSerial {
new := pins // this copies all the simple fields.
// slices are pointers. We need to explicitly copy them.
new.Allocations = make([]string, len(pins.Allocations))
copy(new.Allocations, pins.Allocations)
return new
}
// DecodeCid retrieves just the cid from a PinSerial without
// allocating a Pin.
func (pins PinSerial) DecodeCid() cid.Cid {
c, err := cid.Decode(pins.Cid)
if err != nil {
logger.Debug(pins.Cid, err)
}
return c
}
// NodeWithMeta specifies a block of data and a set of optional metadata fields
// carrying information about the encoded ipld node
type NodeWithMeta struct {
Data []byte
Cid string
CumSize uint64 // Cumulative size
Format string
Data []byte `codec:"d,omitempty"`
Cid cid.Cid `codec:"c,omitempty"`
CumSize uint64 `codec:"s,omitempty"` // Cumulative size
Format string `codec:"f,omitempty"`
}
// Size returns how big the block is. It is different from CumSize, which
@ -1102,11 +769,11 @@ func (n *NodeWithMeta) Size() uint64 {
// pin allocations by a PinAllocator. IPFS cluster is agnostic to
// the Value, which should be interpreted by the PinAllocator.
type Metric struct {
Name string
Peer peer.ID
Value string
Expire int64
Valid bool
Name string `json:"name" codec:"n,omitempty"`
Peer peer.ID `json:"peer" codec:"p,omitempty"`
Value string `json:"value" codec:"v,omitempty"`
Expire int64 `json:"expire" codec:"e,omitempty"`
Valid bool `json:"valid" codec:"d,omitempty"`
}
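A hedged usage sketch (somePeer is a hypothetical peer.ID; assumes time is imported and that SetTTL takes a time.Duration, as its comment below states):

m := &Metric{Name: "ping", Peer: somePeer, Valid: true}
m.SetTTL(30 * time.Second)
_ = m.Discard() // false while valid and not yet expired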
// SetTTL sets Metric to expire after the given time.Duration
@ -1132,51 +799,6 @@ func (m *Metric) Discard() bool {
return !m.Valid || m.Expired()
}
// MetricSerial is a helper for JSON marshaling. The Metric type is already
// serializable, but not pretty to humans (API).
type MetricSerial struct {
Name string `json:"name"`
Peer string `json:"peer"`
Value string `json:"value"`
Expire int64 `json:"expire"`
Valid bool `json:"valid"`
}
// MarshalJSON allows a Metric to produce a JSON representation
// of itself.
func (m *Metric) MarshalJSON() ([]byte, error) {
return json.Marshal(&MetricSerial{
Name: m.Name,
Peer: peer.IDB58Encode(m.Peer),
Value: m.Value,
Expire: m.Expire,
})
}
// UnmarshalJSON decodes JSON on top of the Metric.
func (m *Metric) UnmarshalJSON(j []byte) error {
if bytes.Equal(j, []byte("null")) {
return nil
}
ms := &MetricSerial{}
err := json.Unmarshal(j, ms)
if err != nil {
return err
}
p, err := peer.IDB58Decode(ms.Peer)
if err != nil {
return err
}
m.Name = ms.Name
m.Peer = p
m.Value = ms.Value
m.Expire = ms.Expire
return nil
}
// Alert carries alerting information about a peer. WIP.
type Alert struct {
Peer peer.ID
@ -1185,8 +807,8 @@ type Alert struct {
// Error can be used by APIs to return errors.
type Error struct {
Code int `json:"code"`
Message string `json:"message"`
Code int `json:"code" codec:"o,omitempty"`
Message string `json:"message" codec:"m,omitempty"`
}
// Error implements the error interface and returns the error's message.
@ -1196,6 +818,6 @@ func (e *Error) Error() string {
// IPFSRepoStat wraps information about the IPFS repository.
type IPFSRepoStat struct {
RepoSize uint64
StorageMax uint64
RepoSize uint64 `codec:"r,omitempty"`
StorageMax uint64 `codec:"s,omitempty"`
}

View File

@ -1,7 +1,7 @@
package api
import (
"fmt"
"bytes"
"net/url"
"reflect"
"strings"
@ -11,6 +11,8 @@ import (
cid "github.com/ipfs/go-cid"
peer "github.com/libp2p/go-libp2p-peer"
ma "github.com/multiformats/go-multiaddr"
"github.com/ugorji/go/codec"
)
var testTime = time.Date(2017, 12, 31, 15, 45, 50, 0, time.UTC)
@ -51,180 +53,6 @@ func TestIPFSPinStatusFromString(t *testing.T) {
}
}
func TestGlobalPinInfoConv(t *testing.T) {
defer func() {
if r := recover(); r != nil {
t.Fatal("paniced")
}
}()
gpi := GlobalPinInfo{
Cid: testCid1,
PeerMap: map[peer.ID]PinInfo{
testPeerID1: {
Cid: testCid1,
Peer: testPeerID1,
Status: TrackerStatusPinned,
TS: testTime,
},
},
}
newgpi := gpi.ToSerial().ToGlobalPinInfo()
if gpi.Cid.String() != newgpi.Cid.String() {
t.Error("mismatching CIDs")
}
if gpi.PeerMap[testPeerID1].Cid.String() != newgpi.PeerMap[testPeerID1].Cid.String() {
t.Error("mismatching PinInfo CIDs")
}
if !gpi.PeerMap[testPeerID1].TS.Equal(newgpi.PeerMap[testPeerID1].TS) {
t.Error("bad time")
}
}
func TestIDConv(t *testing.T) {
defer func() {
if r := recover(); r != nil {
t.Fatal("paniced")
}
}()
id := ID{
ID: testPeerID1,
Addresses: []ma.Multiaddr{testMAddr},
ClusterPeers: []peer.ID{testPeerID2},
ClusterPeersAddresses: []ma.Multiaddr{testMAddr2},
Version: "testv",
Commit: "ab",
RPCProtocolVersion: "testp",
Error: "teste",
IPFS: IPFSID{
ID: testPeerID2,
Addresses: []ma.Multiaddr{testMAddr3},
Error: "abc",
},
}
newid := id.ToSerial().ToID()
if id.ID != newid.ID {
t.Error("mismatching Peer IDs")
}
if !id.Addresses[0].Equal(newid.Addresses[0]) {
t.Error("mismatching addresses")
}
if id.ClusterPeers[0] != newid.ClusterPeers[0] {
t.Error("mismatching clusterPeers")
}
if !id.ClusterPeersAddresses[0].Equal(newid.ClusterPeersAddresses[0]) {
t.Error("mismatching clusterPeersAddresses")
}
if id.Version != newid.Version ||
id.Commit != newid.Commit ||
id.RPCProtocolVersion != newid.RPCProtocolVersion ||
id.Error != newid.Error {
t.Error("some field didn't survive")
}
if id.IPFS.ID != newid.IPFS.ID {
t.Error("ipfs daemon id mismatch")
}
if !id.IPFS.Addresses[0].Equal(newid.IPFS.Addresses[0]) {
t.Error("mismatching addresses")
}
if id.IPFS.Error != newid.IPFS.Error {
t.Error("ipfs error mismatch")
}
}
func TestConnectGraphConv(t *testing.T) {
defer func() {
if r := recover(); r != nil {
t.Fatal("paniced")
}
}()
cg := ConnectGraph{
ClusterID: testPeerID1,
IPFSLinks: map[peer.ID][]peer.ID{
testPeerID4: []peer.ID{testPeerID5, testPeerID6},
testPeerID5: []peer.ID{testPeerID4, testPeerID6},
testPeerID6: []peer.ID{testPeerID4, testPeerID5},
},
ClusterLinks: map[peer.ID][]peer.ID{
testPeerID1: []peer.ID{testPeerID2, testPeerID3},
testPeerID2: []peer.ID{testPeerID1, testPeerID3},
testPeerID3: []peer.ID{testPeerID1, testPeerID2},
},
ClustertoIPFS: map[peer.ID]peer.ID{
testPeerID1: testPeerID4,
testPeerID2: testPeerID5,
testPeerID3: testPeerID6,
},
}
cgNew := cg.ToSerial().ToConnectGraph()
if !reflect.DeepEqual(cg, cgNew) {
t.Fatal("The new connect graph should be equivalent to the old")
}
}
func TestMultiaddrConv(t *testing.T) {
defer func() {
if r := recover(); r != nil {
t.Fatal("paniced")
}
}()
addrs := []ma.Multiaddr{testMAddr2}
new := MultiaddrsToSerial(addrs).ToMultiaddrs()
if !addrs[0].Equal(new[0]) {
t.Error("mismatch")
}
}
func TestPinConv(t *testing.T) {
defer func() {
if r := recover(); r != nil {
t.Fatal("paniced")
}
}()
c := Pin{
Cid: testCid1,
Type: ClusterDAGType,
Allocations: []peer.ID{testPeerID1},
Reference: testCid2,
MaxDepth: -1,
PinOptions: PinOptions{
ReplicationFactorMax: -1,
ReplicationFactorMin: -1,
Name: "A test pin",
},
}
newc := c.ToSerial().ToPin()
if !c.Cid.Equals(newc.Cid) ||
c.Allocations[0] != newc.Allocations[0] ||
c.ReplicationFactorMin != newc.ReplicationFactorMin ||
c.ReplicationFactorMax != newc.ReplicationFactorMax ||
c.MaxDepth != newc.MaxDepth ||
!c.Reference.Equals(newc.Reference) ||
c.Name != newc.Name || c.Type != newc.Type {
fmt.Printf("c: %+v\ncnew: %+v\n", c, newc)
t.Fatal("mismatch")
}
if !c.Equals(newc) {
t.Error("all pin fields are equal but Equals returns false")
}
}
func TestMetric(t *testing.T) {
m := Metric{
Name: "hello",
@ -265,48 +93,6 @@ func TestMetric(t *testing.T) {
}
}
func BenchmarkPinSerial_ToPin(b *testing.B) {
pin := Pin{
Cid: testCid1,
Type: ClusterDAGType,
Allocations: []peer.ID{testPeerID1},
Reference: testCid2,
MaxDepth: -1,
PinOptions: PinOptions{
ReplicationFactorMax: -1,
ReplicationFactorMin: -1,
Name: "A test pin",
},
}
pinS := pin.ToSerial()
b.ResetTimer()
for i := 0; i < b.N; i++ {
pinS.ToPin()
}
}
func BenchmarkPinSerial_DecodeCid(b *testing.B) {
pin := Pin{
Cid: testCid1,
Type: ClusterDAGType,
Allocations: []peer.ID{testPeerID1},
Reference: testCid2,
MaxDepth: -1,
PinOptions: PinOptions{
ReplicationFactorMax: -1,
ReplicationFactorMin: -1,
Name: "A test pin",
},
}
pinS := pin.ToSerial()
b.ResetTimer()
for i := 0; i < b.N; i++ {
pinS.DecodeCid()
}
}
func TestConvertPinType(t *testing.T) {
for _, t1 := range []PinType{BadType, ShardType} {
i := convertPinType(t1)
@ -344,10 +130,37 @@ func checkDupTags(t *testing.T, name string, typ reflect.Type, tags map[string]s
}
}
// TestPinTags checks that we are not re-using the same codec tag for
// different fields in the Pin object.
func TestPinTags(t *testing.T) {
typ := reflect.TypeOf(PinSerial{})
// TestDupTags checks that we are not re-using the same codec tag for
// different fields in the types objects.
func TestDupTags(t *testing.T) {
typ := reflect.TypeOf(Pin{})
checkDupTags(t, "codec", typ, nil)
typ = reflect.TypeOf(ID{})
checkDupTags(t, "codec", typ, nil)
typ = reflect.TypeOf(GlobalPinInfo{})
checkDupTags(t, "codec", typ, nil)
typ = reflect.TypeOf(PinInfo{})
checkDupTags(t, "codec", typ, nil)
typ = reflect.TypeOf(ConnectGraph{})
checkDupTags(t, "codec", typ, nil)
typ = reflect.TypeOf(ID{})
checkDupTags(t, "codec", typ, nil)
typ = reflect.TypeOf(NodeWithMeta{})
checkDupTags(t, "codec", typ, nil)
typ = reflect.TypeOf(Metric{})
checkDupTags(t, "codec", typ, nil)
typ = reflect.TypeOf(Error{})
checkDupTags(t, "codec", typ, nil)
typ = reflect.TypeOf(IPFSRepoStat{})
checkDupTags(t, "codec", typ, nil)
}
@ -400,3 +213,65 @@ func TestPinOptionsQuery(t *testing.T) {
}
}
}
func TestIDCodec(t *testing.T) {
TestPeerID1, _ := peer.IDB58Decode("QmXZrtE5jQwXNqCJMfHUTQkvhQ4ZAnqMnmzFMJfLewuabc")
TestPeerID2, _ := peer.IDB58Decode("QmUZ13osndQ5uL4tPWHXe3iBgBgq9gfewcBMSCAuMBsDJ6")
TestPeerID3, _ := peer.IDB58Decode("QmPGDFvBkgWhvzEK9qaTWrWurSwqXNmhnK3hgELPdZZNPa")
addr, _ := NewMultiaddr("/ip4/1.2.3.4")
id := &ID{
ID: TestPeerID1,
Addresses: []Multiaddr{addr},
ClusterPeers: []peer.ID{TestPeerID2},
ClusterPeersAddresses: []Multiaddr{addr},
Version: "2",
Commit: "",
RPCProtocolVersion: "abc",
Error: "",
IPFS: IPFSID{
ID: TestPeerID3,
Addresses: []Multiaddr{addr},
Error: "",
},
Peername: "hi",
}
var buf bytes.Buffer
enc := codec.NewEncoder(&buf, &codec.MsgpackHandle{})
err := enc.Encode(id)
if err != nil {
t.Fatal(err)
}
var buf2 = bytes.NewBuffer(buf.Bytes())
dec := codec.NewDecoder(buf2, &codec.MsgpackHandle{})
var id2 ID
err = dec.Decode(&id2)
if err != nil {
t.Fatal(err)
}
}
func TestPinCodec(t *testing.T) {
ci, _ := cid.Decode("QmXZrtE5jQwXNqCJMfHUTQkvhQ4ZAnqMnmzFMJfLewuabc")
pin := PinCid(ci)
var buf bytes.Buffer
enc := codec.NewEncoder(&buf, &codec.MsgpackHandle{})
err := enc.Encode(pin)
if err != nil {
t.Fatal(err)
}
var buf2 = bytes.NewBuffer(buf.Bytes())
dec := codec.NewDecoder(buf2, &codec.MsgpackHandle{})
var pin2 Pin
err = dec.Decode(&pin2)
if err != nil {
t.Fatal(err)
}
}

View File

@ -5,6 +5,8 @@ import (
peer "github.com/libp2p/go-libp2p-peer"
ma "github.com/multiformats/go-multiaddr"
multiaddr "github.com/multiformats/go-multiaddr"
)
// PeersToStrings IDB58Encodes a list of peers.
@ -57,11 +59,12 @@ func Libp2pMultiaddrSplit(addr ma.Multiaddr) (peer.ID, ma.Multiaddr, error) {
// MustLibp2pMultiaddrJoin takes a LibP2P multiaddress and a peer ID and
// encapsulates a new /ipfs/<peerID> address. It will panic if the given
// peer ID is bad.
func MustLibp2pMultiaddrJoin(addr ma.Multiaddr, p peer.ID) ma.Multiaddr {
pidAddr, err := ma.NewMultiaddr("/ipfs/" + peer.IDB58Encode(p))
func MustLibp2pMultiaddrJoin(addr Multiaddr, p peer.ID) Multiaddr {
v := addr.Value()
pidAddr, err := multiaddr.NewMultiaddr("/ipfs/" + peer.IDB58Encode(p))
// let this break badly
if err != nil {
panic("called MustLibp2pMultiaddrJoin with bad peer!")
}
return addr.Encapsulate(pidAddr)
return Multiaddr{Multiaddr: v.Encapsulate(pidAddr)}
}
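A hypothetical call showing the encapsulation (p is some peer.ID):

addr, _ := NewMultiaddr("/ip4/1.2.3.4/tcp/9096")
full := MustLibp2pMultiaddrJoin(addr, p)
// full.String() == "/ip4/1.2.3.4/tcp/9096/ipfs/" + peer.IDB58Encode(p)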

View File

@ -230,7 +230,7 @@ func (c *Cluster) syncWatcher() {
}
}
func (c *Cluster) sendInformerMetric(ctx context.Context) (api.Metric, error) {
func (c *Cluster) sendInformerMetric(ctx context.Context) (*api.Metric, error) {
ctx, span := trace.StartSpan(ctx, "cluster/sendInformerMetric")
defer span.End()
@ -288,7 +288,7 @@ func (c *Cluster) pushPingMetrics(ctx context.Context) {
ticker := time.NewTicker(c.config.MonitorPingInterval)
for {
metric := api.Metric{
metric := &api.Metric{
Name: pingMetricName,
Peer: c.id,
Valid: true,
@ -562,21 +562,26 @@ func (c *Cluster) Done() <-chan struct{} {
}
// ID returns information about the Cluster peer
func (c *Cluster) ID(ctx context.Context) api.ID {
func (c *Cluster) ID(ctx context.Context) *api.ID {
_, span := trace.StartSpan(ctx, "cluster/ID")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
// ignore error since it is included in response object
ipfsID, _ := c.ipfs.ID(ctx)
var addrs []ma.Multiaddr
ipfsID, err := c.ipfs.ID(ctx)
if err != nil {
ipfsID = &api.IPFSID{
Error: err.Error(),
}
}
var addrs []api.Multiaddr
addrsSet := make(map[string]struct{}) // to filter dups
for _, addr := range c.host.Addrs() {
addrsSet[addr.String()] = struct{}{}
}
for k := range addrsSet {
addr, _ := ma.NewMultiaddr(k)
addr, _ := api.NewMultiaddr(k)
addrs = append(addrs, api.MustLibp2pMultiaddrJoin(addr, c.id))
}
@ -587,7 +592,7 @@ func (c *Cluster) ID(ctx context.Context) api.ID {
peers, _ = c.consensus.Peers(ctx)
}
return api.ID{
return &api.ID{
ID: c.id,
//PublicKey: c.host.Peerstore().PubKey(c.id),
Addresses: addrs,
@ -595,7 +600,7 @@ func (c *Cluster) ID(ctx context.Context) api.ID {
ClusterPeersAddresses: c.peerManager.PeersAddresses(peers),
Version: version.Version.String(),
RPCProtocolVersion: version.RPCProtocol,
IPFS: ipfsID,
IPFS: *ipfsID,
Peername: c.config.Peername,
}
}
@ -610,7 +615,7 @@ func (c *Cluster) ID(ctx context.Context) api.ID {
//
// The new peer ID will be passed to the consensus
// component to be added to the peerset.
func (c *Cluster) PeerAdd(ctx context.Context, pid peer.ID) (api.ID, error) {
func (c *Cluster) PeerAdd(ctx context.Context, pid peer.ID) (*api.ID, error) {
_, span := trace.StartSpan(ctx, "cluster/PeerAdd")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -626,7 +631,7 @@ func (c *Cluster) PeerAdd(ctx context.Context, pid peer.ID) (api.ID, error) {
err := c.consensus.AddPeer(ctx, pid)
if err != nil {
logger.Error(err)
id := api.ID{ID: pid, Error: err.Error()}
id := &api.ID{ID: pid, Error: err.Error()}
return id, err
}
@ -643,7 +648,7 @@ func (c *Cluster) PeerAdd(ctx context.Context, pid peer.ID) (api.ID, error) {
logger.Error(err)
}
id := api.ID{}
id := &api.ID{}
// wait up to 2 seconds for new peer to catch up
// and return an up to date api.ID object.
@ -719,13 +724,13 @@ func (c *Cluster) Join(ctx context.Context, addr ma.Multiaddr) error {
// Note that PeerAdd() on the remote peer will
// figure out what our real address is (obviously not
// ListenAddr).
var myID api.IDSerial
var myID api.ID
err = c.rpcClient.CallContext(
ctx,
pid,
"Cluster",
"PeerAdd",
peer.IDB58Encode(c.id),
c.id,
&myID,
)
if err != nil {
@ -814,7 +819,7 @@ func (c *Cluster) StateSync(ctx context.Context) error {
// StatusAll returns the GlobalPinInfo for all tracked Cids in all peers.
// If an error happens, the slice will contain as much information as
// could be fetched from other peers.
func (c *Cluster) StatusAll(ctx context.Context) ([]api.GlobalPinInfo, error) {
func (c *Cluster) StatusAll(ctx context.Context) ([]*api.GlobalPinInfo, error) {
_, span := trace.StartSpan(ctx, "cluster/StatusAll")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -823,7 +828,7 @@ func (c *Cluster) StatusAll(ctx context.Context) ([]api.GlobalPinInfo, error) {
}
// StatusAllLocal returns the PinInfo for all the tracked Cids in this peer.
func (c *Cluster) StatusAllLocal(ctx context.Context) []api.PinInfo {
func (c *Cluster) StatusAllLocal(ctx context.Context) []*api.PinInfo {
_, span := trace.StartSpan(ctx, "cluster/StatusAllLocal")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -834,7 +839,7 @@ func (c *Cluster) StatusAllLocal(ctx context.Context) []api.PinInfo {
// Status returns the GlobalPinInfo for a given Cid as fetched from all
// current peers. If an error happens, the GlobalPinInfo should contain
// as much information as could be fetched from the other peers.
func (c *Cluster) Status(ctx context.Context, h cid.Cid) (api.GlobalPinInfo, error) {
func (c *Cluster) Status(ctx context.Context, h cid.Cid) (*api.GlobalPinInfo, error) {
_, span := trace.StartSpan(ctx, "cluster/Status")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -843,7 +848,7 @@ func (c *Cluster) Status(ctx context.Context, h cid.Cid) (api.GlobalPinInfo, err
}
// StatusLocal returns this peer's PinInfo for a given Cid.
func (c *Cluster) StatusLocal(ctx context.Context, h cid.Cid) api.PinInfo {
func (c *Cluster) StatusLocal(ctx context.Context, h cid.Cid) *api.PinInfo {
_, span := trace.StartSpan(ctx, "cluster/StatusLocal")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -855,7 +860,7 @@ func (c *Cluster) StatusLocal(ctx context.Context, h cid.Cid) api.PinInfo {
// that the state of tracked items matches the state reported by the IPFS daemon
// and returning the results as GlobalPinInfo. If an error happens, the slice
// will contain as much information as could be fetched from the peers.
func (c *Cluster) SyncAll(ctx context.Context) ([]api.GlobalPinInfo, error) {
func (c *Cluster) SyncAll(ctx context.Context) ([]*api.GlobalPinInfo, error) {
_, span := trace.StartSpan(ctx, "cluster/SyncAll")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -868,7 +873,7 @@ func (c *Cluster) SyncAll(ctx context.Context) ([]api.GlobalPinInfo, error) {
//
// SyncAllLocal returns the list of PinInfo that where updated because of
// the operation, along with those in error states.
func (c *Cluster) SyncAllLocal(ctx context.Context) ([]api.PinInfo, error) {
func (c *Cluster) SyncAllLocal(ctx context.Context) ([]*api.PinInfo, error) {
_, span := trace.StartSpan(ctx, "cluster/SyncAllLocal")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -885,7 +890,7 @@ func (c *Cluster) SyncAllLocal(ctx context.Context) ([]api.PinInfo, error) {
// Sync triggers a SyncLocal() operation for a given Cid.
// in all cluster peers.
func (c *Cluster) Sync(ctx context.Context, h cid.Cid) (api.GlobalPinInfo, error) {
func (c *Cluster) Sync(ctx context.Context, h cid.Cid) (*api.GlobalPinInfo, error) {
_, span := trace.StartSpan(ctx, "cluster/Sync")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -897,14 +902,14 @@ func (c *Cluster) Sync(ctx context.Context, h cid.Cid) (api.GlobalPinInfo, error
func (c *Cluster) localPinInfoOp(
ctx context.Context,
h cid.Cid,
f func(context.Context, cid.Cid) (api.PinInfo, error),
) (pInfo api.PinInfo, err error) {
f func(context.Context, cid.Cid) (*api.PinInfo, error),
) (pInfo *api.PinInfo, err error) {
ctx, span := trace.StartSpan(ctx, "cluster/localPinInfoOp")
defer span.End()
cids, err := c.cidsFromMetaPin(ctx, h)
if err != nil {
return api.PinInfo{}, err
return nil, err
}
for _, ci := range cids {
@ -923,7 +928,7 @@ func (c *Cluster) localPinInfoOp(
// SyncLocal performs a local sync operation for the given Cid. This will
// tell the tracker to verify the status of the Cid against the IPFS daemon.
// It returns the updated PinInfo for the Cid.
func (c *Cluster) SyncLocal(ctx context.Context, h cid.Cid) (pInfo api.PinInfo, err error) {
func (c *Cluster) SyncLocal(ctx context.Context, h cid.Cid) (pInfo *api.PinInfo, err error) {
_, span := trace.StartSpan(ctx, "cluster/SyncLocal")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -933,7 +938,7 @@ func (c *Cluster) SyncLocal(ctx context.Context, h cid.Cid) (pInfo api.PinInfo,
// RecoverAllLocal triggers a RecoverLocal operation for all Cids tracked
// by this peer.
func (c *Cluster) RecoverAllLocal(ctx context.Context) ([]api.PinInfo, error) {
func (c *Cluster) RecoverAllLocal(ctx context.Context) ([]*api.PinInfo, error) {
_, span := trace.StartSpan(ctx, "cluster/RecoverAllLocal")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -943,7 +948,7 @@ func (c *Cluster) RecoverAllLocal(ctx context.Context) ([]api.PinInfo, error) {
// Recover triggers a recover operation for a given Cid in all
// cluster peers.
func (c *Cluster) Recover(ctx context.Context, h cid.Cid) (api.GlobalPinInfo, error) {
func (c *Cluster) Recover(ctx context.Context, h cid.Cid) (*api.GlobalPinInfo, error) {
_, span := trace.StartSpan(ctx, "cluster/Recover")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -953,7 +958,7 @@ func (c *Cluster) Recover(ctx context.Context, h cid.Cid) (api.GlobalPinInfo, er
// RecoverLocal triggers a recover operation for a given Cid in this peer only.
// It returns the updated PinInfo, after recovery.
func (c *Cluster) RecoverLocal(ctx context.Context, h cid.Cid) (pInfo api.PinInfo, err error) {
func (c *Cluster) RecoverLocal(ctx context.Context, h cid.Cid) (pInfo *api.PinInfo, err error) {
_, span := trace.StartSpan(ctx, "cluster/RecoverLocal")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -965,7 +970,7 @@ func (c *Cluster) RecoverLocal(ctx context.Context, h cid.Cid) (pInfo api.PinInf
// of the current global state. This is the source of truth as to which
// pins are managed and their allocation, but does not indicate if
// the item is successfully pinned. For that, use StatusAll().
func (c *Cluster) Pins(ctx context.Context) []api.Pin {
func (c *Cluster) Pins(ctx context.Context) []*api.Pin {
_, span := trace.StartSpan(ctx, "cluster/Pins")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -973,7 +978,7 @@ func (c *Cluster) Pins(ctx context.Context) []api.Pin {
cState, err := c.consensus.State(ctx)
if err != nil {
logger.Error(err)
return []api.Pin{}
return []*api.Pin{}
}
return cState.List(ctx)
}
@ -984,14 +989,14 @@ func (c *Cluster) Pins(ctx context.Context) []api.Pin {
// assigned for the requested Cid, but does not indicate if
// the item is successfully pinned. For that, use Status(). PinGet
// returns an error if the given Cid is not part of the global state.
func (c *Cluster) PinGet(ctx context.Context, h cid.Cid) (api.Pin, error) {
func (c *Cluster) PinGet(ctx context.Context, h cid.Cid) (*api.Pin, error) {
_, span := trace.StartSpan(ctx, "cluster/PinGet")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
st, err := c.consensus.State(ctx)
if err != nil {
return api.PinCid(h), err
return nil, err
}
pin, ok := st.Get(ctx, h)
if !ok {
@ -1016,7 +1021,7 @@ func (c *Cluster) PinGet(ctx context.Context, h cid.Cid) (api.Pin, error) {
// this set then the remaining peers are allocated in order from the rest of
// the cluster. Priority allocations are best effort. If any priority peers
// are unavailable then Pin will simply allocate from the rest of the cluster.
func (c *Cluster) Pin(ctx context.Context, pin api.Pin) error {
func (c *Cluster) Pin(ctx context.Context, pin *api.Pin) error {
_, span := trace.StartSpan(ctx, "cluster/Pin")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -1044,7 +1049,7 @@ func (c *Cluster) setupReplicationFactor(pin *api.Pin) error {
func checkPinType(pin *api.Pin) error {
switch pin.Type {
case api.DataType:
if pin.Reference != cid.Undef {
if pin.Reference != nil {
return errors.New("data pins should not reference other pins")
}
case api.ShardType:
@ -1062,14 +1067,14 @@ func checkPinType(pin *api.Pin) error {
if pin.MaxDepth != 0 {
return errors.New("must pin roots directly")
}
if pin.Reference == cid.Undef {
if pin.Reference == nil {
return errors.New("clusterDAG pins should reference a Meta pin")
}
case api.MetaType:
if pin.Allocations != nil && len(pin.Allocations) != 0 {
return errors.New("meta pin should not specify allocations")
}
if pin.Reference == cid.Undef {
if pin.Reference == nil {
return errors.New("metaPins should reference a ClusterDAG")
}
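As a hedged illustration of the constraints enforced above (not part of this diff), the sketch below shows meta, clusterDAG and data pins assembled so that they would pass checkPinType under the new pointer-based Reference field; the helper name and the three CIDs are placeholders:

// buildPinFamily is a hypothetical helper, not in the PR; it only uses
// fields and constructors that appear in this diff.
func buildPinFamily(metaCid, cdagCid, dataCid cid.Cid) []*api.Pin {
	meta := api.PinCid(metaCid)
	meta.Type = api.MetaType
	meta.Reference = &cdagCid // meta pins must reference their ClusterDAG

	cdag := api.PinCid(cdagCid)
	cdag.Type = api.ClusterDAGType
	cdag.Reference = &metaCid // clusterDAG pins must reference the Meta pin
	cdag.MaxDepth = 0         // roots are pinned directly

	data := api.PinCid(dataCid)
	data.Type = api.DataType // data pins must leave Reference nil

	return []*api.Pin{meta, cdag, data}
}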
@ -1102,7 +1107,7 @@ func (c *Cluster) setupPin(ctx context.Context, pin *api.Pin) error {
// able to evacuate a node and returns the pin object that it tried to pin, whether the pin was submitted
// to the consensus layer or skipped (due to error or to the fact
// that it was already valid) and error.
func (c *Cluster) pin(ctx context.Context, pin api.Pin, blacklist []peer.ID, prioritylist []peer.ID) (api.Pin, bool, error) {
func (c *Cluster) pin(ctx context.Context, pin *api.Pin, blacklist []peer.ID, prioritylist []peer.ID) (*api.Pin, bool, error) {
ctx, span := trace.StartSpan(ctx, "cluster/pin")
defer span.End()
@ -1111,7 +1116,7 @@ func (c *Cluster) pin(ctx context.Context, pin api.Pin, blacklist []peer.ID, pri
}
// setup pin might produce some side-effects to our pin
err := c.setupPin(ctx, &pin)
err := c.setupPin(ctx, pin)
if err != nil {
return pin, false, err
}
@ -1147,7 +1152,7 @@ func (c *Cluster) pin(ctx context.Context, pin api.Pin, blacklist []peer.ID, pri
return pin, true, c.consensus.LogPin(ctx, pin)
}
func (c *Cluster) unpin(ctx context.Context, h cid.Cid) (api.Pin, error) {
func (c *Cluster) unpin(ctx context.Context, h cid.Cid) (*api.Pin, error) {
_, span := trace.StartSpan(ctx, "cluster/unpin")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -1197,7 +1202,7 @@ func (c *Cluster) Unpin(ctx context.Context, h cid.Cid) error {
// nodes that it references. It handles the case where multiple parents
// reference the same metadata node, only unpinning those nodes without
// existing references
func (c *Cluster) unpinClusterDag(metaPin api.Pin) error {
func (c *Cluster) unpinClusterDag(metaPin *api.Pin) error {
ctx, span := trace.StartSpan(c.ctx, "cluster/unpinClusterDag")
defer span.End()
@ -1219,14 +1224,14 @@ func (c *Cluster) unpinClusterDag(metaPin api.Pin) error {
// PinPath pins a CID resolved from its IPFS Path. It returns the resolved
// Pin object.
func (c *Cluster) PinPath(ctx context.Context, path api.PinPath) (api.Pin, error) {
func (c *Cluster) PinPath(ctx context.Context, path *api.PinPath) (*api.Pin, error) {
_, span := trace.StartSpan(ctx, "cluster/PinPath")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
ci, err := c.ipfs.Resolve(ctx, path.Path)
if err != nil {
return api.Pin{}, err
return nil, err
}
p := api.PinCid(ci)
@ -1237,14 +1242,14 @@ func (c *Cluster) PinPath(ctx context.Context, path api.PinPath) (api.Pin, error
// UnpinPath unpins a CID resolved from its IPFS Path. It returns the
// previously pinned Pin object.
func (c *Cluster) UnpinPath(ctx context.Context, path string) (api.Pin, error) {
func (c *Cluster) UnpinPath(ctx context.Context, path string) (*api.Pin, error) {
_, span := trace.StartSpan(ctx, "cluster/UnpinPath")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
ci, err := c.ipfs.Resolve(ctx, path)
if err != nil {
return api.Pin{}, err
return nil, err
}
return c.unpin(ctx, ci)
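A hedged usage sketch (not part of this diff) of the pointer-based path API shown above; the helper name and the path literal are placeholders:

// pinAndUnpinByPath is a hypothetical helper, not in the PR.
func pinAndUnpinByPath(ctx context.Context, c *Cluster) error {
	pin, err := c.PinPath(ctx, &api.PinPath{Path: "/ipfs/QmExample/file.txt"})
	if err != nil {
		return err
	}
	logger.Infof("pinned %s (resolved from path)", pin.Cid)
	_, err = c.UnpinPath(ctx, "/ipfs/QmExample/file.txt")
	return err
}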
@ -1272,7 +1277,7 @@ func (c *Cluster) Version() string {
}
// Peers returns the IDs of the members of this Cluster.
func (c *Cluster) Peers(ctx context.Context) []api.ID {
func (c *Cluster) Peers(ctx context.Context) []*api.ID {
_, span := trace.StartSpan(ctx, "cluster/Peers")
defer span.End()
ctx = trace.NewContext(c.ctx, span)
@ -1281,12 +1286,11 @@ func (c *Cluster) Peers(ctx context.Context) []api.ID {
if err != nil {
logger.Error(err)
logger.Error("an empty list of peers will be returned")
return []api.ID{}
return []*api.ID{}
}
lenMembers := len(members)
peersSerial := make([]api.IDSerial, lenMembers, lenMembers)
peers := make([]api.ID, lenMembers, lenMembers)
peers := make([]*api.ID, lenMembers, lenMembers)
ctxs, cancels := rpcutil.CtxsWithCancel(ctx, lenMembers)
defer rpcutil.MultiCancel(cancels)
@ -1297,43 +1301,37 @@ func (c *Cluster) Peers(ctx context.Context) []api.ID {
"Cluster",
"ID",
struct{}{},
rpcutil.CopyIDSerialsToIfaces(peersSerial),
rpcutil.CopyIDsToIfaces(peers),
)
for i, err := range errs {
if err != nil {
peersSerial[i].ID = peer.IDB58Encode(members[i])
peersSerial[i].Error = err.Error()
peers[i] = &api.ID{}
peers[i].ID = members[i]
peers[i].Error = err.Error()
}
}
for i, ps := range peersSerial {
peers[i] = ps.ToID()
}
return peers
}
func (c *Cluster) globalPinInfoCid(ctx context.Context, method string, h cid.Cid) (api.GlobalPinInfo, error) {
func (c *Cluster) globalPinInfoCid(ctx context.Context, method string, h cid.Cid) (*api.GlobalPinInfo, error) {
ctx, span := trace.StartSpan(ctx, "cluster/globalPinInfoCid")
defer span.End()
pin := api.GlobalPinInfo{
pin := &api.GlobalPinInfo{
Cid: h,
PeerMap: make(map[peer.ID]api.PinInfo),
PeerMap: make(map[string]*api.PinInfo),
}
members, err := c.consensus.Peers(ctx)
if err != nil {
logger.Error(err)
return api.GlobalPinInfo{}, err
return nil, err
}
lenMembers := len(members)
replies := make([]api.PinInfoSerial, lenMembers, lenMembers)
arg := api.Pin{
Cid: h,
}
replies := make([]*api.PinInfo, lenMembers, lenMembers)
ctxs, cancels := rpcutil.CtxsWithCancel(ctx, lenMembers)
defer rpcutil.MultiCancel(cancels)
@ -1342,31 +1340,22 @@ func (c *Cluster) globalPinInfoCid(ctx context.Context, method string, h cid.Cid
members,
"Cluster",
method,
arg.ToSerial(),
rpcutil.CopyPinInfoSerialToIfaces(replies),
h,
rpcutil.CopyPinInfoToIfaces(replies),
)
for i, rserial := range replies {
for i, r := range replies {
e := errs[i]
// Potentially rserial is empty. But ToPinInfo ignores all
// errors from underlying libraries. In that case .Status
// will be TrackerStatusUndefined (0)
r := rserial.ToPinInfo()
// No error. Parse and continue
if e == nil {
pin.PeerMap[members[i]] = r
pin.PeerMap[peer.IDB58Encode(members[i])] = r
continue
}
// Deal with error cases (err != nil): wrap errors in PinInfo
// In this case, we had no answer at all. The contacted peer
// must be offline or unreachable.
if r.Status == api.TrackerStatusUndefined {
logger.Errorf("%s: error in broadcast response from %s: %s ", c.id, members[i], e)
pin.PeerMap[members[i]] = api.PinInfo{
pin.PeerMap[peer.IDB58Encode(members[i])] = &api.PinInfo{
Cid: h,
Peer: members[i],
PeerName: members[i].String(),
@ -1374,31 +1363,26 @@ func (c *Cluster) globalPinInfoCid(ctx context.Context, method string, h cid.Cid
TS: time.Now(),
Error: e.Error(),
}
} else { // there was an rpc error, but got a valid response :S
r.Error = e.Error()
pin.PeerMap[members[i]] = r
// unlikely to come down this path
}
}
return pin, nil
}
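A hedged sketch (not part of this diff) of consuming the string-keyed PeerMap that globalPinInfoCid now builds; the helper name is hypothetical:

// printGlobalStatus is a hypothetical helper, not in the PR.
func printGlobalStatus(gpi *api.GlobalPinInfo) {
	for pidStr, pinfo := range gpi.PeerMap {
		// keys are B58-encoded peer IDs after this change
		fmt.Printf("%s @ %s: %s", gpi.Cid, pidStr, pinfo.Status)
		if pinfo.Error != "" {
			fmt.Printf(" (%s)", pinfo.Error)
		}
		fmt.Println()
	}
}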
func (c *Cluster) globalPinInfoSlice(ctx context.Context, method string) ([]api.GlobalPinInfo, error) {
func (c *Cluster) globalPinInfoSlice(ctx context.Context, method string) ([]*api.GlobalPinInfo, error) {
ctx, span := trace.StartSpan(ctx, "cluster/globalPinInfoSlice")
defer span.End()
infos := make([]api.GlobalPinInfo, 0)
fullMap := make(map[string]api.GlobalPinInfo)
infos := make([]*api.GlobalPinInfo, 0)
fullMap := make(map[cid.Cid]*api.GlobalPinInfo)
members, err := c.consensus.Peers(ctx)
if err != nil {
logger.Error(err)
return []api.GlobalPinInfo{}, err
return nil, err
}
lenMembers := len(members)
replies := make([][]api.PinInfoSerial, lenMembers, lenMembers)
replies := make([][]*api.PinInfo, lenMembers, lenMembers)
ctxs, cancels := rpcutil.CtxsWithCancel(ctx, lenMembers)
defer rpcutil.MultiCancel(cancels)
@ -1409,22 +1393,24 @@ func (c *Cluster) globalPinInfoSlice(ctx context.Context, method string) ([]api.
"Cluster",
method,
struct{}{},
rpcutil.CopyPinInfoSerialSliceToIfaces(replies),
rpcutil.CopyPinInfoSliceToIfaces(replies),
)
mergePins := func(pins []api.PinInfoSerial) {
for _, pserial := range pins {
p := pserial.ToPinInfo()
item, ok := fullMap[pserial.Cid]
mergePins := func(pins []*api.PinInfo) {
for _, p := range pins {
if p == nil {
continue
}
item, ok := fullMap[p.Cid]
if !ok {
fullMap[pserial.Cid] = api.GlobalPinInfo{
fullMap[p.Cid] = &api.GlobalPinInfo{
Cid: p.Cid,
PeerMap: map[peer.ID]api.PinInfo{
p.Peer: p,
PeerMap: map[string]*api.PinInfo{
peer.IDB58Encode(p.Peer): p,
},
}
} else {
item.PeerMap[p.Peer] = p
item.PeerMap[peer.IDB58Encode(p.Peer)] = p
}
}
}
@ -1441,9 +1427,8 @@ func (c *Cluster) globalPinInfoSlice(ctx context.Context, method string) ([]api.
// Merge any errors
for p, msg := range erroredPeers {
for cidStr := range fullMap {
c, _ := cid.Decode(cidStr)
fullMap[cidStr].PeerMap[p] = api.PinInfo{
for c := range fullMap {
fullMap[c].PeerMap[peer.IDB58Encode(p)] = &api.PinInfo{
Cid: c,
Peer: p,
Status: api.TrackerStatusClusterError,
@ -1460,25 +1445,25 @@ func (c *Cluster) globalPinInfoSlice(ctx context.Context, method string) ([]api.
return infos, nil
}
func (c *Cluster) getIDForPeer(ctx context.Context, pid peer.ID) (api.ID, error) {
func (c *Cluster) getIDForPeer(ctx context.Context, pid peer.ID) (*api.ID, error) {
ctx, span := trace.StartSpan(ctx, "cluster/getIDForPeer")
defer span.End()
idSerial := api.ID{ID: pid}.ToSerial()
var id api.ID
err := c.rpcClient.CallContext(
ctx,
pid,
"Cluster",
"ID",
struct{}{},
&idSerial,
&id,
)
id := idSerial.ToID()
if err != nil {
logger.Error(err)
id.ID = pid
id.Error = err.Error()
}
return id, err
return &id, err
}
// cidsFromMetaPin expands a meta-pin and returns a list of Cids that
@ -1506,8 +1491,11 @@ func (c *Cluster) cidsFromMetaPin(ctx context.Context, h cid.Cid) ([]cid.Cid, er
return list, nil
}
list = append([]cid.Cid{pin.Reference}, list...)
clusterDagPin, err := c.PinGet(ctx, pin.Reference)
if pin.Reference == nil {
return nil, errors.New("metaPin.Reference is unset")
}
list = append([]cid.Cid{*pin.Reference}, list...)
clusterDagPin, err := c.PinGet(ctx, *pin.Reference)
if err != nil {
return list, fmt.Errorf("could not get clusterDAG pin from state. Malformed pin?: %s", err)
}

View File

@ -55,9 +55,9 @@ type mockConnector struct {
blocks sync.Map
}
func (ipfs *mockConnector) ID(ctx context.Context) (api.IPFSID, error) {
return api.IPFSID{
ID: test.TestPeerID1,
func (ipfs *mockConnector) ID(ctx context.Context) (*api.IPFSID, error) {
return &api.IPFSID{
ID: test.PeerID1,
}, nil
}
@ -101,12 +101,12 @@ func (ipfs *mockConnector) PinLs(ctx context.Context, filter string) (map[string
return m, nil
}
func (ipfs *mockConnector) SwarmPeers(ctx context.Context) (api.SwarmPeers, error) {
return []peer.ID{test.TestPeerID4, test.TestPeerID5}, nil
func (ipfs *mockConnector) SwarmPeers(ctx context.Context) ([]peer.ID, error) {
return []peer.ID{test.PeerID4, test.PeerID5}, nil
}
func (ipfs *mockConnector) RepoStat(ctx context.Context) (api.IPFSRepoStat, error) {
return api.IPFSRepoStat{RepoSize: 100, StorageMax: 1000}, nil
func (ipfs *mockConnector) RepoStat(ctx context.Context) (*api.IPFSRepoStat, error) {
return &api.IPFSRepoStat{RepoSize: 100, StorageMax: 1000}, nil
}
func (ipfs *mockConnector) Resolve(ctx context.Context, path string) (cid.Cid, error) {
@ -115,13 +115,13 @@ func (ipfs *mockConnector) Resolve(ctx context.Context, path string) (cid.Cid, e
return cid.Undef, err
}
return test.MustDecodeCid(test.TestCidResolved), nil
return test.CidResolved, nil
}
func (ipfs *mockConnector) ConnectSwarms(ctx context.Context) error { return nil }
func (ipfs *mockConnector) ConfigKey(keypath string) (interface{}, error) { return nil, nil }
func (ipfs *mockConnector) BlockPut(ctx context.Context, nwm api.NodeWithMeta) error {
ipfs.blocks.Store(nwm.Cid, nwm.Data)
func (ipfs *mockConnector) BlockPut(ctx context.Context, nwm *api.NodeWithMeta) error {
ipfs.blocks.Store(nwm.Cid.String(), nwm.Data)
return nil
}
@ -218,7 +218,7 @@ func TestClusterStateSync(t *testing.T) {
t.Fatal("expected an error as there is no state to sync")
}
c, _ := cid.Decode(test.TestCid1)
c := test.Cid1
err = cl.Pin(ctx, api.PinCid(c))
if err != nil {
t.Fatal("pin should have worked:", err)
@ -264,7 +264,7 @@ func TestClusterPin(t *testing.T) {
defer cleanRaft()
defer cl.Shutdown(ctx)
c, _ := cid.Decode(test.TestCid1)
c := test.Cid1
err := cl.Pin(ctx, api.PinCid(c))
if err != nil {
t.Fatal("pin should have worked:", err)
@ -287,16 +287,16 @@ func TestClusterPinPath(t *testing.T) {
defer cleanRaft()
defer cl.Shutdown(ctx)
pin, err := cl.PinPath(ctx, api.PinPath{Path: test.TestPathIPFS2})
pin, err := cl.PinPath(ctx, &api.PinPath{Path: test.PathIPFS2})
if err != nil {
t.Fatal("pin should have worked:", err)
}
if pin.Cid.String() != test.TestCidResolved {
if !pin.Cid.Equals(test.CidResolved) {
t.Error("expected a different cid, found", pin.Cid.String())
}
// test an error case
_, err = cl.PinPath(ctx, api.PinPath{Path: test.TestInvalidPath1})
_, err = cl.PinPath(ctx, &api.PinPath{Path: test.InvalidPath1})
if err == nil {
t.Error("expected an error but things worked")
}
@ -390,7 +390,7 @@ func TestUnpinShard(t *testing.T) {
pinnedCids := []cid.Cid{}
pinnedCids = append(pinnedCids, root)
metaPin, _ := cl.PinGet(ctx, root)
cDag, _ := cl.PinGet(ctx, metaPin.Reference)
cDag, _ := cl.PinGet(ctx, *metaPin.Reference)
pinnedCids = append(pinnedCids, cDag.Cid)
cDagBlock, _ := cl.ipfs.BlockGet(ctx, cDag.Cid)
cDagNode, _ := sharding.CborDataToNode(cDagBlock, "cbor")
@ -440,9 +440,9 @@ func TestUnpinShard(t *testing.T) {
}
// func singleShardedPin(t *testing.T, cl *Cluster) {
// cShard, _ := cid.Decode(test.TestShardCid)
// cCdag, _ := cid.Decode(test.TestCdagCid)
// cMeta, _ := cid.Decode(test.TestMetaRootCid)
// cShard, _ := cid.Decode(test.ShardCid)
// cCdag, _ := cid.Decode(test.CdagCid)
// cMeta, _ := cid.Decode(test.MetaRootCid)
// pinMeta(t, cl, []cid.Cid{cShard}, cCdag, cMeta)
// }
@ -509,8 +509,8 @@ func TestUnpinShard(t *testing.T) {
// t.Fatal("should have 3 pins")
// }
// // Unpinning metadata should fail
// cShard, _ := cid.Decode(test.TestShardCid)
// cCdag, _ := cid.Decode(test.TestCdagCid)
// cShard, _ := cid.Decode(test.ShardCid)
// cCdag, _ := cid.Decode(test.CdagCid)
// err := cl.Unpin(cShard)
// if err == nil {
@ -533,7 +533,7 @@ func TestUnpinShard(t *testing.T) {
// t.Fatal("should have 3 pins")
// }
// // Unpinning from root should work
// cMeta, _ := cid.Decode(test.TestMetaRootCid)
// cMeta, _ := cid.Decode(test.MetaRootCid)
// err := cl.Unpin(cMeta)
// if err != nil {
@ -544,10 +544,10 @@ func TestUnpinShard(t *testing.T) {
// func pinTwoParentsOneShard(t *testing.T, cl *Cluster) {
// singleShardedPin(t, cl)
// cShard, _ := cid.Decode(test.TestShardCid)
// cShard2, _ := cid.Decode(test.TestShardCid2)
// cCdag2, _ := cid.Decode(test.TestCdagCid2)
// cMeta2, _ := cid.Decode(test.TestMetaRootCid2)
// cShard, _ := cid.Decode(test.ShardCid)
// cShard2, _ := cid.Decode(test.ShardCid2)
// cCdag2, _ := cid.Decode(test.CdagCid2)
// cMeta2, _ := cid.Decode(test.MetaRootCid2)
// pinMeta(t, cl, []cid.Cid{cShard, cShard2}, cCdag2, cMeta2)
// shardPin, err := cl.PinGet(cShard)
@ -574,7 +574,7 @@ func TestUnpinShard(t *testing.T) {
// pinTwoParentsOneShard(t, cl)
// cShard, _ := cid.Decode(test.TestShardCid)
// cShard, _ := cid.Decode(test.ShardCid)
// shardPin, err := cl.PinGet(cShard)
// if err != nil {
// t.Fatal("double pinned shard should be pinned")
@ -593,7 +593,7 @@ func TestUnpinShard(t *testing.T) {
// if len(cl.Pins()) != 6 {
// t.Fatal("should have 6 pins")
// }
// cMeta2, _ := cid.Decode(test.TestMetaRootCid2)
// cMeta2, _ := cid.Decode(test.MetaRootCid2)
// err := cl.Unpin(cMeta2)
// if err != nil {
// t.Error(err)
@ -605,8 +605,8 @@ func TestUnpinShard(t *testing.T) {
// t.Fatal("should have 3 pins")
// }
// cShard, _ := cid.Decode(test.TestShardCid)
// cCdag, _ := cid.Decode(test.TestCdagCid)
// cShard, _ := cid.Decode(test.ShardCid)
// cCdag, _ := cid.Decode(test.CdagCid)
// shardPin, err := cl.PinGet(cShard)
// if err != nil {
// t.Fatal("double pinned shard node should still be pinned")
@ -627,7 +627,7 @@ func TestUnpinShard(t *testing.T) {
// t.Fatal("should have 6 pins")
// }
// cMeta, _ := cid.Decode(test.TestMetaRootCid)
// cMeta, _ := cid.Decode(test.MetaRootCid)
// err := cl.Unpin(cMeta)
// if err != nil {
// t.Error(err)
@ -636,9 +636,9 @@ func TestUnpinShard(t *testing.T) {
// t.Fatal("should have 4 pins")
// }
// cShard, _ := cid.Decode(test.TestShardCid)
// cShard2, _ := cid.Decode(test.TestShardCid2)
// cCdag2, _ := cid.Decode(test.TestCdagCid2)
// cShard, _ := cid.Decode(test.ShardCid)
// cShard2, _ := cid.Decode(test.ShardCid2)
// cCdag2, _ := cid.Decode(test.CdagCid2)
// shardPin, err := cl.PinGet(cShard)
// if err != nil {
// t.Fatal("double pinned shard node should still be pinned")
@ -659,14 +659,14 @@ func TestUnpinShard(t *testing.T) {
// defer cl.Shutdown()
// // First pin normally then sharding pin fails
// c, _ := cid.Decode(test.TestMetaRootCid)
// c, _ := cid.Decode(test.MetaRootCid)
// err := cl.Pin(api.PinCid(c))
// if err != nil {
// t.Fatal("pin should have worked:", err)
// }
// cCdag, _ := cid.Decode(test.TestCdagCid)
// cMeta, _ := cid.Decode(test.TestMetaRootCid)
// cCdag, _ := cid.Decode(test.CdagCid)
// cMeta, _ := cid.Decode(test.MetaRootCid)
// metaPin := api.Pin{
// Cid: cMeta,
// Type: api.MetaType,
@ -694,8 +694,8 @@ func TestUnpinShard(t *testing.T) {
// defer cleanRaft()
// defer cl.Shutdown()
// cCdag, _ := cid.Decode(test.TestCdagCid)
// cShard, _ := cid.Decode(test.TestShardCid)
// cCdag, _ := cid.Decode(test.CdagCid)
// cShard, _ := cid.Decode(test.ShardCid)
// shardPin := api.Pin{
// Cid: cShard,
// Type: api.ShardType,
@ -730,7 +730,7 @@ func TestClusterPins(t *testing.T) {
defer cleanRaft()
defer cl.Shutdown(ctx)
c, _ := cid.Decode(test.TestCid1)
c := test.Cid1
err := cl.Pin(ctx, api.PinCid(c))
if err != nil {
t.Fatal("pin should have worked:", err)
@ -751,7 +751,7 @@ func TestClusterPinGet(t *testing.T) {
defer cleanRaft()
defer cl.Shutdown(ctx)
c, _ := cid.Decode(test.TestCid1)
c := test.Cid1
err := cl.Pin(ctx, api.PinCid(c))
if err != nil {
t.Fatal("pin should have worked:", err)
@ -765,8 +765,7 @@ func TestClusterPinGet(t *testing.T) {
t.Error("the Pin does not look as expected")
}
c2, _ := cid.Decode(test.TestCid2)
_, err = cl.PinGet(ctx, c2)
_, err = cl.PinGet(ctx, test.Cid2)
if err == nil {
t.Fatal("expected an error")
}
@ -778,7 +777,7 @@ func TestClusterUnpin(t *testing.T) {
defer cleanRaft()
defer cl.Shutdown(ctx)
c, _ := cid.Decode(test.TestCid1)
c := test.Cid1
// Unpin should error without pin being committed to state
err := cl.Unpin(ctx, c)
if err == nil {
@ -810,25 +809,25 @@ func TestClusterUnpinPath(t *testing.T) {
defer cl.Shutdown(ctx)
// Unpin should error without pin being committed to state
_, err := cl.UnpinPath(ctx, test.TestPathIPFS2)
_, err := cl.UnpinPath(ctx, test.PathIPFS2)
if err == nil {
t.Error("unpin with path should have failed")
}
// Unpin after pin should succeed
pin, err := cl.PinPath(ctx, api.PinPath{Path: test.TestPathIPFS2})
pin, err := cl.PinPath(ctx, &api.PinPath{Path: test.PathIPFS2})
if err != nil {
t.Fatal("pin with should have worked:", err)
}
if pin.Cid.String() != test.TestCidResolved {
if !pin.Cid.Equals(test.CidResolved) {
t.Error("expected a different cid, found", pin.Cid.String())
}
pin, err = cl.UnpinPath(ctx, test.TestPathIPFS2)
pin, err = cl.UnpinPath(ctx, test.PathIPFS2)
if err != nil {
t.Error("unpin with path should have worked:", err)
}
if pin.Cid.String() != test.TestCidResolved {
if !pin.Cid.Equals(test.CidResolved) {
t.Error("expected a different cid, found", pin.Cid.String())
}
}
@ -866,8 +865,7 @@ func TestClusterRecoverAllLocal(t *testing.T) {
defer cleanRaft()
defer cl.Shutdown(ctx)
c, _ := cid.Decode(test.TestCid1)
err := cl.Pin(ctx, api.PinCid(c))
err := cl.Pin(ctx, api.PinCid(test.Cid1))
if err != nil {
t.Fatal("pin should have worked:", err)
}

View File

@ -22,64 +22,17 @@ func jsonFormatObject(resp interface{}) {
switch resp.(type) {
case nil:
return
case api.ID:
jsonFormatPrint(resp.(api.ID).ToSerial())
case api.GlobalPinInfo:
jsonFormatPrint(resp.(api.GlobalPinInfo).ToSerial())
case api.Pin:
jsonFormatPrint(resp.(api.Pin).ToSerial())
case api.AddedOutput:
jsonFormatPrint(resp.(api.AddedOutput))
case addedOutputQuiet:
// print original object as in JSON it does
// not make sense to have a human "quiet" output.
jsonFormatPrint(resp.(addedOutputQuiet).added)
case api.Version:
jsonFormatPrint(resp.(api.Version))
case api.Metric:
serial := resp.(api.Metric)
textFormatPrintMetric(&serial)
case api.Error:
jsonFormatPrint(resp.(api.Error))
case []api.ID:
r := resp.([]api.ID)
serials := make([]api.IDSerial, len(r), len(r))
for i, item := range r {
serials[i] = item.ToSerial()
}
jsonFormatPrint(serials)
case []api.GlobalPinInfo:
r := resp.([]api.GlobalPinInfo)
serials := make([]api.GlobalPinInfoSerial, len(r), len(r))
for i, item := range r {
serials[i] = item.ToSerial()
}
jsonFormatPrint(serials)
case []api.Pin:
r := resp.([]api.Pin)
serials := make([]api.PinSerial, len(r), len(r))
for i, item := range r {
serials[i] = item.ToSerial()
}
jsonFormatPrint(serials)
case []api.AddedOutput:
serials := resp.([]api.AddedOutput)
jsonFormatPrint(serials)
case []addedOutputQuiet:
case []*addedOutputQuiet:
// print original objects as in JSON it makes
// no sense to have a human "quiet" output
serials := resp.([]addedOutputQuiet)
serials := resp.([]*addedOutputQuiet)
var actual []*api.AddedOutput
for _, s := range serials {
actual = append(actual, s.added)
}
jsonFormatPrint(actual)
case []api.Metric:
serials := resp.([]api.Metric)
jsonFormatPrint(serials)
default:
checkErr("", errors.New("unsupported type returned"))
jsonFormatPrint(resp)
}
}
@ -93,52 +46,44 @@ func textFormatObject(resp interface{}) {
switch resp.(type) {
case nil:
return
case api.ID:
serial := resp.(api.ID).ToSerial()
textFormatPrintIDSerial(&serial)
case api.GlobalPinInfo:
serial := resp.(api.GlobalPinInfo).ToSerial()
textFormatPrintGPInfo(&serial)
case api.Pin:
serial := resp.(api.Pin).ToSerial()
textFormatPrintPin(&serial)
case api.AddedOutput:
serial := resp.(api.AddedOutput)
textFormatPrintAddedOutput(&serial)
case addedOutputQuiet:
serial := resp.(addedOutputQuiet)
textFormatPrintAddedOutputQuiet(&serial)
case api.Version:
serial := resp.(api.Version)
textFormatPrintVersion(&serial)
case api.Error:
serial := resp.(api.Error)
textFormatPrintError(&serial)
case api.Metric:
serial := resp.(api.Metric)
textFormatPrintMetric(&serial)
case []api.ID:
for _, item := range resp.([]api.ID) {
case *api.ID:
textFormatPrintID(resp.(*api.ID))
case *api.GlobalPinInfo:
textFormatPrintGPInfo(resp.(*api.GlobalPinInfo))
case *api.Pin:
textFormatPrintPin(resp.(*api.Pin))
case *api.AddedOutput:
textFormatPrintAddedOutput(resp.(*api.AddedOutput))
case *addedOutputQuiet:
textFormatPrintAddedOutputQuiet(resp.(*addedOutputQuiet))
case *api.Version:
textFormatPrintVersion(resp.(*api.Version))
case *api.Error:
textFormatPrintError(resp.(*api.Error))
case *api.Metric:
textFormatPrintMetric(resp.(*api.Metric))
case []*api.ID:
for _, item := range resp.([]*api.ID) {
textFormatObject(item)
}
case []api.GlobalPinInfo:
for _, item := range resp.([]api.GlobalPinInfo) {
case []*api.GlobalPinInfo:
for _, item := range resp.([]*api.GlobalPinInfo) {
textFormatObject(item)
}
case []api.Pin:
for _, item := range resp.([]api.Pin) {
case []*api.Pin:
for _, item := range resp.([]*api.Pin) {
textFormatObject(item)
}
case []api.AddedOutput:
for _, item := range resp.([]api.AddedOutput) {
case []*api.AddedOutput:
for _, item := range resp.([]*api.AddedOutput) {
textFormatObject(item)
}
case []addedOutputQuiet:
for _, item := range resp.([]addedOutputQuiet) {
case []*addedOutputQuiet:
for _, item := range resp.([]*addedOutputQuiet) {
textFormatObject(item)
}
case []api.Metric:
for _, item := range resp.([]api.Metric) {
case []*api.Metric:
for _, item := range resp.([]*api.Metric) {
textFormatObject(item)
}
default:
@ -146,16 +91,22 @@ func textFormatObject(resp interface{}) {
}
}
func textFormatPrintIDSerial(obj *api.IDSerial) {
func textFormatPrintID(obj *api.ID) {
if obj.Error != "" {
fmt.Printf("%s | ERROR: %s\n", obj.ID, obj.Error)
return
}
fmt.Printf("%s | %s | Sees %d other peers\n", obj.ID, obj.Peername, len(obj.ClusterPeers)-1)
fmt.Printf(
"%s | %s | Sees %d other peers\n",
obj.ID.Pretty(),
obj.Peername,
len(obj.ClusterPeers)-1,
)
addrs := make(sort.StringSlice, 0, len(obj.Addresses))
for _, a := range obj.Addresses {
addrs = append(addrs, string(a))
addrs = append(addrs, a.String())
}
addrs.Sort()
fmt.Println(" > Addresses:")
@ -169,7 +120,7 @@ func textFormatPrintIDSerial(obj *api.IDSerial) {
ipfsAddrs := make(sort.StringSlice, 0, len(obj.Addresses))
for _, a := range obj.IPFS.Addresses {
ipfsAddrs = append(ipfsAddrs, string(a))
ipfsAddrs = append(ipfsAddrs, a.String())
}
ipfsAddrs.Sort()
fmt.Printf(" > IPFS: %s\n", obj.IPFS.ID)
@ -178,33 +129,34 @@ func textFormatPrintIDSerial(obj *api.IDSerial) {
}
}
func textFormatPrintGPInfo(obj *api.GlobalPinInfoSerial) {
func textFormatPrintGPInfo(obj *api.GlobalPinInfo) {
fmt.Printf("%s :\n", obj.Cid)
peers := make(sort.StringSlice, 0, len(obj.PeerMap))
peers := make([]string, 0, len(obj.PeerMap))
for k := range obj.PeerMap {
peers = append(peers, k)
}
peers.Sort()
sort.Strings(peers)
for _, k := range peers {
v := obj.PeerMap[k]
if len(v.PeerName) > 0 {
fmt.Printf(" > %-15s : %s", v.PeerName, strings.ToUpper(v.Status))
fmt.Printf(" > %-15s : %s", v.PeerName, strings.ToUpper(v.Status.String()))
} else {
fmt.Printf(" > %-15s : %s", k, strings.ToUpper(v.Status))
fmt.Printf(" > %-15s : %s", k, strings.ToUpper(v.Status.String()))
}
if v.Error != "" {
fmt.Printf(": %s", v.Error)
}
fmt.Printf(" | %s\n", v.TS)
txt, _ := v.TS.MarshalText()
fmt.Printf(" | %s\n", txt)
}
}
func textFormatPrintPInfo(obj *api.PinInfoSerial) {
gpinfo := api.GlobalPinInfoSerial{
func textFormatPrintPInfo(obj *api.PinInfo) {
gpinfo := api.GlobalPinInfo{
Cid: obj.Cid,
PeerMap: map[string]api.PinInfoSerial{
obj.Peer: *obj,
PeerMap: map[string]*api.PinInfo{
peer.IDB58Encode(obj.Peer): obj,
},
}
textFormatPrintGPInfo(&gpinfo)
@ -214,14 +166,14 @@ func textFormatPrintVersion(obj *api.Version) {
fmt.Println(obj.Version)
}
func textFormatPrintPin(obj *api.PinSerial) {
fmt.Printf("%s | %s | %s | ", obj.Cid, obj.Name, strings.ToUpper(obj.ToPin().Type.String()))
func textFormatPrintPin(obj *api.Pin) {
fmt.Printf("%s | %s | %s | ", obj.Cid, obj.Name, strings.ToUpper(obj.Type.String()))
if obj.ReplicationFactorMin < 0 {
fmt.Printf("Repl. Factor: -1 | Allocations: [everywhere]")
} else {
var sortAlloc sort.StringSlice = obj.Allocations
sortAlloc.Sort()
sortAlloc := api.PeersToStrings(obj.Allocations)
sort.Strings(sortAlloc)
fmt.Printf("Repl. Factor: %d--%d | Allocations: %s",
obj.ReplicationFactorMin, obj.ReplicationFactorMax,
sortAlloc)

View File

@ -39,19 +39,21 @@ var errUnfinishedWrite = errors.New("could not complete write of line to output"
var errUnknownNodeType = errors.New("unsupported node type. Expected cluster or ipfs")
var errCorruptOrdering = errors.New("expected pid to have an ordering within dot writer")
func makeDot(cg api.ConnectGraphSerial, w io.Writer, allIpfs bool) error {
ipfsEdges := make(map[string][]string)
func makeDot(cg *api.ConnectGraph, w io.Writer, allIpfs bool) error {
ipfsEdges := make(map[string][]peer.ID)
for k, v := range cg.IPFSLinks {
ipfsEdges[k] = make([]string, 0)
ipfsEdges[k] = make([]peer.ID, 0)
for _, id := range v {
if _, ok := cg.IPFSLinks[id]; ok || allIpfs {
strPid := peer.IDB58Encode(id)
if _, ok := cg.IPFSLinks[strPid]; ok || allIpfs {
ipfsEdges[k] = append(ipfsEdges[k], id)
}
if allIpfs { // include all swarm peers in the graph
if _, ok := ipfsEdges[id]; !ok {
if _, ok := ipfsEdges[strPid]; !ok {
// if id in IPFSLinks this will be overwritten
// if id not in IPFSLinks this will stay blank
ipfsEdges[id] = make([]string, 0)
ipfsEdges[strPid] = make([]peer.ID, 0)
}
}
}
@ -76,15 +78,16 @@ type dotWriter struct {
w io.Writer
dotGraph dot.Graph
ipfsEdges map[string][]string
clusterEdges map[string][]string
clusterIpfsEdges map[string]string
ipfsEdges map[string][]peer.ID
clusterEdges map[string][]peer.ID
clusterIpfsEdges map[string]peer.ID
}
// writes nodes to dot file output and creates and stores an ordering over nodes
func (dW *dotWriter) addNode(id string, nT nodeType) error {
var node dot.VertexDescription
node.Label = shortID(id)
pid, _ := peer.IDB58Decode(id)
node.Label = pid.String()
switch nT {
case tCluster:
node.ID = fmt.Sprintf("C%d", len(dW.clusterNodes))
@ -130,7 +133,7 @@ func (dW *dotWriter) print() error {
for k, v := range dW.clusterEdges {
for _, id := range v {
toNode := dW.clusterNodes[k]
fromNode := dW.clusterNodes[id]
fromNode := dW.clusterNodes[peer.IDB58Encode(id)]
dW.dotGraph.AddEdge(toNode, fromNode, true)
}
}
@ -140,7 +143,7 @@ func (dW *dotWriter) print() error {
// Write cluster to ipfs edges
for k, id := range dW.clusterIpfsEdges {
toNode := dW.clusterNodes[k]
fromNode := dW.ipfsNodes[id]
fromNode := dW.ipfsNodes[peer.IDB58Encode(id)]
dW.dotGraph.AddEdge(toNode, fromNode, true)
}
dW.dotGraph.AddNewLine()
@ -150,14 +153,14 @@ func (dW *dotWriter) print() error {
for k, v := range dW.ipfsEdges {
for _, id := range v {
toNode := dW.ipfsNodes[k]
fromNode := dW.ipfsNodes[id]
fromNode := dW.ipfsNodes[peer.IDB58Encode(id)]
dW.dotGraph.AddEdge(toNode, fromNode, true)
}
}
return dW.dotGraph.WriteDot(dW.w)
}
func sortedKeys(dict map[string][]string) []string {
func sortedKeys(dict map[string][]peer.ID) []string {
keys := make([]string, len(dict), len(dict))
i := 0
for k := range dict {
@ -167,17 +170,3 @@ func sortedKeys(dict map[string][]string) []string {
sort.Strings(keys)
return keys
}
// truncate the provided peer id string to the 3 last characters. Odds of
// pairwise collisions are less than 1 in 200,000 so with 70 cluster peers
// the chances of a collision are still less than 1 in 100 (birthday paradox).
// As clusters grow bigger than this we can provide a flag for including
// more characters.
func shortID(peerString string) string {
pid, err := peer.IDB58Decode(peerString)
if err != nil {
// Should never get here, panic
panic("shortID called on non-pid string")
}
return pid.String()
}

View File

@ -7,6 +7,8 @@ import (
"strings"
"testing"
peer "github.com/libp2p/go-libp2p-peer"
"github.com/ipfs/ipfs-cluster/api"
)
@ -66,45 +68,58 @@ I2 -> I1
}`
var (
pid1, _ = peer.IDB58Decode("QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD")
pid2, _ = peer.IDB58Decode("QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ")
pid3, _ = peer.IDB58Decode("QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu")
pid4, _ = peer.IDB58Decode("QmXbiVZd93SLiu9TAm21F2y9JwGiFLydbEVkPBaMR3DZDV")
pid5, _ = peer.IDB58Decode("QmPFKAGZbUjdzt8BBx8VTWCe9UeUQVcoqHFehSPzN5LSsq")
pid6, _ = peer.IDB58Decode("QmbU7273zH6jxwDe2nqRmEm2rp5PpqP2xeQr2xCmwbBsuL")
pid7, _ = peer.IDB58Decode("QmQsdAdCHs4PRLi5tcoLfasYppryqQENxgAy4b2aS8xccb")
pid8, _ = peer.IDB58Decode("QmVV2enwXqqQf5esx4v36UeaFQvFehSPzNfi8aaaaaanM8")
pid9, _ = peer.IDB58Decode("QmfCHNQ2vbUmAuJZhE2hEpgiJq4sL1XScWEKnUrVtWZdeD")
)
func TestSimpleIpfsGraphs(t *testing.T) {
cg := api.ConnectGraphSerial{
ClusterID: "QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD",
ClusterLinks: map[string][]string{
"QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD": []string{
"QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ",
"QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu",
cg := api.ConnectGraph{
ClusterID: pid1,
ClusterLinks: map[string][]peer.ID{
peer.IDB58Encode(pid1): []peer.ID{
pid2,
pid3,
},
"QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ": []string{
"QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD",
"QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu",
peer.IDB58Encode(pid2): []peer.ID{
pid1,
pid3,
},
"QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu": []string{
"QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD",
"QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ",
peer.IDB58Encode(pid3): []peer.ID{
pid1,
pid2,
},
},
IPFSLinks: map[string][]string{
"QmXbiVZd93SLiu9TAm21F2y9JwGiFLydbEVkPBaMR3DZDV": []string{
"QmPFKAGZbUjdzt8BBx8VTWCe9UeUQVcoqHFehSPzN5LSsq",
"QmbU7273zH6jxwDe2nqRmEm2rp5PpqP2xeQr2xCmwbBsuL",
IPFSLinks: map[string][]peer.ID{
peer.IDB58Encode(pid4): []peer.ID{
pid5,
pid6,
},
"QmPFKAGZbUjdzt8BBx8VTWCe9UeUQVcoqHFehSPzN5LSsq": []string{
"QmXbiVZd93SLiu9TAm21F2y9JwGiFLydbEVkPBaMR3DZDV",
"QmbU7273zH6jxwDe2nqRmEm2rp5PpqP2xeQr2xCmwbBsuL",
peer.IDB58Encode(pid5): []peer.ID{
pid4,
pid6,
},
"QmbU7273zH6jxwDe2nqRmEm2rp5PpqP2xeQr2xCmwbBsuL": []string{
"QmXbiVZd93SLiu9TAm21F2y9JwGiFLydbEVkPBaMR3DZDV",
"QmPFKAGZbUjdzt8BBx8VTWCe9UeUQVcoqHFehSPzN5LSsq",
peer.IDB58Encode(pid6): []peer.ID{
pid4,
pid5,
},
},
ClustertoIPFS: map[string]string{
"QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD": "QmXbiVZd93SLiu9TAm21F2y9JwGiFLydbEVkPBaMR3DZDV",
"QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ": "QmPFKAGZbUjdzt8BBx8VTWCe9UeUQVcoqHFehSPzN5LSsq",
"QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu": "QmbU7273zH6jxwDe2nqRmEm2rp5PpqP2xeQr2xCmwbBsuL",
ClustertoIPFS: map[string]peer.ID{
peer.IDB58Encode(pid1): pid4,
peer.IDB58Encode(pid2): pid5,
peer.IDB58Encode(pid3): pid6,
},
}
buf := new(bytes.Buffer)
err := makeDot(cg, buf, false)
err := makeDot(&cg, buf, false)
if err != nil {
t.Fatal(err)
}
@ -161,54 +176,54 @@ I4 -> I5
}`
func TestIpfsAllGraphs(t *testing.T) {
cg := api.ConnectGraphSerial{
ClusterID: "QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD",
ClusterLinks: map[string][]string{
"QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD": []string{
"QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ",
"QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu",
cg := api.ConnectGraph{
ClusterID: pid1,
ClusterLinks: map[string][]peer.ID{
peer.IDB58Encode(pid1): []peer.ID{
pid2,
pid3,
},
"QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ": []string{
"QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD",
"QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu",
peer.IDB58Encode(pid2): []peer.ID{
pid1,
pid3,
},
"QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu": []string{
"QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD",
"QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ",
peer.IDB58Encode(pid3): []peer.ID{
pid1,
pid2,
},
},
IPFSLinks: map[string][]string{
"QmXbiVZd93SLiu9TAm21F2y9JwGiFLydbEVkPBaMR3DZDV": []string{
"QmPFKAGZbUjdzt8BBx8VTWCe9UeUQVcoqHFehSPzN5LSsq",
"QmbU7273zH6jxwDe2nqRmEm2rp5PpqP2xeQr2xCmwbBsuL",
"QmQsdAdCHs4PRLi5tcoLfasYppryqQENxgAy4b2aS8xccb",
"QmVV2enwXqqQf5esx4v36UeaFQvFehSPzNfi8aaaaaanM8",
"QmfCHNQ2vbUmAuJZhE2hEpgiJq4sL1XScWEKnUrVtWZdeD",
IPFSLinks: map[string][]peer.ID{
peer.IDB58Encode(pid4): []peer.ID{
pid5,
pid6,
pid7,
pid8,
pid9,
},
"QmPFKAGZbUjdzt8BBx8VTWCe9UeUQVcoqHFehSPzN5LSsq": []string{
"QmXbiVZd93SLiu9TAm21F2y9JwGiFLydbEVkPBaMR3DZDV",
"QmbU7273zH6jxwDe2nqRmEm2rp5PpqP2xeQr2xCmwbBsuL",
"QmQsdAdCHs4PRLi5tcoLfasYppryqQENxgAy4b2aS8xccb",
"QmVV2enwXqqQf5esx4v36UeaFQvFehSPzNfi8aaaaaanM8",
"QmfCHNQ2vbUmAuJZhE2hEpgiJq4sL1XScWEKnUrVtWZdeD",
peer.IDB58Encode(pid5): []peer.ID{
pid4,
pid6,
pid7,
pid8,
pid9,
},
"QmbU7273zH6jxwDe2nqRmEm2rp5PpqP2xeQr2xCmwbBsuL": []string{
"QmXbiVZd93SLiu9TAm21F2y9JwGiFLydbEVkPBaMR3DZDV",
"QmPFKAGZbUjdzt8BBx8VTWCe9UeUQVcoqHFehSPzN5LSsq",
"QmQsdAdCHs4PRLi5tcoLfasYppryqQENxgAy4b2aS8xccb",
"QmVV2enwXqqQf5esx4v36UeaFQvFehSPzNfi8aaaaaanM8",
"QmfCHNQ2vbUmAuJZhE2hEpgiJq4sL1XScWEKnUrVtWZdeD",
peer.IDB58Encode(pid6): []peer.ID{
pid4,
pid5,
pid7,
pid8,
pid9,
},
},
ClustertoIPFS: map[string]string{
"QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD": "QmXbiVZd93SLiu9TAm21F2y9JwGiFLydbEVkPBaMR3DZDV",
"QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ": "QmPFKAGZbUjdzt8BBx8VTWCe9UeUQVcoqHFehSPzN5LSsq",
"QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu": "QmbU7273zH6jxwDe2nqRmEm2rp5PpqP2xeQr2xCmwbBsuL",
ClustertoIPFS: map[string]peer.ID{
peer.IDB58Encode(pid1): pid4,
peer.IDB58Encode(pid2): pid5,
peer.IDB58Encode(pid3): pid6,
},
}
buf := new(bytes.Buffer)
err := makeDot(cg, buf, true)
err := makeDot(&cg, buf, true)
if err != nil {
t.Fatal(err)
}

View File

@ -417,13 +417,13 @@ cluster "pin add".
go func() {
defer wg.Done()
var buffered []addedOutputQuiet
var lastBuf = make([]addedOutputQuiet, 1, 1)
var buffered []*addedOutputQuiet
var lastBuf = make([]*addedOutputQuiet, 1, 1)
var qq = c.Bool("quieter")
var q = c.Bool("quiet") || qq
var bufferResults = c.Bool("no-stream")
for v := range out {
added := addedOutputQuiet{v, q}
added := &addedOutputQuiet{v, q}
lastBuf[0] = added
if bufferResults {
buffered = append(buffered, added)
@ -940,11 +940,11 @@ func parseCredentials(userInput string) (string, string) {
func handlePinResponseFormatFlags(
ctx context.Context,
c *cli.Context,
pin api.Pin,
pin *api.Pin,
target api.TrackerStatus,
) {
var status api.GlobalPinInfo
var status *api.GlobalPinInfo
var cerr error
if c.Bool("wait") {
@ -957,7 +957,7 @@ func handlePinResponseFormatFlags(
return
}
if status.Cid == cid.Undef { // no status from "wait"
if status == nil { // no status from "wait"
time.Sleep(time.Second)
status, cerr = globalClient.Status(ctx, pin.Cid, false)
}
@ -968,7 +968,7 @@ func waitFor(
ci cid.Cid,
target api.TrackerStatus,
timeout time.Duration,
) (api.GlobalPinInfo, error) {
) (*api.GlobalPinInfo, error) {
ctx := context.Background()

View File

@ -114,16 +114,16 @@ func stateImport(ctx context.Context, r io.Reader) error {
return err
}
pinSerials := make([]api.PinSerial, 0)
pins := make([]*api.Pin, 0)
dec := json.NewDecoder(r)
err = dec.Decode(&pinSerials)
err = dec.Decode(&pins)
if err != nil {
return err
}
stateToImport := mapstate.NewMapState()
for _, pS := range pinSerials {
err = stateToImport.Add(ctx, pS.ToPin())
for _, p := range pins {
err = stateToImport.Add(ctx, p)
if err != nil {
return err
}
@ -170,15 +170,11 @@ func exportState(ctx context.Context, state state.State, w io.Writer) error {
// Serialize pins
pins := state.List(ctx)
pinSerials := make([]api.PinSerial, len(pins), len(pins))
for i, pin := range pins {
pinSerials[i] = pin.ToSerial()
}
// Write json to output file
enc := json.NewEncoder(w)
enc.SetIndent("", " ")
return enc.Encode(pinSerials)
return enc.Encode(pins)
}
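With the serial types gone, the exported state is simply a JSON array of Pin objects that decodes straight back into []*api.Pin, as the import code above does. A hedged round-trip sketch (not part of this diff; the helper is hypothetical and uses only the standard library):

// roundTripPins is a hypothetical helper, not in the PR.
func roundTripPins(pins []*api.Pin) ([]*api.Pin, error) {
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)
	enc.SetIndent("", " ")
	if err := enc.Encode(pins); err != nil {
		return nil, err
	}
	var out []*api.Pin
	err := json.NewDecoder(&buf).Decode(&out)
	return out, err
}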
// CleanupState cleans the state

View File

@ -2,6 +2,7 @@ package ipfscluster
import (
peer "github.com/libp2p/go-libp2p-peer"
"go.opencensus.io/trace"
"github.com/ipfs/ipfs-cluster/api"
@ -15,16 +16,17 @@ func (c *Cluster) ConnectGraph() (api.ConnectGraph, error) {
defer span.End()
cg := api.ConnectGraph{
IPFSLinks: make(map[peer.ID][]peer.ID),
ClusterLinks: make(map[peer.ID][]peer.ID),
ClustertoIPFS: make(map[peer.ID]peer.ID),
ClusterID: c.host.ID(),
IPFSLinks: make(map[string][]peer.ID),
ClusterLinks: make(map[string][]peer.ID),
ClustertoIPFS: make(map[string]peer.ID),
}
members, err := c.consensus.Peers(ctx)
if err != nil {
return cg, err
}
peersSerials := make([][]api.IDSerial, len(members), len(members))
peers := make([][]*api.ID, len(members), len(members))
ctxs, cancels := rpcutil.CtxsWithCancel(ctx, len(members))
defer rpcutil.MultiCancel(cancels)
@ -35,22 +37,22 @@ func (c *Cluster) ConnectGraph() (api.ConnectGraph, error) {
"Cluster",
"Peers",
struct{}{},
rpcutil.CopyIDSerialSliceToIfaces(peersSerials),
rpcutil.CopyIDSliceToIfaces(peers),
)
for i, err := range errs {
p := members[i]
p := peer.IDB58Encode(members[i])
cg.ClusterLinks[p] = make([]peer.ID, 0)
if err != nil { // Only setting cluster connections when no error occurs
logger.Debugf("RPC error reaching cluster peer %s: %s", p.Pretty(), err.Error())
logger.Debugf("RPC error reaching cluster peer %s: %s", p, err.Error())
continue
}
selfConnection, pID := c.recordClusterLinks(&cg, p, peersSerials[i])
selfConnection, pID := c.recordClusterLinks(&cg, p, peers[i])
// IPFS connections
if !selfConnection {
logger.Warningf("cluster peer %s not its own peer. No ipfs info ", p.Pretty())
logger.Warningf("cluster peer %s not its own peer. No ipfs info ", p)
continue
}
c.recordIPFSLinks(&cg, pID)
@ -59,16 +61,15 @@ func (c *Cluster) ConnectGraph() (api.ConnectGraph, error) {
return cg, nil
}
func (c *Cluster) recordClusterLinks(cg *api.ConnectGraph, p peer.ID, sPeers []api.IDSerial) (bool, api.ID) {
func (c *Cluster) recordClusterLinks(cg *api.ConnectGraph, p string, peers []*api.ID) (bool, *api.ID) {
selfConnection := false
var pID api.ID
for _, sID := range sPeers {
id := sID.ToID()
var pID *api.ID
for _, id := range peers {
if id.Error != "" {
logger.Debugf("Peer %s errored connecting to its peer %s", p.Pretty(), id.ID.Pretty())
logger.Debugf("Peer %s errored connecting to its peer %s", p, id.ID.Pretty())
continue
}
if id.ID == p {
if peer.IDB58Encode(id.ID) == p {
selfConnection = true
pID = id
} else {
@ -78,27 +79,31 @@ func (c *Cluster) recordClusterLinks(cg *api.ConnectGraph, p peer.ID, sPeers []a
return selfConnection, pID
}
func (c *Cluster) recordIPFSLinks(cg *api.ConnectGraph, pID api.ID) {
func (c *Cluster) recordIPFSLinks(cg *api.ConnectGraph, pID *api.ID) {
ipfsID := pID.IPFS.ID
if pID.IPFS.Error != "" { // Only setting ipfs connections when no error occurs
logger.Warningf("ipfs id: %s has error: %s. Skipping swarm connections", ipfsID.Pretty(), pID.IPFS.Error)
return
}
if _, ok := cg.IPFSLinks[pID.ID]; ok {
pid := peer.IDB58Encode(pID.ID)
ipfsPid := peer.IDB58Encode(ipfsID)
if _, ok := cg.IPFSLinks[pid]; ok {
logger.Warningf("ipfs id: %s already recorded, one ipfs daemon in use by multiple cluster peers", ipfsID.Pretty())
}
cg.ClustertoIPFS[pID.ID] = ipfsID
cg.IPFSLinks[ipfsID] = make([]peer.ID, 0)
var swarmPeersS api.SwarmPeersSerial
err := c.rpcClient.Call(pID.ID,
cg.ClustertoIPFS[pid] = ipfsID
cg.IPFSLinks[ipfsPid] = make([]peer.ID, 0)
var swarmPeers []peer.ID
err := c.rpcClient.Call(
pID.ID,
"Cluster",
"IPFSSwarmPeers",
struct{}{},
&swarmPeersS,
&swarmPeers,
)
if err != nil {
return
}
swarmPeers := swarmPeersS.ToSwarmPeers()
cg.IPFSLinks[ipfsID] = swarmPeers
cg.IPFSLinks[ipfsPid] = swarmPeers
}

View File

@ -215,9 +215,9 @@ func (cc *Consensus) Ready(ctx context.Context) <-chan struct{} {
return cc.readyCh
}
func (cc *Consensus) op(ctx context.Context, pin api.Pin, t LogOpType) *LogOp {
func (cc *Consensus) op(ctx context.Context, pin *api.Pin, t LogOpType) *LogOp {
return &LogOp{
Cid: pin.ToSerial(),
Cid: pin,
Type: t,
}
}
@ -272,8 +272,7 @@ func (cc *Consensus) redirectToLeader(method string, arg interface{}) (bool, err
&struct{}{},
)
if finalErr != nil {
logger.Error(finalErr)
logger.Error("retrying to redirect request to leader")
logger.Errorf("retrying to redirect request to leader: %s", finalErr)
time.Sleep(2 * cc.config.RaftConfig.HeartbeatTimeout)
continue
}
@ -342,12 +341,12 @@ func (cc *Consensus) commit(ctx context.Context, op *LogOp, rpcOp string, redire
// LogPin submits a Cid to the shared state of the cluster. It will forward
// the operation to the leader if this is not it.
func (cc *Consensus) LogPin(ctx context.Context, pin api.Pin) error {
func (cc *Consensus) LogPin(ctx context.Context, pin *api.Pin) error {
ctx, span := trace.StartSpan(ctx, "consensus/LogPin")
defer span.End()
op := cc.op(ctx, pin, LogOpPin)
err := cc.commit(ctx, op, "ConsensusLogPin", pin.ToSerial())
err := cc.commit(ctx, op, "ConsensusLogPin", pin)
if err != nil {
return err
}
@ -355,12 +354,12 @@ func (cc *Consensus) LogPin(ctx context.Context, pin api.Pin) error {
}
// LogUnpin removes a Cid from the shared state of the cluster.
func (cc *Consensus) LogUnpin(ctx context.Context, pin api.Pin) error {
func (cc *Consensus) LogUnpin(ctx context.Context, pin *api.Pin) error {
ctx, span := trace.StartSpan(ctx, "consensus/LogUnpin")
defer span.End()
op := cc.op(ctx, pin, LogOpUnpin)
err := cc.commit(ctx, op, "ConsensusLogUnpin", pin.ToSerial())
err := cc.commit(ctx, op, "ConsensusLogUnpin", pin)
if err != nil {
return err
}

View File

@ -21,7 +21,7 @@ func cleanRaft(idn int) {
os.RemoveAll(fmt.Sprintf("raftFolderFromTests-%d", idn))
}
func testPin(c cid.Cid) api.Pin {
func testPin(c cid.Cid) *api.Pin {
p := api.PinCid(c)
p.ReplicationFactorMin = -1
p.ReplicationFactorMax = -1
@ -89,8 +89,7 @@ func TestConsensusPin(t *testing.T) {
defer cleanRaft(1) // Remember defer runs in LIFO order
defer cc.Shutdown(ctx)
c, _ := cid.Decode(test.TestCid1)
err := cc.LogPin(ctx, testPin(c))
err := cc.LogPin(ctx, testPin(test.Cid1))
if err != nil {
t.Error("the operation did not make it to the log:", err)
}
@ -102,7 +101,7 @@ func TestConsensusPin(t *testing.T) {
}
pins := st.List(ctx)
if len(pins) != 1 || pins[0].Cid.String() != test.TestCid1 {
if len(pins) != 1 || !pins[0].Cid.Equals(test.Cid1) {
t.Error("the added pin should be in the state")
}
}
@ -113,8 +112,7 @@ func TestConsensusUnpin(t *testing.T) {
defer cleanRaft(1)
defer cc.Shutdown(ctx)
c, _ := cid.Decode(test.TestCid2)
err := cc.LogUnpin(ctx, api.PinCid(c))
err := cc.LogUnpin(ctx, api.PinCid(test.Cid1))
if err != nil {
t.Error("the operation did not make it to the log:", err)
}
@ -127,8 +125,7 @@ func TestConsensusUpdate(t *testing.T) {
defer cc.Shutdown(ctx)
// Pin first
c1, _ := cid.Decode(test.TestCid1)
pin := testPin(c1)
pin := testPin(test.Cid1)
pin.Type = api.ShardType
err := cc.LogPin(ctx, pin)
if err != nil {
@ -137,8 +134,7 @@ func TestConsensusUpdate(t *testing.T) {
time.Sleep(250 * time.Millisecond)
// Update pin
c2, _ := cid.Decode(test.TestCid2)
pin.Reference = c2
pin.Reference = &test.Cid2
err = cc.LogPin(ctx, pin)
if err != nil {
t.Error("the update op did not make it to the log:", err)
@ -151,10 +147,10 @@ func TestConsensusUpdate(t *testing.T) {
}
pins := st.List(ctx)
if len(pins) != 1 || pins[0].Cid.String() != test.TestCid1 {
if len(pins) != 1 || !pins[0].Cid.Equals(test.Cid1) {
t.Error("the added pin should be in the state")
}
if !pins[0].Reference.Equals(c2) {
if !pins[0].Reference.Equals(test.Cid2) {
t.Error("pin updated incorrectly")
}
}
@ -217,8 +213,7 @@ func TestConsensusRmPeer(t *testing.T) {
}
cc.raft.WaitForLeader(ctx)
c, _ := cid.Decode(test.TestCid1)
err = cc.LogPin(ctx, testPin(c))
err = cc.LogPin(ctx, testPin(test.Cid1))
if err != nil {
t.Error("could not pin after adding peer:", err)
}
@ -226,7 +221,7 @@ func TestConsensusRmPeer(t *testing.T) {
time.Sleep(2 * time.Second)
// Remove unexisting peer
err = cc.RmPeer(ctx, test.TestPeerID1)
err = cc.RmPeer(ctx, test.PeerID1)
if err != nil {
t.Fatal("the operation did not make it to the log:", err)
}
@ -267,8 +262,7 @@ func TestRaftLatestSnapshot(t *testing.T) {
defer cc.Shutdown(ctx)
// Make pin 1
c1, _ := cid.Decode(test.TestCid1)
err := cc.LogPin(ctx, testPin(c1))
err := cc.LogPin(ctx, testPin(test.Cid1))
if err != nil {
t.Error("the first pin did not make it to the log:", err)
}
@ -280,8 +274,7 @@ func TestRaftLatestSnapshot(t *testing.T) {
}
// Make pin 2
c2, _ := cid.Decode(test.TestCid2)
err = cc.LogPin(ctx, testPin(c2))
err = cc.LogPin(ctx, testPin(test.Cid2))
if err != nil {
t.Error("the second pin did not make it to the log:", err)
}
@ -293,8 +286,7 @@ func TestRaftLatestSnapshot(t *testing.T) {
}
// Make pin 3
c3, _ := cid.Decode(test.TestCid3)
err = cc.LogPin(ctx, testPin(c3))
err = cc.LogPin(ctx, testPin(test.Cid3))
if err != nil {
t.Error("the third pin did not make it to the log:", err)
}

View File

@ -26,12 +26,12 @@ type LogOpType int
// It implements the consensus.Op interface and it is used by the
// Consensus component.
type LogOp struct {
SpanCtx trace.SpanContext
TagCtx []byte
Cid api.PinSerial
Type LogOpType
consensus *Consensus
tracing bool
SpanCtx trace.SpanContext `codec:"s,omitempty"`
TagCtx []byte `codec:"t,omitempty"`
Cid *api.Pin `codec:"c,omitempty"`
Type LogOpType `codec:"p,omitempty"`
consensus *Consensus `codec:"-"`
tracing bool `codec:"-"`
}
// ApplyTo applies the operation to the State
@ -55,16 +55,16 @@ func (op *LogOp) ApplyTo(cstate consensus.State) (consensus.State, error) {
panic("received unexpected state type")
}
// Copy the Cid. We are about to pass it to go-routines
// that will work with it (read its fields). However,
// as soon as ApplyTo is done, the next operation will be deserialized
// on top of "op". This can cause data races with the slices in
// api.PinSerial, which don't get copied when passed.
pinS := op.Cid.Clone()
pin := op.Cid
// We are about to pass "pin" to go-routines that will work
// with it (read its fields). However, as soon as ApplyTo is done, the
// next operation will be deserialized on top of "op". We nullify it
// to make sure no data races occur.
op.Cid = nil
switch op.Type {
case LogOpPin:
err = state.Add(ctx, pinS.ToPin())
err = state.Add(ctx, pin)
if err != nil {
logger.Error(err)
goto ROLLBACK
@ -75,12 +75,12 @@ func (op *LogOp) ApplyTo(cstate consensus.State) (consensus.State, error) {
"",
"Cluster",
"Track",
pinS,
pin,
&struct{}{},
nil,
)
case LogOpUnpin:
err = state.Rm(ctx, pinS.DecodeCid())
err = state.Rm(ctx, pin.Cid)
if err != nil {
logger.Error(err)
goto ROLLBACK
@ -91,7 +91,7 @@ func (op *LogOp) ApplyTo(cstate consensus.State) (consensus.State, error) {
"",
"Cluster",
"Untrack",
pinS,
pin,
&struct{}{},
nil,
)

View File

@ -4,8 +4,6 @@ import (
"context"
"testing"
cid "github.com/ipfs/go-cid"
"github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/state/mapstate"
"github.com/ipfs/ipfs-cluster/test"
@ -15,7 +13,7 @@ func TestApplyToPin(t *testing.T) {
ctx := context.Background()
cc := testingConsensus(t, 1)
op := &LogOp{
Cid: api.PinSerial{Cid: test.TestCid1},
Cid: api.PinCid(test.Cid1),
Type: LogOpPin,
consensus: cc,
}
@ -25,7 +23,7 @@ func TestApplyToPin(t *testing.T) {
st := mapstate.NewMapState()
op.ApplyTo(st)
pins := st.List(ctx)
if len(pins) != 1 || pins[0].Cid.String() != test.TestCid1 {
if len(pins) != 1 || !pins[0].Cid.Equals(test.Cid1) {
t.Error("the state was not modified correctly")
}
}
@ -34,7 +32,7 @@ func TestApplyToUnpin(t *testing.T) {
ctx := context.Background()
cc := testingConsensus(t, 1)
op := &LogOp{
Cid: api.PinSerial{Cid: test.TestCid1},
Cid: api.PinCid(test.Cid1),
Type: LogOpUnpin,
consensus: cc,
}
@ -42,8 +40,7 @@ func TestApplyToUnpin(t *testing.T) {
defer cc.Shutdown(ctx)
st := mapstate.NewMapState()
c, _ := cid.Decode(test.TestCid1)
st.Add(ctx, testPin(c))
st.Add(ctx, testPin(test.Cid1))
op.ApplyTo(st)
pins := st.List(ctx)
if len(pins) != 0 {
@ -59,7 +56,7 @@ func TestApplyToBadState(t *testing.T) {
}()
op := &LogOp{
Cid: api.PinSerial{Cid: test.TestCid1},
Cid: api.PinCid(test.Cid1),
Type: LogOpUnpin,
}

View File

@ -67,12 +67,12 @@ func (disk *Informer) Shutdown(ctx context.Context) error {
// GetMetric returns the metric obtained by this
// Informer.
func (disk *Informer) GetMetric(ctx context.Context) api.Metric {
func (disk *Informer) GetMetric(ctx context.Context) *api.Metric {
ctx, span := trace.StartSpan(ctx, "informer/disk/GetMetric")
defer span.End()
if disk.rpcClient == nil {
return api.Metric{
return &api.Metric{
Name: disk.Name(),
Valid: false,
}
@ -103,7 +103,7 @@ func (disk *Informer) GetMetric(ctx context.Context) api.Metric {
}
}
m := api.Metric{
m := &api.Metric{
Name: disk.Name(),
Value: fmt.Sprintf("%d", metric),
Valid: valid,

View File

@ -58,12 +58,12 @@ func (npi *Informer) Name() string {
// GetMetric contacts the IPFSConnector component and
// requests the `pin ls` command. We return the number
// of pins in IPFS.
func (npi *Informer) GetMetric(ctx context.Context) api.Metric {
func (npi *Informer) GetMetric(ctx context.Context) *api.Metric {
ctx, span := trace.StartSpan(ctx, "informer/numpin/GetMetric")
defer span.End()
if npi.rpcClient == nil {
return api.Metric{
return &api.Metric{
Valid: false,
}
}
@ -83,7 +83,7 @@ func (npi *Informer) GetMetric(ctx context.Context) api.Metric {
valid := err == nil
m := api.Metric{
m := &api.Metric{
Name: MetricName,
Value: fmt.Sprintf("%d", len(pinMap)),
Valid: valid,

View File

@ -39,9 +39,9 @@ type Consensus interface {
// allowing the main component to wait for it during start.
Ready(context.Context) <-chan struct{}
// Logs a pin operation
LogPin(ctx context.Context, c api.Pin) error
LogPin(ctx context.Context, c *api.Pin) error
// Logs an unpin operation
LogUnpin(ctx context.Context, c api.Pin) error
LogUnpin(ctx context.Context, c *api.Pin) error
AddPeer(ctx context.Context, p peer.ID) error
RmPeer(ctx context.Context, p peer.ID) error
State(context.Context) (state.State, error)
@ -67,7 +67,7 @@ type API interface {
// an IPFS daemon. This is a base component.
type IPFSConnector interface {
Component
ID(context.Context) (api.IPFSID, error)
ID(context.Context) (*api.IPFSID, error)
Pin(context.Context, cid.Cid, int) error
Unpin(context.Context, cid.Cid) error
PinLsCid(context.Context, cid.Cid) (api.IPFSPinStatus, error)
@ -76,17 +76,17 @@ type IPFSConnector interface {
// other peers IPFS daemons.
ConnectSwarms(context.Context) error
// SwarmPeers returns the IPFS daemon's swarm peers
SwarmPeers(context.Context) (api.SwarmPeers, error)
SwarmPeers(context.Context) ([]peer.ID, error)
// ConfigKey returns the value for a configuration key.
// Subobjects are reached with keypaths as "Parent/Child/GrandChild...".
ConfigKey(keypath string) (interface{}, error)
// RepoStat returns the current repository size and max limit as
// provided by "repo stat".
RepoStat(context.Context) (api.IPFSRepoStat, error)
RepoStat(context.Context) (*api.IPFSRepoStat, error)
// Resolve returns a cid given a path
Resolve(context.Context, string) (cid.Cid, error)
// BlockPut directly adds a block of data to the IPFS repo
BlockPut(context.Context, api.NodeWithMeta) error
BlockPut(context.Context, *api.NodeWithMeta) error
// BlockGet retrieves the raw data of an IPFS block
BlockGet(context.Context, cid.Cid) ([]byte, error)
}
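A hedged sketch (not part of this diff) of pushing a raw block through the pointer-based BlockPut above; the helper name is hypothetical and the "cbor" format mirrors what the sharding DAG service sends elsewhere in this PR:

// putRawBlock is a hypothetical helper, not in the PR.
func putRawBlock(ctx context.Context, ipfs IPFSConnector, c cid.Cid, data []byte) error {
	return ipfs.BlockPut(ctx, &api.NodeWithMeta{
		Cid:    c,
		Data:   data,
		Format: "cbor",
	})
}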
@ -106,24 +106,24 @@ type PinTracker interface {
Component
// Track tells the tracker that a Cid is now under its supervision
// The tracker may decide to perform an IPFS pin.
Track(context.Context, api.Pin) error
Track(context.Context, *api.Pin) error
// Untrack tells the tracker that a Cid is to be forgotten. The tracker
// may perform an IPFS unpin operation.
Untrack(context.Context, cid.Cid) error
// StatusAll returns the list of pins with their local status.
StatusAll(context.Context) []api.PinInfo
StatusAll(context.Context) []*api.PinInfo
// Status returns the local status of a given Cid.
Status(context.Context, cid.Cid) api.PinInfo
Status(context.Context, cid.Cid) *api.PinInfo
// SyncAll makes sure that all tracked Cids reflect the real IPFS status.
// It returns the list of pins which were updated by the call.
SyncAll(context.Context) ([]api.PinInfo, error)
SyncAll(context.Context) ([]*api.PinInfo, error)
// Sync makes sure that the Cid status reflect the real IPFS status.
// It returns the local status of the Cid.
Sync(context.Context, cid.Cid) (api.PinInfo, error)
Sync(context.Context, cid.Cid) (*api.PinInfo, error)
// RecoverAll calls Recover() for all pins tracked.
RecoverAll(context.Context) ([]api.PinInfo, error)
RecoverAll(context.Context) ([]*api.PinInfo, error)
// Recover retriggers a Pin/Unpin operation for a Cid with error status.
Recover(context.Context, cid.Cid) (api.PinInfo, error)
Recover(context.Context, cid.Cid) (*api.PinInfo, error)
}
// Informer provides Metric information from a peer. The metrics produced by
@ -133,7 +133,7 @@ type PinTracker interface {
type Informer interface {
Component
Name() string
GetMetric(context.Context) api.Metric
GetMetric(context.Context) *api.Metric
}
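A hedged sketch (not part of this diff) of an Informer satisfying the new pointer-returning GetMetric; the type is hypothetical and the embedded Component methods (client setup, shutdown) are omitted for brevity:

// staticInformer is a hypothetical example type, not in the PR.
type staticInformer struct{}

func (si *staticInformer) Name() string { return "static" }

func (si *staticInformer) GetMetric(ctx context.Context) *api.Metric {
	return &api.Metric{
		Name:  si.Name(),
		Value: "1",
		Valid: true,
		// TTL/expiry handling omitted; a real informer would also set one.
	}
}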
// PinAllocator decides where to pin certain content. In order to make such
@ -148,7 +148,7 @@ type PinAllocator interface {
// which are currently pinning the content. The candidates map
// contains the metrics for all peers which are eligible for pinning
// the content.
Allocate(ctx context.Context, c cid.Cid, current, candidates, priority map[peer.ID]api.Metric) ([]peer.ID, error)
Allocate(ctx context.Context, c cid.Cid, current, candidates, priority map[peer.ID]*api.Metric) ([]peer.ID, error)
}
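// Illustrative only, not part of the diff: a toy Allocate body using the
// pointer-valued metrics map. It returns every candidate with a valid
// metric; real allocators order candidates by the metric value.
func allocateAny(ctx context.Context, c cid.Cid, current, candidates, priority map[peer.ID]*api.Metric) ([]peer.ID, error) {
	peers := make([]peer.ID, 0, len(candidates))
	for pid, m := range candidates {
		if m == nil || !m.Valid {
			continue // skip peers without a usable metric
		}
		peers = append(peers, pid)
	}
	return peers, nil
}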
// PeerMonitor is a component in charge of publishing a peer's metrics and
@ -162,17 +162,17 @@ type PeerMonitor interface {
Component
// LogMetric stores a metric. It can be used to manually inject
// a metric to a monitor.
LogMetric(context.Context, api.Metric) error
LogMetric(context.Context, *api.Metric) error
// PublishMetric sends a metric to the rest of the peers.
// How to send it, and to who, is to be decided by the implementation.
PublishMetric(context.Context, api.Metric) error
PublishMetric(context.Context, *api.Metric) error
// LatestMetrics returns a map with the latest metrics of matching name
// for the current cluster peers.
LatestMetrics(ctx context.Context, name string) []api.Metric
LatestMetrics(ctx context.Context, name string) []*api.Metric
// Alerts delivers alerts generated when this peer monitor detects
// a problem (i.e. metrics not arriving as expected). Alerts can be used
// to trigger self-healing measures or re-pinnings of content.
Alerts() <-chan api.Alert
Alerts() <-chan *api.Alert
}
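// Illustrative only, not part of the diff: draining the pointer-typed
// Alerts channel of a PeerMonitor. "mon" and "logger" are assumed names;
// the loop ends when the channel closes or the context is cancelled.
func watchAlerts(ctx context.Context, mon PeerMonitor) {
	for {
		select {
		case alrt, ok := <-mon.Alerts():
			if !ok {
				return
			}
			logger.Warningf("metric %s expired for peer %s", alrt.MetricName, alrt.Peer.Pretty())
		case <-ctx.Done():
			return
		}
	}
}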
// Tracer implements Component as a way

View File

@ -29,7 +29,6 @@ import (
"github.com/ipfs/ipfs-cluster/test"
"github.com/ipfs/ipfs-cluster/version"
cid "github.com/ipfs/go-cid"
crypto "github.com/libp2p/go-libp2p-crypto"
host "github.com/libp2p/go-libp2p-host"
peer "github.com/libp2p/go-libp2p-peer"
@ -407,8 +406,8 @@ func TestClustersPeers(t *testing.T) {
t.Fatal("expected as many peers as clusters")
}
clusterIDMap := make(map[peer.ID]api.ID)
peerIDMap := make(map[peer.ID]api.ID)
clusterIDMap := make(map[peer.ID]*api.ID)
peerIDMap := make(map[peer.ID]*api.ID)
for _, c := range clusters {
id := c.ID(ctx)
@ -437,8 +436,7 @@ func TestClustersPin(t *testing.T) {
ctx := context.Background()
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
exampleCid, _ := cid.Decode(test.TestCid1)
prefix := exampleCid.Prefix()
prefix := test.Cid1.Prefix()
ttlDelay()
@ -504,7 +502,7 @@ func TestClustersStatusAll(t *testing.T) {
ctx := context.Background()
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
h, _ := cid.Decode(test.TestCid1)
h := test.Cid1
clusters[0].Pin(ctx, api.PinCid(h))
pinDelay()
// Global status
@ -516,7 +514,7 @@ func TestClustersStatusAll(t *testing.T) {
if len(statuses) != 1 {
t.Fatal("bad status. Expected one item")
}
if statuses[0].Cid.String() != test.TestCid1 {
if !statuses[0].Cid.Equals(h) {
t.Error("bad cid in status")
}
info := statuses[0].PeerMap
@ -524,7 +522,8 @@ func TestClustersStatusAll(t *testing.T) {
t.Error("bad info in status")
}
if info[c.host.ID()].Status != api.TrackerStatusPinned {
pid := peer.IDB58Encode(c.host.ID())
if info[pid].Status != api.TrackerStatusPinned {
t.Error("the hash should have been pinned")
}
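// Illustrative only, not part of the diff: GlobalPinInfo.PeerMap is now
// keyed by the base58 string of the peer ID, which is why the tests encode
// the host ID before looking it up. "isPinnedLocally" is a hypothetical
// helper written under that assumption.
func isPinnedLocally(status *api.GlobalPinInfo, h host.Host) bool {
	pinfo, ok := status.PeerMap[peer.IDB58Encode(h.ID())]
	return ok && pinfo.Status == api.TrackerStatusPinned
}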
@ -533,12 +532,13 @@ func TestClustersStatusAll(t *testing.T) {
t.Error(err)
}
pinfo, ok := status.PeerMap[c.host.ID()]
pinfo, ok := status.PeerMap[pid]
if !ok {
t.Fatal("Host not in status")
}
if pinfo.Status != api.TrackerStatusPinned {
t.Error(pinfo.Error)
t.Error("the status should show the hash as pinned")
}
}
@ -549,7 +549,7 @@ func TestClustersStatusAllWithErrors(t *testing.T) {
ctx := context.Background()
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
h, _ := cid.Decode(test.TestCid1)
h := test.Cid1
clusters[0].Pin(ctx, api.PinCid(h))
pinDelay()
@ -576,9 +576,10 @@ func TestClustersStatusAllWithErrors(t *testing.T) {
t.Error("bad number of peers in status")
}
errst := stts.PeerMap[clusters[1].ID(ctx).ID]
pid := peer.IDB58Encode(clusters[1].ID(ctx).ID)
errst := stts.PeerMap[pid]
if errst.Cid.String() != test.TestCid1 {
if !errst.Cid.Equals(h) {
t.Error("errored pinInfo should have a good cid")
}
@ -592,13 +593,13 @@ func TestClustersStatusAllWithErrors(t *testing.T) {
t.Error(err)
}
pinfo := status.PeerMap[clusters[1].ID(ctx).ID]
pinfo := status.PeerMap[pid]
if pinfo.Status != api.TrackerStatusClusterError {
t.Error("erroring status should be ClusterError")
}
if pinfo.Cid.String() != test.TestCid1 {
if !pinfo.Cid.Equals(h) {
t.Error("errored status should have a good cid")
}
@ -610,10 +611,8 @@ func TestClustersSyncAllLocal(t *testing.T) {
ctx := context.Background()
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
h, _ := cid.Decode(test.ErrorCid) // This cid always fails
h2, _ := cid.Decode(test.TestCid2)
clusters[0].Pin(ctx, api.PinCid(h))
clusters[0].Pin(ctx, api.PinCid(h2))
clusters[0].Pin(ctx, api.PinCid(test.ErrorCid)) // This cid always fails
clusters[0].Pin(ctx, api.PinCid(test.Cid2))
pinDelay()
pinDelay()
@ -641,8 +640,8 @@ func TestClustersSyncLocal(t *testing.T) {
ctx := context.Background()
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
h, _ := cid.Decode(test.ErrorCid) // This cid always fails
h2, _ := cid.Decode(test.TestCid2)
h := test.ErrorCid
h2 := test.Cid2
clusters[0].Pin(ctx, api.PinCid(h))
clusters[0].Pin(ctx, api.PinCid(h2))
pinDelay()
@ -674,10 +673,8 @@ func TestClustersSyncAll(t *testing.T) {
ctx := context.Background()
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
h, _ := cid.Decode(test.ErrorCid) // This cid always fails
h2, _ := cid.Decode(test.TestCid2)
clusters[0].Pin(ctx, api.PinCid(h))
clusters[0].Pin(ctx, api.PinCid(h2))
clusters[0].Pin(ctx, api.PinCid(test.ErrorCid))
clusters[0].Pin(ctx, api.PinCid(test.Cid2))
pinDelay()
pinDelay()
@ -689,11 +686,11 @@ func TestClustersSyncAll(t *testing.T) {
if len(ginfos) != 1 {
t.Fatalf("expected globalsync to have 1 element, got = %d", len(ginfos))
}
if ginfos[0].Cid.String() != test.ErrorCid {
if !ginfos[0].Cid.Equals(test.ErrorCid) {
t.Error("expected globalsync to have problems with test.ErrorCid")
}
for _, c := range clusters {
inf, ok := ginfos[0].PeerMap[c.host.ID()]
inf, ok := ginfos[0].PeerMap[peer.IDB58Encode(c.host.ID())]
if !ok {
t.Fatal("GlobalPinInfo should have this cluster")
}
@ -707,8 +704,8 @@ func TestClustersSync(t *testing.T) {
ctx := context.Background()
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
h, _ := cid.Decode(test.ErrorCid) // This cid always fails
h2, _ := cid.Decode(test.TestCid2)
h := test.ErrorCid // This cid always fails
h2 := test.Cid2
clusters[0].Pin(ctx, api.PinCid(h))
clusters[0].Pin(ctx, api.PinCid(h2))
pinDelay()
@ -721,7 +718,7 @@ func TestClustersSync(t *testing.T) {
// with errors contained in GlobalPinInfo
t.Fatal("did not expect an error")
}
pinfo, ok := ginfo.PeerMap[clusters[j].host.ID()]
pinfo, ok := ginfo.PeerMap[peer.IDB58Encode(clusters[j].host.ID())]
if !ok {
t.Fatal("should have info for this host")
}
@ -729,12 +726,12 @@ func TestClustersSync(t *testing.T) {
t.Error("pinInfo error should not be empty")
}
if ginfo.Cid.String() != test.ErrorCid {
if !ginfo.Cid.Equals(h) {
t.Error("GlobalPinInfo should be for test.ErrorCid")
}
for _, c := range clusters {
inf, ok := ginfo.PeerMap[c.host.ID()]
inf, ok := ginfo.PeerMap[peer.IDB58Encode(c.host.ID())]
if !ok {
t.Logf("%+v", ginfo)
t.Fatal("GlobalPinInfo should not be empty for this host")
@ -751,12 +748,12 @@ func TestClustersSync(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if ginfo.Cid.String() != test.TestCid2 {
if !ginfo.Cid.Equals(h2) {
t.Error("GlobalPinInfo should be for test.Cid2")
}
for _, c := range clusters {
inf, ok := ginfo.PeerMap[c.host.ID()]
inf, ok := ginfo.PeerMap[peer.IDB58Encode(c.host.ID())]
if !ok {
t.Fatal("GlobalPinInfo should have this cluster")
}
@ -770,8 +767,8 @@ func TestClustersRecoverLocal(t *testing.T) {
ctx := context.Background()
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
h, _ := cid.Decode(test.ErrorCid) // This cid always fails
h2, _ := cid.Decode(test.TestCid2)
h := test.ErrorCid // This cid always fails
h2 := test.Cid2
ttlDelay()
@ -810,8 +807,8 @@ func TestClustersRecover(t *testing.T) {
ctx := context.Background()
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
h, _ := cid.Decode(test.ErrorCid) // This cid always fails
h2, _ := cid.Decode(test.TestCid2)
h := test.ErrorCid // This cid always fails
h2 := test.Cid2
ttlDelay()
@ -837,7 +834,7 @@ func TestClustersRecover(t *testing.T) {
t.Fatal(err)
}
pinfo, ok := ginfo.PeerMap[clusters[j].host.ID()]
pinfo, ok := ginfo.PeerMap[peer.IDB58Encode(clusters[j].host.ID())]
if !ok {
t.Fatal("should have info for this host")
}
@ -846,7 +843,7 @@ func TestClustersRecover(t *testing.T) {
}
for _, c := range clusters {
inf, ok := ginfo.PeerMap[c.host.ID()]
inf, ok := ginfo.PeerMap[peer.IDB58Encode(c.host.ID())]
if !ok {
t.Fatal("GlobalPinInfo should not be empty for this host")
}
@ -863,12 +860,12 @@ func TestClustersRecover(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if ginfo.Cid.String() != test.TestCid2 {
if !ginfo.Cid.Equals(h2) {
t.Error("GlobalPinInfo should be for test.Cid2")
}
for _, c := range clusters {
inf, ok := ginfo.PeerMap[c.host.ID()]
inf, ok := ginfo.PeerMap[peer.IDB58Encode(c.host.ID())]
if !ok {
t.Fatal("GlobalPinInfo should have this cluster")
}
@ -912,8 +909,7 @@ func TestClustersReplication(t *testing.T) {
// will result in each peer holding locally exactly
// nCluster pins.
tmpCid, _ := cid.Decode(test.TestCid1)
prefix := tmpCid.Prefix()
prefix := test.Cid1.Prefix()
for i := 0; i < nClusters; i++ {
// Pick a random cluster and hash
@ -1014,7 +1010,7 @@ func TestClustersReplicationFactorMax(t *testing.T) {
ttlDelay()
h, _ := cid.Decode(test.TestCid1)
h := test.Cid1
err := clusters[0].Pin(ctx, api.PinCid(h))
if err != nil {
t.Fatal(err)
@ -1060,7 +1056,7 @@ func TestClustersReplicationFactorMaxLower(t *testing.T) {
ttlDelay() // make sure we have places to pin
h, _ := cid.Decode(test.TestCid1)
h := test.Cid1
err := clusters[0].Pin(ctx, api.PinCid(h))
if err != nil {
t.Fatal(err)
@ -1120,7 +1116,7 @@ func TestClustersReplicationFactorInBetween(t *testing.T) {
waitForLeaderAndMetrics(t, clusters)
h, _ := cid.Decode(test.TestCid1)
h := test.Cid1
err := clusters[0].Pin(ctx, api.PinCid(h))
if err != nil {
t.Fatal(err)
@ -1173,7 +1169,7 @@ func TestClustersReplicationFactorMin(t *testing.T) {
clusters[nClusters-2].Shutdown(ctx)
waitForLeaderAndMetrics(t, clusters)
h, _ := cid.Decode(test.TestCid1)
h := test.Cid1
err := clusters[0].Pin(ctx, api.PinCid(h))
if err == nil {
t.Error("Pin should have failed as rplMin cannot be satisfied")
@ -1201,7 +1197,7 @@ func TestClustersReplicationMinMaxNoRealloc(t *testing.T) {
ttlDelay()
h, _ := cid.Decode(test.TestCid1)
h := test.Cid1
err := clusters[0].Pin(ctx, api.PinCid(h))
if err != nil {
t.Fatal(err)
@ -1254,7 +1250,7 @@ func TestClustersReplicationMinMaxRealloc(t *testing.T) {
ttlDelay() // make sure metrics are in
h, _ := cid.Decode(test.TestCid1)
h := test.Cid1
err := clusters[0].Pin(ctx, api.PinCid(h))
if err != nil {
t.Fatal(err)
@ -1335,7 +1331,7 @@ func TestClustersReplicationRealloc(t *testing.T) {
ttlDelay()
j := rand.Intn(nClusters)
h, _ := cid.Decode(test.TestCid1)
h := test.Cid1
err := clusters[j].Pin(ctx, api.PinCid(h))
if err != nil {
t.Fatal(err)
@ -1345,8 +1341,7 @@ func TestClustersReplicationRealloc(t *testing.T) {
pinDelay()
pin := clusters[j].Pins(ctx)[0]
pinSerial := pin.ToSerial()
allocs := sort.StringSlice(pinSerial.Allocations)
allocs := sort.StringSlice(api.PeersToStrings(pin.Allocations))
allocs.Sort()
allocsStr := fmt.Sprintf("%s", allocs)
@ -1360,8 +1355,7 @@ func TestClustersReplicationRealloc(t *testing.T) {
pinDelay()
pin2 := clusters[j].Pins(ctx)[0]
pinSerial2 := pin2.ToSerial()
allocs2 := sort.StringSlice(pinSerial2.Allocations)
allocs2 := sort.StringSlice(api.PeersToStrings(pin2.Allocations))
allocs2.Sort()
allocsStr2 := fmt.Sprintf("%s", allocs2)
if allocsStr != allocsStr2 {
@ -1434,7 +1428,7 @@ func TestClustersReplicationNotEnoughPeers(t *testing.T) {
}
j := rand.Intn(nClusters)
h, _ := cid.Decode(test.TestCid1)
h := test.Cid1
err := clusters[j].Pin(ctx, api.PinCid(h))
if err != nil {
t.Fatal(err)
@ -1473,13 +1467,13 @@ func TestClustersRebalanceOnPeerDown(t *testing.T) {
}
// pin something
h, _ := cid.Decode(test.TestCid1)
h := test.Cid1
clusters[0].Pin(ctx, api.PinCid(h))
pinDelay()
pinLocal := 0
pinRemote := 0
var localPinner peer.ID
var remotePinner peer.ID
var localPinner string
var remotePinner string
var remotePinnerCluster *Cluster
status, _ := clusters[0].Status(ctx, h)
@ -1501,9 +1495,10 @@ func TestClustersRebalanceOnPeerDown(t *testing.T) {
// kill the local pinner
for _, c := range clusters {
if c.id == localPinner {
clid := peer.IDB58Encode(c.id)
if clid == localPinner {
c.Shutdown(ctx)
} else if c.id == remotePinner {
} else if clid == remotePinner {
remotePinnerCluster = c
}
}
@ -1521,7 +1516,7 @@ func TestClustersRebalanceOnPeerDown(t *testing.T) {
// peers in clusterIDs are fully connected to each other and the expected ipfs
// mock connectivity exists. Cluster peers not in clusterIDs are assumed to
// be disconnected and the graph should reflect this
func validateClusterGraph(t *testing.T, graph api.ConnectGraph, clusterIDs map[peer.ID]struct{}) {
func validateClusterGraph(t *testing.T, graph api.ConnectGraph, clusterIDs map[string]struct{}) {
// Check that all cluster peers see each other as peers
for id1, peers := range graph.ClusterLinks {
if _, ok := clusterIDs[id1]; !ok {
@ -1530,14 +1525,14 @@ func validateClusterGraph(t *testing.T, graph api.ConnectGraph, clusterIDs map[p
}
continue
}
fmt.Printf("id: %s, peers: %v\n", id1, peers)
t.Logf("id: %s, peers: %v\n", id1, peers)
if len(peers) > len(clusterIDs)-1 {
t.Errorf("More peers recorded in graph than expected")
}
// Make lookup index for peers connected to id1
peerIndex := make(map[peer.ID]struct{})
for _, peer := range peers {
peerIndex[peer] = struct{}{}
peerIndex := make(map[string]struct{})
for _, p := range peers {
peerIndex[peer.IDB58Encode(p)] = struct{}{}
}
for id2 := range clusterIDs {
if _, ok := peerIndex[id2]; id1 != id2 && !ok {
@ -1560,12 +1555,12 @@ func validateClusterGraph(t *testing.T, graph api.ConnectGraph, clusterIDs map[p
if len(graph.IPFSLinks) != 1 {
t.Error("Expected exactly one ipfs peer for all cluster nodes, the mocked peer")
}
links, ok := graph.IPFSLinks[test.TestPeerID1]
links, ok := graph.IPFSLinks[peer.IDB58Encode(test.PeerID1)]
if !ok {
t.Error("Expected the mocked ipfs peer to be a node in the graph")
} else {
if len(links) != 2 || links[0] != test.TestPeerID4 ||
links[1] != test.TestPeerID5 {
if len(links) != 2 || links[0] != test.PeerID4 ||
links[1] != test.PeerID5 {
t.Error("Swarm peers of mocked ipfs are not those expected")
}
}
@ -1575,7 +1570,7 @@ func validateClusterGraph(t *testing.T, graph api.ConnectGraph, clusterIDs map[p
if ipfsID, ok := graph.ClustertoIPFS[id]; !ok {
t.Errorf("Expected graph to record peer %s's ipfs connection", id)
} else {
if ipfsID != test.TestPeerID1 {
if ipfsID != test.PeerID1 {
t.Errorf("Unexpected error %s", ipfsID)
}
}
@ -1598,9 +1593,9 @@ func TestClustersGraphConnected(t *testing.T) {
t.Fatal(err)
}
clusterIDs := make(map[peer.ID]struct{})
clusterIDs := make(map[string]struct{})
for _, c := range clusters {
id := c.ID(ctx).ID
id := peer.IDB58Encode(c.ID(ctx).ID)
clusterIDs[id] = struct{}{}
}
validateClusterGraph(t, graph, clusterIDs)
@ -1642,12 +1637,12 @@ func TestClustersGraphUnhealthy(t *testing.T) {
t.Fatal(err)
}
clusterIDs := make(map[peer.ID]struct{})
clusterIDs := make(map[string]struct{})
for i, c := range clusters {
if i == discon1 || i == discon2 {
continue
}
id := c.ID(ctx).ID
id := peer.IDB58Encode(c.ID(ctx).ID)
clusterIDs[id] = struct{}{}
}
validateClusterGraph(t, graph, clusterIDs)
@ -1668,7 +1663,7 @@ func TestClustersDisabledRepinning(t *testing.T) {
ttlDelay()
j := rand.Intn(nClusters)
h, _ := cid.Decode(test.TestCid1)
h := test.Cid1
err := clusters[j].Pin(ctx, api.PinCid(h))
if err != nil {
t.Fatal(err)

View File

@ -30,7 +30,6 @@ import (
logging "github.com/ipfs/go-log"
rpc "github.com/libp2p/go-libp2p-gorpc"
peer "github.com/libp2p/go-libp2p-peer"
ma "github.com/multiformats/go-multiaddr"
madns "github.com/multiformats/go-multiaddr-dns"
manet "github.com/multiformats/go-multiaddr-net"
)
@ -219,38 +218,37 @@ func (ipfs *Connector) Shutdown(ctx context.Context) error {
// ID performs an ID request against the configured
// IPFS daemon. It returns the fetched information.
// If the request fails, or the parsing fails, it
// returns an error and an empty IPFSID which also
// contains the error message.
func (ipfs *Connector) ID(ctx context.Context) (api.IPFSID, error) {
// returns an error.
func (ipfs *Connector) ID(ctx context.Context) (*api.IPFSID, error) {
ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/ID")
defer span.End()
ctx, cancel := context.WithTimeout(ctx, ipfs.config.IPFSRequestTimeout)
defer cancel()
id := api.IPFSID{}
body, err := ipfs.postCtx(ctx, "id", "", nil)
if err != nil {
id.Error = err.Error()
return id, err
return nil, err
}
var res ipfsIDResp
err = json.Unmarshal(body, &res)
if err != nil {
id.Error = err.Error()
return id, err
return nil, err
}
pID, err := peer.IDB58Decode(res.ID)
if err != nil {
id.Error = err.Error()
return id, err
return nil, err
}
id.ID = pID
mAddrs := make([]ma.Multiaddr, len(res.Addresses), len(res.Addresses))
id := &api.IPFSID{
ID: pID,
}
mAddrs := make([]api.Multiaddr, len(res.Addresses), len(res.Addresses))
for i, strAddr := range res.Addresses {
mAddr, err := ma.NewMultiaddr(strAddr)
mAddr, err := api.NewMultiaddr(strAddr)
if err != nil {
id.Error = err.Error()
return id, err
@ -498,30 +496,29 @@ func (ipfs *Connector) ConnectSwarms(ctx context.Context) error {
ctx, cancel := context.WithTimeout(ctx, ipfs.config.IPFSRequestTimeout)
defer cancel()
idsSerial := make([]api.IDSerial, 0)
var ids []*api.ID
err := ipfs.rpcClient.CallContext(
ctx,
"",
"Cluster",
"Peers",
struct{}{},
&idsSerial,
&ids,
)
if err != nil {
logger.Error(err)
return err
}
logger.Debugf("%+v", idsSerial)
for _, idSerial := range idsSerial {
ipfsID := idSerial.IPFS
for _, id := range ids {
ipfsID := id.IPFS
for _, addr := range ipfsID.Addresses {
// This is a best effort attempt
// We ignore errors which happens
// when passing in a bunch of addresses
_, err := ipfs.postCtx(
ctx,
fmt.Sprintf("swarm/connect?arg=%s", addr),
fmt.Sprintf("swarm/connect?arg=%s", addr.String()),
"",
nil,
)
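// Illustrative only, not part of the diff: addresses now travel as
// api.Multiaddr values, so building the HTTP API path goes through
// String() explicitly. "swarmConnectPath" is a hypothetical helper.
func swarmConnectPath(straddr string) (string, error) {
	addr, err := api.NewMultiaddr(straddr)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("swarm/connect?arg=%s", addr.String()), nil
}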
@ -583,7 +580,7 @@ func getConfigValue(path []string, cfg map[string]interface{}) (interface{}, err
// RepoStat returns the DiskUsage and StorageMax repo/stat values from the
// ipfs daemon, in bytes, wrapped as an IPFSRepoStat object.
func (ipfs *Connector) RepoStat(ctx context.Context) (api.IPFSRepoStat, error) {
func (ipfs *Connector) RepoStat(ctx context.Context) (*api.IPFSRepoStat, error) {
ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/RepoStat")
defer span.End()
@ -592,16 +589,16 @@ func (ipfs *Connector) RepoStat(ctx context.Context) (api.IPFSRepoStat, error) {
res, err := ipfs.postCtx(ctx, "repo/stat?size-only=true", "", nil)
if err != nil {
logger.Error(err)
return api.IPFSRepoStat{}, err
return nil, err
}
var stats api.IPFSRepoStat
err = json.Unmarshal(res, &stats)
if err != nil {
logger.Error(err)
return stats, err
return nil, err
}
return stats, nil
return &stats, nil
}
// Resolve accepts an ipfs or ipns path and resolves it into a cid
@ -640,26 +637,26 @@ func (ipfs *Connector) Resolve(ctx context.Context, path string) (cid.Cid, error
}
// SwarmPeers returns the peers currently connected to this ipfs daemon.
func (ipfs *Connector) SwarmPeers(ctx context.Context) (api.SwarmPeers, error) {
func (ipfs *Connector) SwarmPeers(ctx context.Context) ([]peer.ID, error) {
ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/SwarmPeers")
defer span.End()
ctx, cancel := context.WithTimeout(ctx, ipfs.config.IPFSRequestTimeout)
defer cancel()
swarm := api.SwarmPeers{}
res, err := ipfs.postCtx(ctx, "swarm/peers", "", nil)
if err != nil {
logger.Error(err)
return swarm, err
return nil, err
}
var peersRaw ipfsSwarmPeersResp
err = json.Unmarshal(res, &peersRaw)
if err != nil {
logger.Error(err)
return swarm, err
return nil, err
}
swarm = make([]peer.ID, len(peersRaw.Peers))
swarm := make([]peer.ID, len(peersRaw.Peers))
for i, p := range peersRaw.Peers {
pID, err := peer.IDB58Decode(p.Peer)
if err != nil {
@ -673,7 +670,7 @@ func (ipfs *Connector) SwarmPeers(ctx context.Context) (api.SwarmPeers, error) {
// BlockPut triggers an ipfs block put on the given data, inserting the block
// into the ipfs daemon's repo.
func (ipfs *Connector) BlockPut(ctx context.Context, b api.NodeWithMeta) error {
func (ipfs *Connector) BlockPut(ctx context.Context, b *api.NodeWithMeta) error {
ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/BlockPut")
defer span.End()

View File

@ -7,7 +7,6 @@ import (
"testing"
"time"
cid "github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log"
ma "github.com/multiformats/go-multiaddr"
@ -53,7 +52,7 @@ func TestIPFSID(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if id.ID != test.TestPeerID1 {
if id.ID != test.PeerID1 {
t.Error("expected testPeerID")
}
if len(id.Addresses) != 1 {
@ -67,9 +66,6 @@ func TestIPFSID(t *testing.T) {
if err == nil {
t.Error("expected an error")
}
if id.Error != err.Error() {
t.Error("error messages should match")
}
}
func testPin(t *testing.T, method string) {
@ -80,7 +76,7 @@ func testPin(t *testing.T, method string) {
ipfs.config.PinMethod = method
c, _ := cid.Decode(test.TestCid1)
c := test.Cid1
err := ipfs.Pin(ctx, c, -1)
if err != nil {
t.Error("expected success pinning cid")
@ -93,7 +89,7 @@ func testPin(t *testing.T, method string) {
t.Error("cid should have been pinned")
}
c2, _ := cid.Decode(test.ErrorCid)
c2 := test.ErrorCid
err = ipfs.Pin(ctx, c2, -1)
if err == nil {
t.Error("expected error pinning cid")
@ -110,7 +106,7 @@ func TestIPFSUnpin(t *testing.T) {
ipfs, mock := testIPFSConnector(t)
defer mock.Close()
defer ipfs.Shutdown(ctx)
c, _ := cid.Decode(test.TestCid1)
c := test.Cid1
err := ipfs.Unpin(ctx, c)
if err != nil {
t.Error("expected success unpinning non-pinned cid")
@ -127,8 +123,8 @@ func TestIPFSPinLsCid(t *testing.T) {
ipfs, mock := testIPFSConnector(t)
defer mock.Close()
defer ipfs.Shutdown(ctx)
c, _ := cid.Decode(test.TestCid1)
c2, _ := cid.Decode(test.TestCid2)
c := test.Cid1
c2 := test.Cid2
ipfs.Pin(ctx, c, -1)
ips, err := ipfs.PinLsCid(ctx, c)
@ -147,8 +143,8 @@ func TestIPFSPinLs(t *testing.T) {
ipfs, mock := testIPFSConnector(t)
defer mock.Close()
defer ipfs.Shutdown(ctx)
c, _ := cid.Decode(test.TestCid1)
c2, _ := cid.Decode(test.TestCid2)
c := test.Cid1
c2 := test.Cid2
ipfs.Pin(ctx, c, -1)
ipfs.Pin(ctx, c2, -1)
@ -161,7 +157,7 @@ func TestIPFSPinLs(t *testing.T) {
t.Fatal("the map does not contain expected keys")
}
if !ipsMap[test.TestCid1].IsPinned(-1) || !ipsMap[test.TestCid2].IsPinned(-1) {
if !ipsMap[test.Cid1.String()].IsPinned(-1) || !ipsMap[test.Cid2.String()].IsPinned(-1) {
t.Error("c1 and c2 should appear pinned")
}
}
@ -204,10 +200,10 @@ func TestSwarmPeers(t *testing.T) {
if len(swarmPeers) != 2 {
t.Fatal("expected 2 swarm peers")
}
if swarmPeers[0] != test.TestPeerID4 {
if swarmPeers[0] != test.PeerID4 {
t.Error("unexpected swarm peer")
}
if swarmPeers[1] != test.TestPeerID5 {
if swarmPeers[1] != test.PeerID5 {
t.Error("unexpected swarm peer")
}
}
@ -218,10 +214,10 @@ func TestBlockPut(t *testing.T) {
defer mock.Close()
defer ipfs.Shutdown(ctx)
data := []byte(test.TestCid4Data)
err := ipfs.BlockPut(ctx, api.NodeWithMeta{
data := []byte(test.Cid4Data)
err := ipfs.BlockPut(ctx, &api.NodeWithMeta{
Data: data,
Cid: test.TestCid4,
Cid: test.Cid4,
Format: "raw",
})
if err != nil {
@ -235,20 +231,17 @@ func TestBlockGet(t *testing.T) {
defer mock.Close()
defer ipfs.Shutdown(ctx)
shardCid, err := cid.Decode(test.TestShardCid)
if err != nil {
t.Fatal(err)
}
shardCid := test.ShardCid
// Fail when getting before putting
_, err = ipfs.BlockGet(ctx, shardCid)
_, err := ipfs.BlockGet(ctx, shardCid)
if err == nil {
t.Fatal("expected to fail getting unput block")
}
// Put and then successfully get
err = ipfs.BlockPut(ctx, api.NodeWithMeta{
Data: test.TestShardData,
Cid: test.TestShardCid,
err = ipfs.BlockPut(ctx, &api.NodeWithMeta{
Data: test.ShardData,
Cid: test.ShardCid,
Format: "cbor",
})
if err != nil {
@ -259,7 +252,7 @@ func TestBlockGet(t *testing.T) {
if err != nil {
t.Error(err)
}
if !bytes.Equal(data, test.TestShardData) {
if !bytes.Equal(data, test.ShardData) {
t.Fatal("unexpected data returned")
}
}
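// Illustrative only, not part of the diff: NodeWithMeta now carries a
// cid.Cid rather than a string, so blocks are described without string
// round-trips. "putRawBlock" is a hypothetical helper around Connector.
func putRawBlock(ctx context.Context, ipfs *Connector, c cid.Cid, data []byte) error {
	return ipfs.BlockPut(ctx, &api.NodeWithMeta{
		Cid:    c,
		Data:   data,
		Format: "raw",
	})
}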
@ -279,7 +272,7 @@ func TestRepoStat(t *testing.T) {
t.Error("expected 0 bytes of size")
}
c, _ := cid.Decode(test.TestCid1)
c := test.Cid1
err = ipfs.Pin(ctx, c, -1)
if err != nil {
t.Error("expected success pinning cid")
@ -300,12 +293,12 @@ func TestResolve(t *testing.T) {
defer mock.Close()
defer ipfs.Shutdown(ctx)
s, err := ipfs.Resolve(ctx, test.TestPathIPFS2)
s, err := ipfs.Resolve(ctx, test.PathIPFS2)
if err != nil {
t.Error(err)
}
if s.String() != test.TestCidResolved {
t.Errorf("expected different cid, expected: %s, found: %s\n", test.TestCidResolved, s.String())
if !s.Equals(test.CidResolved) {
t.Errorf("expected different cid, expected: %s, found: %s\n", test.CidResolved, s.String())
}
}

View File

@ -13,6 +13,7 @@ var LoggingFacilities = map[string]string{
"ipfshttp": "INFO",
"monitor": "INFO",
"mapstate": "INFO",
"dsstate": "INFO",
"consensus": "INFO",
"pintracker": "INFO",
"ascendalloc": "INFO",

View File

@ -103,7 +103,7 @@ func (mon *Monitor) Shutdown(ctx context.Context) error {
}
// LogMetric stores a metric so it can later be retrieved.
func (mon *Monitor) LogMetric(ctx context.Context, m api.Metric) error {
func (mon *Monitor) LogMetric(ctx context.Context, m *api.Metric) error {
ctx, span := trace.StartSpan(ctx, "monitor/basic/LogMetric")
defer span.End()
@ -113,7 +113,7 @@ func (mon *Monitor) LogMetric(ctx context.Context, m api.Metric) error {
}
// PublishMetric broadcasts a metric to all current cluster peers.
func (mon *Monitor) PublishMetric(ctx context.Context, m api.Metric) error {
func (mon *Monitor) PublishMetric(ctx context.Context, m *api.Metric) error {
ctx, span := trace.StartSpan(ctx, "monitor/basic/PublishMetric")
defer span.End()
@ -197,7 +197,7 @@ func (mon *Monitor) getPeers(ctx context.Context) ([]peer.ID, error) {
// LatestMetrics returns last known VALID metrics of a given type. A metric
// is only valid if it has not expired and belongs to a current cluster peer.
func (mon *Monitor) LatestMetrics(ctx context.Context, name string) []api.Metric {
func (mon *Monitor) LatestMetrics(ctx context.Context, name string) []*api.Metric {
ctx, span := trace.StartSpan(ctx, "monitor/basic/LatestMetrics")
defer span.End()
@ -206,7 +206,7 @@ func (mon *Monitor) LatestMetrics(ctx context.Context, name string) []api.Metric
// Make sure we only return metrics in the current peerset
peers, err := mon.getPeers(ctx)
if err != nil {
return []api.Metric{}
return []*api.Metric{}
}
return metrics.PeersetFilter(latest, peers)
@ -214,6 +214,6 @@ func (mon *Monitor) LatestMetrics(ctx context.Context, name string) []api.Metric
// Alerts returns a channel on which alerts are sent when the
// monitor detects a failure.
func (mon *Monitor) Alerts() <-chan api.Alert {
func (mon *Monitor) Alerts() <-chan *api.Alert {
return mon.checker.Alerts()
}

View File

@ -28,10 +28,10 @@ func newMetricFactory() *metricFactory {
}
}
func (mf *metricFactory) newMetric(n string, p peer.ID) api.Metric {
func (mf *metricFactory) newMetric(n string, p peer.ID) *api.Metric {
mf.l.Lock()
defer mf.l.Unlock()
m := api.Metric{
m := &api.Metric{
Name: n,
Peer: p,
Value: fmt.Sprintf("%d", mf.counter),
@ -91,9 +91,9 @@ func TestLogMetricConcurrent(t *testing.T) {
f := func() {
defer wg.Done()
for i := 0; i < 25; i++ {
mt := api.Metric{
mt := &api.Metric{
Name: "test",
Peer: test.TestPeerID1,
Peer: test.PeerID1,
Value: fmt.Sprintf("%d", time.Now().UnixNano()),
Valid: true,
}
@ -145,15 +145,15 @@ func TestPeerMonitorLogMetric(t *testing.T) {
mf := newMetricFactory()
// don't fill window
pm.LogMetric(ctx, mf.newMetric("test", test.TestPeerID1))
pm.LogMetric(ctx, mf.newMetric("test", test.TestPeerID2))
pm.LogMetric(ctx, mf.newMetric("test", test.TestPeerID3))
pm.LogMetric(ctx, mf.newMetric("test", test.PeerID1))
pm.LogMetric(ctx, mf.newMetric("test", test.PeerID2))
pm.LogMetric(ctx, mf.newMetric("test", test.PeerID3))
// fill window
pm.LogMetric(ctx, mf.newMetric("test2", test.TestPeerID3))
pm.LogMetric(ctx, mf.newMetric("test2", test.TestPeerID3))
pm.LogMetric(ctx, mf.newMetric("test2", test.TestPeerID3))
pm.LogMetric(ctx, mf.newMetric("test2", test.TestPeerID3))
pm.LogMetric(ctx, mf.newMetric("test2", test.PeerID3))
pm.LogMetric(ctx, mf.newMetric("test2", test.PeerID3))
pm.LogMetric(ctx, mf.newMetric("test2", test.PeerID3))
pm.LogMetric(ctx, mf.newMetric("test2", test.PeerID3))
latestMetrics := pm.LatestMetrics(ctx, "testbad")
if len(latestMetrics) != 0 {
@ -168,15 +168,15 @@ func TestPeerMonitorLogMetric(t *testing.T) {
for _, v := range latestMetrics {
switch v.Peer {
case test.TestPeerID1:
case test.PeerID1:
if v.Value != "0" {
t.Error("bad metric value")
}
case test.TestPeerID2:
case test.PeerID2:
if v.Value != "1" {
t.Error("bad metric value")
}
case test.TestPeerID3:
case test.PeerID3:
if v.Value != "2" {
t.Error("bad metric value")
}
@ -206,7 +206,7 @@ func TestPeerMonitorPublishMetric(t *testing.T) {
defer h.Close()
mf := newMetricFactory()
metric := mf.newMetric("test", test.TestPeerID1)
metric := mf.newMetric("test", test.PeerID1)
err = pm.PublishMetric(ctx, metric)
// Note mock rpc returns 3 consensus peers and we cannot
@ -223,7 +223,7 @@ func TestPeerMonitorAlerts(t *testing.T) {
defer pm.Shutdown(ctx)
mf := newMetricFactory()
mtr := mf.newMetric("test", test.TestPeerID1)
mtr := mf.newMetric("test", test.PeerID1)
mtr.SetTTL(0)
pm.LogMetric(ctx, mtr)
time.Sleep(time.Second)
@ -238,7 +238,7 @@ func TestPeerMonitorAlerts(t *testing.T) {
if alrt.MetricName != "test" {
t.Error("Alert should be for test")
}
if alrt.Peer != test.TestPeerID1 {
if alrt.Peer != test.PeerID1 {
t.Error("Peer should be TestPeerID1")
}
}

View File

@ -19,7 +19,7 @@ var ErrAlertChannelFull = errors.New("alert channel is full")
// Checker provides utilities to find expired metrics
// for a given peerset and send alerts if it proceeds to do so.
type Checker struct {
alertCh chan api.Alert
alertCh chan *api.Alert
metrics *Store
}
@ -27,7 +27,7 @@ type Checker struct {
// MetricsStore.
func NewChecker(metrics *Store) *Checker {
return &Checker{
alertCh: make(chan api.Alert, AlertChannelCap),
alertCh: make(chan *api.Alert, AlertChannelCap),
metrics: metrics,
}
}
@ -49,7 +49,7 @@ func (mc *Checker) CheckPeers(peers []peer.ID) error {
}
func (mc *Checker) alert(pid peer.ID, metricName string) error {
alrt := api.Alert{
alrt := &api.Alert{
Peer: pid,
MetricName: metricName,
}
@ -62,7 +62,7 @@ func (mc *Checker) alert(pid peer.ID, metricName string) error {
}
// Alerts returns a channel which gets notified by CheckPeers.
func (mc *Checker) Alerts() <-chan api.Alert {
func (mc *Checker) Alerts() <-chan *api.Alert {
return mc.alertCh
}
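// Illustrative only, not part of the diff: the alert channel is a buffered
// chan *api.Alert (capacity AlertChannelCap), so a non-blocking send like
// the sketch below is one way a full channel can surface
// ErrAlertChannelFull instead of blocking CheckPeers.
func trySendAlert(ch chan *api.Alert, alrt *api.Alert) error {
	select {
	case ch <- alrt:
		return nil
	default:
		return ErrAlertChannelFull
	}
}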

View File

@ -15,9 +15,9 @@ func TestChecker(t *testing.T) {
metrics := NewStore()
checker := NewChecker(metrics)
metr := api.Metric{
metr := &api.Metric{
Name: "test",
Peer: test.TestPeerID1,
Peer: test.PeerID1,
Value: "1",
Valid: true,
}
@ -25,7 +25,7 @@ func TestChecker(t *testing.T) {
metrics.Add(metr)
checker.CheckPeers([]peer.ID{test.TestPeerID1})
checker.CheckPeers([]peer.ID{test.PeerID1})
select {
case <-checker.Alerts():
t.Error("there should not be an alert yet")
@ -33,7 +33,7 @@ func TestChecker(t *testing.T) {
}
time.Sleep(3 * time.Second)
err := checker.CheckPeers([]peer.ID{test.TestPeerID1})
err := checker.CheckPeers([]peer.ID{test.PeerID1})
if err != nil {
t.Fatal(err)
}
@ -44,7 +44,7 @@ func TestChecker(t *testing.T) {
t.Error("an alert should have been triggered")
}
checker.CheckPeers([]peer.ID{test.TestPeerID2})
checker.CheckPeers([]peer.ID{test.PeerID2})
select {
case <-checker.Alerts():
t.Error("there should not be alerts for different peer")
@ -59,9 +59,9 @@ func TestCheckerWatch(t *testing.T) {
metrics := NewStore()
checker := NewChecker(metrics)
metr := api.Metric{
metr := &api.Metric{
Name: "test",
Peer: test.TestPeerID1,
Peer: test.PeerID1,
Value: "1",
Valid: true,
}
@ -69,7 +69,7 @@ func TestCheckerWatch(t *testing.T) {
metrics.Add(metr)
peersF := func(context.Context) ([]peer.ID, error) {
return []peer.ID{test.TestPeerID1}, nil
return []peer.ID{test.PeerID1}, nil
}
go checker.Watch(ctx, peersF, 200*time.Millisecond)

View File

@ -25,7 +25,7 @@ func NewStore() *Store {
}
// Add inserts a new metric in Metrics.
func (mtrs *Store) Add(m api.Metric) {
func (mtrs *Store) Add(m *api.Metric) {
mtrs.mux.Lock()
defer mtrs.mux.Unlock()
@ -49,16 +49,16 @@ func (mtrs *Store) Add(m api.Metric) {
// Latest returns all the last known valid metrics. A metric is valid
// if it has not expired.
func (mtrs *Store) Latest(name string) []api.Metric {
func (mtrs *Store) Latest(name string) []*api.Metric {
mtrs.mux.RLock()
defer mtrs.mux.RUnlock()
byPeer, ok := mtrs.byName[name]
if !ok {
return []api.Metric{}
return []*api.Metric{}
}
metrics := make([]api.Metric, 0, len(byPeer))
metrics := make([]*api.Metric, 0, len(byPeer))
for _, window := range byPeer {
m, err := window.Latest()
if err != nil || m.Discard() {
@ -71,11 +71,11 @@ func (mtrs *Store) Latest(name string) []api.Metric {
// PeerMetrics returns the latest metrics for a given peer ID for
// all known metrics types. It may return expired metrics.
func (mtrs *Store) PeerMetrics(pid peer.ID) []api.Metric {
func (mtrs *Store) PeerMetrics(pid peer.ID) []*api.Metric {
mtrs.mux.RLock()
defer mtrs.mux.RUnlock()
result := make([]api.Metric, 0)
result := make([]*api.Metric, 0)
for _, byPeer := range mtrs.byName {
window, ok := byPeer[pid]

View File

@ -11,9 +11,9 @@ import (
func TestStoreLatest(t *testing.T) {
store := NewStore()
metr := api.Metric{
metr := &api.Metric{
Name: "test",
Peer: test.TestPeerID1,
Peer: test.PeerID1,
Value: "1",
Valid: true,
}

View File

@ -8,13 +8,13 @@ import (
// PeersetFilter removes all metrics not belonging to the given
// peerset
func PeersetFilter(metrics []api.Metric, peerset []peer.ID) []api.Metric {
func PeersetFilter(metrics []*api.Metric, peerset []peer.ID) []*api.Metric {
peerMap := make(map[peer.ID]struct{})
for _, pid := range peerset {
peerMap[pid] = struct{}{}
}
filtered := make([]api.Metric, 0, len(metrics))
filtered := make([]*api.Metric, 0, len(metrics))
for _, metric := range metrics {
_, ok := peerMap[metric.Peer]

View File

@ -18,7 +18,7 @@ var ErrNoMetrics = errors.New("no metrics have been added to this window")
// Window implements a circular queue to store metrics.
type Window struct {
last int
window []api.Metric
window []*api.Metric
}
// NewWindow creates an instance with the given
@ -28,7 +28,7 @@ func NewWindow(windowCap int) *Window {
panic("invalid windowCap")
}
w := make([]api.Metric, 0, windowCap)
w := make([]*api.Metric, 0, windowCap)
return &Window{
last: 0,
window: w,
@ -38,7 +38,7 @@ func NewWindow(windowCap int) *Window {
// Add adds a new metric to the window. If the window capacity
// has been reached, the oldest metric (by the time it was added),
// will be discarded.
func (mw *Window) Add(m api.Metric) {
func (mw *Window) Add(m *api.Metric) {
if len(mw.window) < cap(mw.window) {
mw.window = append(mw.window, m)
mw.last = len(mw.window) - 1
@ -53,9 +53,9 @@ func (mw *Window) Add(m api.Metric) {
// Latest returns the last metric added. It returns an error
// if no metrics were added.
func (mw *Window) Latest() (api.Metric, error) {
func (mw *Window) Latest() (*api.Metric, error) {
if len(mw.window) == 0 {
return api.Metric{}, ErrNoMetrics
return nil, ErrNoMetrics
}
return mw.window[mw.last], nil
}
@ -63,9 +63,9 @@ func (mw *Window) Latest() (api.Metric, error) {
// All returns all the metrics in the window, in the inverse order
// they were Added. That is, result[0] will be the last added
// metric.
func (mw *Window) All() []api.Metric {
func (mw *Window) All() []*api.Metric {
wlen := len(mw.window)
res := make([]api.Metric, 0, wlen)
res := make([]*api.Metric, 0, wlen)
if wlen == 0 {
return res
}
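// Illustrative only, not part of the diff: the circular window now stores
// *api.Metric, but the behaviour documented above is unchanged: once
// capacity is reached, Add discards the oldest entry while Latest keeps
// returning the newest one.
func windowExample() {
	w := NewWindow(2)
	w.Add(&api.Metric{Name: "test", Value: "1", Valid: true})
	w.Add(&api.Metric{Name: "test", Value: "2", Valid: true})
	w.Add(&api.Metric{Name: "test", Value: "3", Valid: true}) // "1" is discarded
	latest, _ := w.Latest()                                   // latest.Value == "3"
	_ = latest
	all := w.All() // newest first: "3", then "2"
	_ = all
}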

View File

@ -7,6 +7,17 @@ import (
"github.com/ipfs/ipfs-cluster/api"
)
func makeMetric(value string) *api.Metric {
metr := &api.Metric{
Name: "test",
Peer: "peer1",
Value: value,
Valid: true,
}
metr.SetTTL(5 * time.Second)
return metr
}
func TestMetricsWindow(t *testing.T) {
mw := NewWindow(4)
@ -19,15 +30,7 @@ func TestMetricsWindow(t *testing.T) {
t.Error("expected 0 metrics")
}
metr := api.Metric{
Name: "test",
Peer: "peer1",
Value: "1",
Valid: true,
}
metr.SetTTL(5 * time.Second)
mw.Add(metr)
mw.Add(makeMetric("1"))
metr2, err := mw.Latest()
if err != nil {
@ -38,10 +41,8 @@ func TestMetricsWindow(t *testing.T) {
t.Error("expected different value")
}
metr.Value = "2"
mw.Add(metr)
metr.Value = "3"
mw.Add(metr)
mw.Add(makeMetric("2"))
mw.Add(makeMetric("3"))
all := mw.All()
if len(all) != 3 {
@ -56,10 +57,8 @@ func TestMetricsWindow(t *testing.T) {
t.Error("older metric should be second")
}
metr.Value = "4"
mw.Add(metr)
metr.Value = "5"
mw.Add(metr)
mw.Add(makeMetric("4"))
mw.Add(makeMetric("5"))
all = mw.All()
if len(all) != 4 {

View File

@ -130,7 +130,7 @@ func (mon *Monitor) logFromPubsub() {
metric.Peer,
)
err = mon.LogMetric(ctx, metric)
err = mon.LogMetric(ctx, &metric)
if err != nil {
logger.Error(err)
continue
@ -170,7 +170,7 @@ func (mon *Monitor) Shutdown(ctx context.Context) error {
}
// LogMetric stores a metric so it can later be retrieved.
func (mon *Monitor) LogMetric(ctx context.Context, m api.Metric) error {
func (mon *Monitor) LogMetric(ctx context.Context, m *api.Metric) error {
ctx, span := trace.StartSpan(ctx, "monitor/pubsub/LogMetric")
defer span.End()
@ -180,7 +180,7 @@ func (mon *Monitor) LogMetric(ctx context.Context, m api.Metric) error {
}
// PublishMetric broadcasts a metric to all current cluster peers.
func (mon *Monitor) PublishMetric(ctx context.Context, m api.Metric) error {
func (mon *Monitor) PublishMetric(ctx context.Context, m *api.Metric) error {
ctx, span := trace.StartSpan(ctx, "monitor/pubsub/PublishMetric")
defer span.End()
@ -235,7 +235,7 @@ func (mon *Monitor) getPeers(ctx context.Context) ([]peer.ID, error) {
// LatestMetrics returns last known VALID metrics of a given type. A metric
// is only valid if it has not expired and belongs to a current cluster peer.
func (mon *Monitor) LatestMetrics(ctx context.Context, name string) []api.Metric {
func (mon *Monitor) LatestMetrics(ctx context.Context, name string) []*api.Metric {
ctx, span := trace.StartSpan(ctx, "monitor/pubsub/LatestMetrics")
defer span.End()
@ -244,7 +244,7 @@ func (mon *Monitor) LatestMetrics(ctx context.Context, name string) []api.Metric
// Make sure we only return metrics in the current peerset
peers, err := mon.getPeers(ctx)
if err != nil {
return []api.Metric{}
return []*api.Metric{}
}
return metrics.PeersetFilter(latest, peers)
@ -252,6 +252,6 @@ func (mon *Monitor) LatestMetrics(ctx context.Context, name string) []api.Metric
// Alerts returns a channel on which alerts are sent when the
// monitor detects a failure.
func (mon *Monitor) Alerts() <-chan api.Alert {
func (mon *Monitor) Alerts() <-chan *api.Alert {
return mon.checker.Alerts()
}

View File

@ -35,10 +35,10 @@ func newMetricFactory() *metricFactory {
}
}
func (mf *metricFactory) newMetric(n string, p peer.ID) api.Metric {
func (mf *metricFactory) newMetric(n string, p peer.ID) *api.Metric {
mf.l.Lock()
defer mf.l.Unlock()
m := api.Metric{
m := &api.Metric{
Name: n,
Peer: p,
Value: fmt.Sprintf("%d", mf.counter),
@ -111,9 +111,9 @@ func TestLogMetricConcurrent(t *testing.T) {
f := func() {
defer wg.Done()
for i := 0; i < 25; i++ {
mt := api.Metric{
mt := &api.Metric{
Name: "test",
Peer: test.TestPeerID1,
Peer: test.PeerID1,
Value: fmt.Sprintf("%d", time.Now().UnixNano()),
Valid: true,
}
@ -165,15 +165,15 @@ func TestPeerMonitorLogMetric(t *testing.T) {
mf := newMetricFactory()
// don't fill window
pm.LogMetric(ctx, mf.newMetric("test", test.TestPeerID1))
pm.LogMetric(ctx, mf.newMetric("test", test.TestPeerID2))
pm.LogMetric(ctx, mf.newMetric("test", test.TestPeerID3))
pm.LogMetric(ctx, mf.newMetric("test", test.PeerID1))
pm.LogMetric(ctx, mf.newMetric("test", test.PeerID2))
pm.LogMetric(ctx, mf.newMetric("test", test.PeerID3))
// fill window
pm.LogMetric(ctx, mf.newMetric("test2", test.TestPeerID3))
pm.LogMetric(ctx, mf.newMetric("test2", test.TestPeerID3))
pm.LogMetric(ctx, mf.newMetric("test2", test.TestPeerID3))
pm.LogMetric(ctx, mf.newMetric("test2", test.TestPeerID3))
pm.LogMetric(ctx, mf.newMetric("test2", test.PeerID3))
pm.LogMetric(ctx, mf.newMetric("test2", test.PeerID3))
pm.LogMetric(ctx, mf.newMetric("test2", test.PeerID3))
pm.LogMetric(ctx, mf.newMetric("test2", test.PeerID3))
latestMetrics := pm.LatestMetrics(ctx, "testbad")
if len(latestMetrics) != 0 {
@ -188,15 +188,15 @@ func TestPeerMonitorLogMetric(t *testing.T) {
for _, v := range latestMetrics {
switch v.Peer {
case test.TestPeerID1:
case test.PeerID1:
if v.Value != "0" {
t.Error("bad metric value")
}
case test.TestPeerID2:
case test.PeerID2:
if v.Value != "1" {
t.Error("bad metric value")
}
case test.TestPeerID3:
case test.PeerID3:
if v.Value != "2" {
t.Error("bad metric value")
}
@ -239,7 +239,7 @@ func TestPeerMonitorPublishMetric(t *testing.T) {
mf := newMetricFactory()
metric := mf.newMetric("test", test.TestPeerID1)
metric := mf.newMetric("test", test.PeerID1)
err = pm.PublishMetric(ctx, metric)
if err != nil {
t.Fatal(err)
@ -276,7 +276,7 @@ func TestPeerMonitorAlerts(t *testing.T) {
defer shutdown()
mf := newMetricFactory()
mtr := mf.newMetric("test", test.TestPeerID1)
mtr := mf.newMetric("test", test.PeerID1)
mtr.SetTTL(0)
pm.LogMetric(ctx, mtr)
time.Sleep(time.Second)
@ -291,7 +291,7 @@ func TestPeerMonitorAlerts(t *testing.T) {
if alrt.MetricName != "test" {
t.Error("Alert should be for test")
}
if alrt.Peer != test.TestPeerID1 {
if alrt.Peer != test.PeerID1 {
t.Error("Peer should be PeerID1")
}
}

View File

@ -82,7 +82,7 @@ func TestClustersPeerAdd(t *testing.T) {
}
}
h, _ := cid.Decode(test.TestCid1)
h := test.Cid1
err := clusters[1].Pin(ctx, api.PinCid(h))
if err != nil {
t.Fatal(err)
@ -362,8 +362,7 @@ func TestClustersPeerRemoveReallocsPins(t *testing.T) {
defer leader.Shutdown(ctx)
defer leaderMock.Close()
tmpCid, _ := cid.Decode(test.TestCid1)
prefix := tmpCid.Prefix()
prefix := test.Cid1.Prefix()
// Pin nCluster random pins. This ensures each peer will
// pin the same number of Cids.
@ -436,7 +435,7 @@ func TestClustersPeerJoin(t *testing.T) {
t.Fatal(err)
}
}
hash, _ := cid.Decode(test.TestCid1)
hash := test.Cid1
clusters[0].Pin(ctx, api.PinCid(hash))
pinDelay()
@ -479,7 +478,7 @@ func TestClustersPeerJoinAllAtOnce(t *testing.T) {
}
runF(t, clusters[1:], f)
hash, _ := cid.Decode(test.TestCid1)
hash := test.Cid1
clusters[0].Pin(ctx, api.PinCid(hash))
pinDelay()
@ -524,7 +523,7 @@ func TestClustersPeerJoinAllAtOnce(t *testing.T) {
// }
// runF(t, clusters[2:], f)
// hash, _ := cid.Decode(test.TestCid1)
// hash := test.Cid1
// clusters[0].Pin(api.PinCid(hash))
// delay()
@ -552,7 +551,7 @@ func TestClustersPeerRejoin(t *testing.T) {
defer shutdownClusters(t, clusters, mocks)
// pin something in c0
pin1, _ := cid.Decode(test.TestCid1)
pin1 := test.Cid1
err := clusters[0].Pin(ctx, api.PinCid(pin1))
if err != nil {
t.Fatal(err)
@ -592,7 +591,7 @@ func TestClustersPeerRejoin(t *testing.T) {
runF(t, clusters[1:], f)
// Pin something on the rest
pin2, _ := cid.Decode(test.TestCid2)
pin2 := test.Cid2
err = clusters[1].Pin(ctx, api.PinCid(pin2))
if err != nil {
t.Fatal(err)

View File

@ -139,7 +139,7 @@ func (mpt *MapPinTracker) pin(op *optracker.Operation) error {
"",
"Cluster",
"IPFSPin",
op.Pin().ToSerial(),
op.Pin(),
&struct{}{},
)
if err != nil {
@ -158,7 +158,7 @@ func (mpt *MapPinTracker) unpin(op *optracker.Operation) error {
"",
"Cluster",
"IPFSUnpin",
op.Pin().ToSerial(),
op.Pin(),
&struct{}{},
)
if err != nil {
@ -168,7 +168,7 @@ func (mpt *MapPinTracker) unpin(op *optracker.Operation) error {
}
// puts a new operation on the queue, unless ongoing exists
func (mpt *MapPinTracker) enqueue(ctx context.Context, c api.Pin, typ optracker.OperationType, ch chan *optracker.Operation) error {
func (mpt *MapPinTracker) enqueue(ctx context.Context, c *api.Pin, typ optracker.OperationType, ch chan *optracker.Operation) error {
ctx, span := trace.StartSpan(ctx, "tracker/map/enqueue")
defer span.End()
@ -191,7 +191,7 @@ func (mpt *MapPinTracker) enqueue(ctx context.Context, c api.Pin, typ optracker.
// Track tells the MapPinTracker to start managing a Cid,
// possibly triggering Pin operations on the IPFS daemon.
func (mpt *MapPinTracker) Track(ctx context.Context, c api.Pin) error {
func (mpt *MapPinTracker) Track(ctx context.Context, c *api.Pin) error {
ctx, span := trace.StartSpan(ctx, "tracker/map/Track")
defer span.End()
@ -239,7 +239,7 @@ func (mpt *MapPinTracker) Untrack(ctx context.Context, c cid.Cid) error {
// Status returns information for a Cid tracked by this
// MapPinTracker.
func (mpt *MapPinTracker) Status(ctx context.Context, c cid.Cid) api.PinInfo {
func (mpt *MapPinTracker) Status(ctx context.Context, c cid.Cid) *api.PinInfo {
ctx, span := trace.StartSpan(mpt.ctx, "tracker/map/Status")
defer span.End()
@ -248,7 +248,7 @@ func (mpt *MapPinTracker) Status(ctx context.Context, c cid.Cid) api.PinInfo {
// StatusAll returns information for all Cids tracked by this
// MapPinTracker.
func (mpt *MapPinTracker) StatusAll(ctx context.Context) []api.PinInfo {
func (mpt *MapPinTracker) StatusAll(ctx context.Context) []*api.PinInfo {
ctx, span := trace.StartSpan(mpt.ctx, "tracker/map/StatusAll")
defer span.End()
@ -263,7 +263,7 @@ func (mpt *MapPinTracker) StatusAll(ctx context.Context) []api.PinInfo {
// Pins in error states can be recovered with Recover().
// An error is returned if we are unable to contact
// the IPFS daemon.
func (mpt *MapPinTracker) Sync(ctx context.Context, c cid.Cid) (api.PinInfo, error) {
func (mpt *MapPinTracker) Sync(ctx context.Context, c cid.Cid) (*api.PinInfo, error) {
ctx, span := trace.StartSpan(mpt.ctx, "tracker/map/Sync")
defer span.End()
@ -272,7 +272,7 @@ func (mpt *MapPinTracker) Sync(ctx context.Context, c cid.Cid) (api.PinInfo, err
"",
"Cluster",
"IPFSPinLsCid",
api.PinCid(c).ToSerial(),
c,
&ips,
)
@ -292,12 +292,12 @@ func (mpt *MapPinTracker) Sync(ctx context.Context, c cid.Cid) (api.PinInfo, err
// were updated or have errors. Cids in error states can be recovered
// with Recover().
// An error is returned if we are unable to contact the IPFS daemon.
func (mpt *MapPinTracker) SyncAll(ctx context.Context) ([]api.PinInfo, error) {
func (mpt *MapPinTracker) SyncAll(ctx context.Context) ([]*api.PinInfo, error) {
ctx, span := trace.StartSpan(mpt.ctx, "tracker/map/SyncAll")
defer span.End()
var ipsMap map[string]api.IPFSPinStatus
var results []api.PinInfo
var results []*api.PinInfo
err := mpt.rpcClient.Call(
"",
"Cluster",
@ -323,7 +323,7 @@ func (mpt *MapPinTracker) SyncAll(ctx context.Context) ([]api.PinInfo, error) {
status := mpt.StatusAll(ctx)
for _, pInfoOrig := range status {
var pInfoNew api.PinInfo
var pInfoNew *api.PinInfo
c := pInfoOrig.Cid
ips, ok := ipsMap[c.String()]
if !ok {
@ -341,7 +341,7 @@ func (mpt *MapPinTracker) SyncAll(ctx context.Context) ([]api.PinInfo, error) {
return results, nil
}
func (mpt *MapPinTracker) syncStatus(ctx context.Context, c cid.Cid, ips api.IPFSPinStatus) api.PinInfo {
func (mpt *MapPinTracker) syncStatus(ctx context.Context, c cid.Cid, ips api.IPFSPinStatus) *api.PinInfo {
status, ok := mpt.optracker.Status(ctx, c)
if !ok {
status = api.TrackerStatusUnpinned
@ -403,7 +403,7 @@ func (mpt *MapPinTracker) syncStatus(ctx context.Context, c cid.Cid, ips api.IPF
// Recover will re-queue a Cid in error state for the failed operation,
// possibly retriggering an IPFS pinning operation.
func (mpt *MapPinTracker) Recover(ctx context.Context, c cid.Cid) (api.PinInfo, error) {
func (mpt *MapPinTracker) Recover(ctx context.Context, c cid.Cid) (*api.PinInfo, error) {
ctx, span := trace.StartSpan(mpt.ctx, "tracker/map/Recover")
defer span.End()
@ -421,12 +421,12 @@ func (mpt *MapPinTracker) Recover(ctx context.Context, c cid.Cid) (api.PinInfo,
}
// RecoverAll attempts to recover all items tracked by this peer.
func (mpt *MapPinTracker) RecoverAll(ctx context.Context) ([]api.PinInfo, error) {
func (mpt *MapPinTracker) RecoverAll(ctx context.Context) ([]*api.PinInfo, error) {
ctx, span := trace.StartSpan(mpt.ctx, "tracker/map/RecoverAll")
defer span.End()
pInfos := mpt.optracker.GetAll(ctx)
var results []api.PinInfo
var results []*api.PinInfo
for _, pInfo := range pInfos {
res, err := mpt.Recover(ctx, pInfo.Cid)
results = append(results, res)

View File

@ -17,8 +17,8 @@ import (
)
var (
pinCancelCid = test.TestCid3
unpinCancelCid = test.TestCid2
pinCancelCid = test.Cid3
unpinCancelCid = test.Cid2
ErrPinCancelCid = errors.New("should not have received rpc.IPFSPin operation")
ErrUnpinCancelCid = errors.New("should not have received rpc.IPFSUnpin operation")
)
@ -37,29 +37,27 @@ func mockRPCClient(t *testing.T) *rpc.Client {
return c
}
func (mock *mockService) IPFSPin(ctx context.Context, in api.PinSerial, out *struct{}) error {
c := in.ToPin().Cid
switch c.String() {
case test.TestSlowCid1:
func (mock *mockService) IPFSPin(ctx context.Context, in *api.Pin, out *struct{}) error {
switch in.Cid.String() {
case test.SlowCid1.String():
time.Sleep(2 * time.Second)
case pinCancelCid:
case pinCancelCid.String():
return ErrPinCancelCid
}
return nil
}
func (mock *mockService) IPFSUnpin(ctx context.Context, in api.PinSerial, out *struct{}) error {
c := in.ToPin().Cid
switch c.String() {
case test.TestSlowCid1:
func (mock *mockService) IPFSUnpin(ctx context.Context, in *api.Pin, out *struct{}) error {
switch in.Cid.String() {
case test.SlowCid1.String():
time.Sleep(2 * time.Second)
case unpinCancelCid:
case unpinCancelCid.String():
return ErrUnpinCancelCid
}
return nil
}
func testPin(c cid.Cid, min, max int, allocs ...peer.ID) api.Pin {
func testPin(c cid.Cid, min, max int, allocs ...peer.ID) *api.Pin {
pin := api.PinCid(c)
pin.ReplicationFactorMin = min
pin.ReplicationFactorMax = max
@ -71,7 +69,7 @@ func testSlowMapPinTracker(t *testing.T) *MapPinTracker {
cfg := &Config{}
cfg.Default()
cfg.ConcurrentPins = 1
mpt := NewMapPinTracker(cfg, test.TestPeerID1, test.TestPeerName1)
mpt := NewMapPinTracker(cfg, test.PeerID1, test.PeerName1)
mpt.SetClient(mockRPCClient(t))
return mpt
}
@ -80,7 +78,7 @@ func testMapPinTracker(t *testing.T) *MapPinTracker {
cfg := &Config{}
cfg.Default()
cfg.ConcurrentPins = 1
mpt := NewMapPinTracker(cfg, test.TestPeerID1, test.TestPeerName1)
mpt := NewMapPinTracker(cfg, test.PeerID1, test.PeerName1)
mpt.SetClient(test.NewMockRPCClient(t))
return mpt
}
@ -109,7 +107,7 @@ func TestTrack(t *testing.T) {
mpt := testMapPinTracker(t)
defer mpt.Shutdown(ctx)
h, _ := cid.Decode(test.TestCid1)
h := test.Cid1
// Let's start with a local pin
c := testPin(h, -1, -1)
@ -127,7 +125,7 @@ func TestTrack(t *testing.T) {
}
// Unpin and set remote
c = testPin(h, 1, 1, test.TestPeerID2)
c = testPin(h, 1, 1, test.PeerID2)
err = mpt.Track(context.Background(), c)
if err != nil {
t.Fatal(err)
@ -146,8 +144,8 @@ func TestUntrack(t *testing.T) {
mpt := testMapPinTracker(t)
defer mpt.Shutdown(ctx)
h1, _ := cid.Decode(test.TestCid1)
h2, _ := cid.Decode(test.TestCid2)
h1 := test.Cid1
h2 := test.Cid2
// LocalPin
c := testPin(h1, -1, -1)
@ -158,7 +156,7 @@ func TestUntrack(t *testing.T) {
}
// Remote pin
c = testPin(h2, 1, 1, test.TestPeerID2)
c = testPin(h2, 1, 1, test.PeerID2)
err = mpt.Track(context.Background(), c)
if err != nil {
t.Fatal(err)
@ -197,8 +195,8 @@ func TestStatusAll(t *testing.T) {
mpt := testMapPinTracker(t)
defer mpt.Shutdown(ctx)
h1, _ := cid.Decode(test.TestCid1)
h2, _ := cid.Decode(test.TestCid2)
h1 := test.Cid1
h2 := test.Cid2
// LocalPin
c := testPin(h1, -1, -1)
@ -229,8 +227,8 @@ func TestSyncAndRecover(t *testing.T) {
mpt := testMapPinTracker(t)
defer mpt.Shutdown(ctx)
h1, _ := cid.Decode(test.TestCid1)
h2, _ := cid.Decode(test.TestCid2)
h1 := test.Cid1
h2 := test.Cid2
c := testPin(h1, -1, -1)
mpt.Track(context.Background(), c)
@ -282,7 +280,7 @@ func TestRecoverAll(t *testing.T) {
mpt := testMapPinTracker(t)
defer mpt.Shutdown(ctx)
h1, _ := cid.Decode(test.TestCid1)
h1 := test.Cid1
c := testPin(h1, -1, -1)
mpt.Track(context.Background(), c)
@ -319,8 +317,8 @@ func TestSyncAll(t *testing.T) {
t.Fatal("should not have synced anything when it tracks nothing")
}
h1, _ := cid.Decode(test.TestCid1)
h2, _ := cid.Decode(test.TestCid2)
h1 := test.Cid1
h2 := test.Cid2
c := testPin(h1, -1, -1)
mpt.Track(context.Background(), c)
@ -344,7 +342,7 @@ func TestUntrackTrack(t *testing.T) {
mpt := testMapPinTracker(t)
defer mpt.Shutdown(ctx)
h1, _ := cid.Decode(test.TestCid1)
h1 := test.Cid1
// LocalPin
c := testPin(h1, -1, -1)
@ -366,7 +364,7 @@ func TestTrackUntrackWithCancel(t *testing.T) {
mpt := testSlowMapPinTracker(t)
defer mpt.Shutdown(ctx)
slowPinCid, _ := cid.Decode(test.TestSlowCid1)
slowPinCid := test.SlowCid1
// LocalPin
slowPin := testPin(slowPinCid, -1, -1)
@ -406,8 +404,8 @@ func TestTrackUntrackWithNoCancel(t *testing.T) {
mpt := testSlowMapPinTracker(t)
defer mpt.Shutdown(ctx)
slowPinCid, _ := cid.Decode(test.TestSlowCid1)
fastPinCid, _ := cid.Decode(pinCancelCid)
slowPinCid := test.SlowCid1
fastPinCid := pinCancelCid
// SlowLocalPin
slowPin := testPin(slowPinCid, -1, -1)
@ -452,7 +450,7 @@ func TestUntrackTrackWithCancel(t *testing.T) {
mpt := testSlowMapPinTracker(t)
defer mpt.Shutdown(ctx)
slowPinCid, _ := cid.Decode(test.TestSlowCid1)
slowPinCid := test.SlowCid1
// LocalPin
slowPin := testPin(slowPinCid, -1, -1)
@ -502,8 +500,8 @@ func TestUntrackTrackWithNoCancel(t *testing.T) {
mpt := testSlowMapPinTracker(t)
defer mpt.Shutdown(ctx)
slowPinCid, _ := cid.Decode(test.TestSlowCid1)
fastPinCid, _ := cid.Decode(unpinCancelCid)
slowPinCid := test.SlowCid1
fastPinCid := unpinCancelCid
// SlowLocalPin
slowPin := testPin(slowPinCid, -1, -1)
@ -558,7 +556,7 @@ func TestTrackUntrackConcurrent(t *testing.T) {
mpt := testMapPinTracker(t)
defer mpt.Shutdown(ctx)
h1, _ := cid.Decode(test.TestCid1)
h1 := test.Cid1
// LocalPin
c := testPin(h1, -1, -1)

View File

@ -56,7 +56,7 @@ type Operation struct {
// RO fields
opType OperationType
pin api.Pin
pin *api.Pin
// RW fields
mu sync.RWMutex
@ -66,7 +66,7 @@ type Operation struct {
}
// NewOperation creates a new Operation.
func NewOperation(ctx context.Context, pin api.Pin, typ OperationType, ph Phase) *Operation {
func NewOperation(ctx context.Context, pin *api.Pin, typ OperationType, ph Phase) *Operation {
ctx, span := trace.StartSpan(ctx, "optracker/NewOperation")
defer span.End()
@ -147,7 +147,7 @@ func (op *Operation) Type() OperationType {
}
// Pin returns the Pin object associated to the operation.
func (op *Operation) Pin() api.Pin {
func (op *Operation) Pin() *api.Pin {
return op.pin
}

View File

@ -12,9 +12,8 @@ import (
func TestOperation(t *testing.T) {
tim := time.Now().Add(-2 * time.Second)
h := test.MustDecodeCid(test.TestCid1)
op := NewOperation(context.Background(), api.PinCid(h), OperationUnpin, PhaseQueued)
if op.Cid() != h {
op := NewOperation(context.Background(), api.PinCid(test.Cid1), OperationUnpin, PhaseQueued)
if !op.Cid().Equals(test.Cid1) {
t.Error("bad cid")
}
if op.Phase() != PhaseQueued {

View File

@ -45,7 +45,7 @@ func NewOperationTracker(ctx context.Context, pid peer.ID, peerName string) *Ope
//
// If an operation already exists and is of a different type, it is
// cancelled and the new one replaces it in the tracker.
func (opt *OperationTracker) TrackNewOperation(ctx context.Context, pin api.Pin, typ OperationType, ph Phase) *Operation {
func (opt *OperationTracker) TrackNewOperation(ctx context.Context, pin *api.Pin, typ OperationType, ph Phase) *Operation {
ctx = trace.NewContext(opt.ctx, trace.FromContext(ctx))
ctx, span := trace.StartSpan(ctx, "optracker/TrackNewOperation")
defer span.End()
@ -140,7 +140,7 @@ func (opt *OperationTracker) unsafePinInfo(ctx context.Context, op *Operation) a
}
// Get returns a PinInfo object for Cid.
func (opt *OperationTracker) Get(ctx context.Context, c cid.Cid) api.PinInfo {
func (opt *OperationTracker) Get(ctx context.Context, c cid.Cid) *api.PinInfo {
ctx, span := trace.StartSpan(ctx, "optracker/GetAll")
defer span.End()
@ -151,12 +151,12 @@ func (opt *OperationTracker) Get(ctx context.Context, c cid.Cid) api.PinInfo {
if pInfo.Cid == cid.Undef {
pInfo.Cid = c
}
return pInfo
return &pInfo
}
// GetExists returns a PinInfo object for a Cid only if there exists
// an associated Operation.
func (opt *OperationTracker) GetExists(ctx context.Context, c cid.Cid) (api.PinInfo, bool) {
func (opt *OperationTracker) GetExists(ctx context.Context, c cid.Cid) (*api.PinInfo, bool) {
ctx, span := trace.StartSpan(ctx, "optracker/GetExists")
defer span.End()
@ -164,22 +164,23 @@ func (opt *OperationTracker) GetExists(ctx context.Context, c cid.Cid) (api.PinI
defer opt.mu.RUnlock()
op, ok := opt.operations[c.String()]
if !ok {
return api.PinInfo{}, false
return nil, false
}
pInfo := opt.unsafePinInfo(ctx, op)
return pInfo, true
return &pInfo, true
}
// GetAll returns PinInfo objects for all known operations.
func (opt *OperationTracker) GetAll(ctx context.Context) []api.PinInfo {
func (opt *OperationTracker) GetAll(ctx context.Context) []*api.PinInfo {
ctx, span := trace.StartSpan(ctx, "optracker/GetAll")
defer span.End()
var pinfos []api.PinInfo
var pinfos []*api.PinInfo
opt.mu.RLock()
defer opt.mu.RUnlock()
for _, op := range opt.operations {
pinfos = append(pinfos, opt.unsafePinInfo(ctx, op))
pinfo := opt.unsafePinInfo(ctx, op)
pinfos = append(pinfos, &pinfo)
}
return pinfos
}
@ -228,13 +229,14 @@ func (opt *OperationTracker) OpContext(ctx context.Context, c cid.Cid) context.C
// Operations that matched the provided filter. Note: only filters of
// type OperationType or Phase are supported; any other type
// will result in a nil slice being returned.
func (opt *OperationTracker) Filter(ctx context.Context, filters ...interface{}) []api.PinInfo {
var pinfos []api.PinInfo
func (opt *OperationTracker) Filter(ctx context.Context, filters ...interface{}) []*api.PinInfo {
var pinfos []*api.PinInfo
opt.mu.RLock()
defer opt.mu.RUnlock()
ops := filterOpsMap(ctx, opt.operations, filters)
for _, op := range ops {
pinfos = append(pinfos, opt.unsafePinInfo(ctx, op))
pinfo := opt.unsafePinInfo(ctx, op)
pinfos = append(pinfos, &pinfo)
}
return pinfos
}
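
A detail behind the new temporary variable in GetAll and Filter: unsafePinInfo returns an api.PinInfo value, and Go cannot take the address of a function's return value, so the result is copied into a local before &pinfo is appended. A self-contained illustration of the same pattern (hypothetical types, not from this repository):

package example

type pinInfo struct{ status string }

func makeInfo() pinInfo { return pinInfo{status: "pinned"} }

func collect(n int) []*pinInfo {
	var out []*pinInfo
	for i := 0; i < n; i++ {
		// &makeInfo() would not compile: a return value is not
		// addressable. Copying into a local declared inside the
		// loop also gives each iteration its own variable, so
		// every stored pointer is distinct.
		info := makeInfo()
		out = append(out, &info)
	}
	return out
}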

View File

@ -11,14 +11,13 @@ import (
func testOperationTracker(t *testing.T) *OperationTracker {
ctx := context.Background()
return NewOperationTracker(ctx, test.TestPeerID1, test.TestPeerName1)
return NewOperationTracker(ctx, test.PeerID1, test.PeerName1)
}
func TestOperationTracker_TrackNewOperation(t *testing.T) {
ctx := context.Background()
opt := testOperationTracker(t)
h := test.MustDecodeCid(test.TestCid1)
op := opt.TrackNewOperation(ctx, api.PinCid(h), OperationPin, PhaseQueued)
op := opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationPin, PhaseQueued)
t.Run("track new operation", func(t *testing.T) {
if op == nil {
@ -42,14 +41,14 @@ func TestOperationTracker_TrackNewOperation(t *testing.T) {
})
t.Run("track when ongoing operation", func(t *testing.T) {
op2 := opt.TrackNewOperation(ctx, api.PinCid(h), OperationPin, PhaseInProgress)
op2 := opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationPin, PhaseInProgress)
if op2 != nil {
t.Fatal("should not have created new operation")
}
})
t.Run("track of different type", func(t *testing.T) {
op2 := opt.TrackNewOperation(ctx, api.PinCid(h), OperationUnpin, PhaseQueued)
op2 := opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationUnpin, PhaseQueued)
if op2 == nil {
t.Fatal("should have created a new operation")
}
@ -60,24 +59,24 @@ func TestOperationTracker_TrackNewOperation(t *testing.T) {
})
t.Run("track of same type when done", func(t *testing.T) {
op2 := opt.TrackNewOperation(ctx, api.PinCid(h), OperationPin, PhaseDone)
op2 := opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationPin, PhaseDone)
if op2 == nil {
t.Fatal("should have created a new operation")
}
op3 := opt.TrackNewOperation(ctx, api.PinCid(h), OperationPin, PhaseQueued)
op3 := opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationPin, PhaseQueued)
if op3 == nil {
t.Fatal("should have created a new operation when other is in Done")
}
})
t.Run("track of same type when error", func(t *testing.T) {
op4 := opt.TrackNewOperation(ctx, api.PinCid(h), OperationUnpin, PhaseError)
op4 := opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationUnpin, PhaseError)
if op4 == nil {
t.Fatal("should have created a new operation")
}
op5 := opt.TrackNewOperation(ctx, api.PinCid(h), OperationUnpin, PhaseQueued)
op5 := opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationUnpin, PhaseQueued)
if op5 == nil {
t.Fatal("should have created a new operation")
}
@ -87,12 +86,11 @@ func TestOperationTracker_TrackNewOperation(t *testing.T) {
func TestOperationTracker_Clean(t *testing.T) {
ctx := context.Background()
opt := testOperationTracker(t)
h := test.MustDecodeCid(test.TestCid1)
op := opt.TrackNewOperation(ctx, api.PinCid(h), OperationPin, PhaseQueued)
op2 := opt.TrackNewOperation(ctx, api.PinCid(h), OperationUnpin, PhaseQueued)
op := opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationPin, PhaseQueued)
op2 := opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationUnpin, PhaseQueued)
t.Run("clean older operation", func(t *testing.T) {
opt.Clean(ctx, op)
st, ok := opt.Status(ctx, h)
st, ok := opt.Status(ctx, test.Cid1)
if !ok || st != api.TrackerStatusUnpinQueued {
t.Fatal("should not have cleaned the latest op")
}
@ -100,7 +98,7 @@ func TestOperationTracker_Clean(t *testing.T) {
t.Run("clean current operation", func(t *testing.T) {
opt.Clean(ctx, op2)
_, ok := opt.Status(ctx, h)
_, ok := opt.Status(ctx, test.Cid1)
if ok {
t.Fatal("should have cleaned the latest op")
}
@ -110,14 +108,13 @@ func TestOperationTracker_Clean(t *testing.T) {
func TestOperationTracker_Status(t *testing.T) {
ctx := context.Background()
opt := testOperationTracker(t)
h := test.MustDecodeCid(test.TestCid1)
opt.TrackNewOperation(ctx, api.PinCid(h), OperationRemote, PhaseDone)
st, ok := opt.Status(ctx, h)
opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationRemote, PhaseDone)
st, ok := opt.Status(ctx, test.Cid1)
if !ok || st != api.TrackerStatusRemote {
t.Error("should provide status remote")
}
_, ok = opt.Status(ctx, h)
_, ok = opt.Status(ctx, test.Cid1)
if !ok {
t.Error("should signal unexistent status")
}
@ -126,10 +123,9 @@ func TestOperationTracker_Status(t *testing.T) {
func TestOperationTracker_SetError(t *testing.T) {
ctx := context.Background()
opt := testOperationTracker(t)
h := test.MustDecodeCid(test.TestCid1)
opt.TrackNewOperation(ctx, api.PinCid(h), OperationPin, PhaseDone)
opt.SetError(ctx, h, errors.New("fake error"))
pinfo := opt.Get(ctx, h)
opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationPin, PhaseDone)
opt.SetError(ctx, test.Cid1, errors.New("fake error"))
pinfo := opt.Get(ctx, test.Cid1)
if pinfo.Status != api.TrackerStatusPinError {
t.Error("should have updated the status")
}
@ -137,9 +133,9 @@ func TestOperationTracker_SetError(t *testing.T) {
t.Error("should have set the error message")
}
opt.TrackNewOperation(ctx, api.PinCid(h), OperationUnpin, PhaseQueued)
opt.SetError(ctx, h, errors.New("fake error"))
st, ok := opt.Status(ctx, h)
opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationUnpin, PhaseQueued)
opt.SetError(ctx, test.Cid1, errors.New("fake error"))
st, ok := opt.Status(ctx, test.Cid1)
if !ok || st != api.TrackerStatusUnpinQueued {
t.Error("should not have set an error on in-flight items")
}
@ -148,35 +144,33 @@ func TestOperationTracker_SetError(t *testing.T) {
func TestOperationTracker_Get(t *testing.T) {
ctx := context.Background()
opt := testOperationTracker(t)
h := test.MustDecodeCid(test.TestCid1)
h2 := test.MustDecodeCid(test.TestCid2)
opt.TrackNewOperation(ctx, api.PinCid(h), OperationPin, PhaseDone)
opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationPin, PhaseDone)
t.Run("Get with existing item", func(t *testing.T) {
pinfo := opt.Get(ctx, h)
pinfo := opt.Get(ctx, test.Cid1)
if pinfo.Status != api.TrackerStatusPinned {
t.Error("bad status")
}
if pinfo.Cid != h {
if !pinfo.Cid.Equals(test.Cid1) {
t.Error("bad cid")
}
if pinfo.Peer != test.TestPeerID1 {
if pinfo.Peer != test.PeerID1 {
t.Error("bad peer ID")
}
})
t.Run("Get with unexisting item", func(t *testing.T) {
pinfo := opt.Get(ctx, h2)
pinfo := opt.Get(ctx, test.Cid2)
if pinfo.Status != api.TrackerStatusUnpinned {
t.Error("bad status")
}
if pinfo.Cid != h2 {
if !pinfo.Cid.Equals(test.Cid2) {
t.Error("bad cid")
}
if pinfo.Peer != test.TestPeerID1 {
if pinfo.Peer != test.PeerID1 {
t.Error("bad peer ID")
}
})
@ -185,8 +179,7 @@ func TestOperationTracker_Get(t *testing.T) {
func TestOperationTracker_GetAll(t *testing.T) {
ctx := context.Background()
opt := testOperationTracker(t)
h := test.MustDecodeCid(test.TestCid1)
opt.TrackNewOperation(ctx, api.PinCid(h), OperationPin, PhaseInProgress)
opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationPin, PhaseInProgress)
pinfos := opt.GetAll(ctx)
if len(pinfos) != 1 {
t.Fatal("expected 1 item")
@ -199,10 +192,9 @@ func TestOperationTracker_GetAll(t *testing.T) {
func TestOperationTracker_OpContext(t *testing.T) {
ctx := context.Background()
opt := testOperationTracker(t)
h := test.MustDecodeCid(test.TestCid1)
op := opt.TrackNewOperation(ctx, api.PinCid(h), OperationPin, PhaseInProgress)
op := opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationPin, PhaseInProgress)
ctx1 := op.Context()
ctx2 := opt.OpContext(ctx, h)
ctx2 := opt.OpContext(ctx, test.Cid1)
if ctx1 != ctx2 {
t.Fatal("didn't get the right context")
}
@ -211,9 +203,9 @@ func TestOperationTracker_OpContext(t *testing.T) {
func TestOperationTracker_filterOps(t *testing.T) {
ctx := context.Background()
testOpsMap := map[string]*Operation{
test.TestCid1: &Operation{pin: api.PinCid(test.MustDecodeCid(test.TestCid1)), opType: OperationPin, phase: PhaseQueued},
test.TestCid2: &Operation{pin: api.PinCid(test.MustDecodeCid(test.TestCid2)), opType: OperationPin, phase: PhaseInProgress},
test.TestCid3: &Operation{pin: api.PinCid(test.MustDecodeCid(test.TestCid3)), opType: OperationUnpin, phase: PhaseInProgress},
test.Cid1.String(): &Operation{pin: api.PinCid(test.Cid1), opType: OperationPin, phase: PhaseQueued},
test.Cid2.String(): &Operation{pin: api.PinCid(test.Cid2), opType: OperationPin, phase: PhaseInProgress},
test.Cid3.String(): &Operation{pin: api.PinCid(test.Cid3), opType: OperationUnpin, phase: PhaseInProgress},
}
opt := &OperationTracker{ctx: ctx, operations: testOpsMap}

View File

@ -22,8 +22,8 @@ import (
)
var (
pinCancelCid = test.TestCid3
unpinCancelCid = test.TestCid2
pinCancelCid = test.Cid3
unpinCancelCid = test.Cid2
ErrPinCancelCid = errors.New("should not have received rpc.IPFSPin operation")
ErrUnpinCancelCid = errors.New("should not have received rpc.IPFSUnpin operation")
pinOpts = api.PinOptions{
@ -46,22 +46,22 @@ func mockRPCClient(t testing.TB) *rpc.Client {
return c
}
func (mock *mockService) IPFSPin(ctx context.Context, in api.PinSerial, out *struct{}) error {
c := in.ToPin().Cid
func (mock *mockService) IPFSPin(ctx context.Context, in *api.Pin, out *struct{}) error {
c := in.Cid
switch c.String() {
case test.TestSlowCid1:
case test.SlowCid1.String():
time.Sleep(3 * time.Second)
case pinCancelCid:
case pinCancelCid.String():
return ErrPinCancelCid
}
return nil
}
func (mock *mockService) IPFSPinLsCid(ctx context.Context, in api.PinSerial, out *api.IPFSPinStatus) error {
switch in.Cid {
case test.TestCid1, test.TestCid2:
func (mock *mockService) IPFSPinLsCid(ctx context.Context, in cid.Cid, out *api.IPFSPinStatus) error {
switch in.String() {
case test.Cid1.String(), test.Cid2.String():
*out = api.IPFSPinStatusRecursive
case test.TestCid4:
case test.Cid4.String():
*out = api.IPFSPinStatusError
return errors.New("an ipfs error")
default:
@ -70,12 +70,11 @@ func (mock *mockService) IPFSPinLsCid(ctx context.Context, in api.PinSerial, out
return nil
}
func (mock *mockService) IPFSUnpin(ctx context.Context, in api.PinSerial, out *struct{}) error {
c := in.ToPin().Cid
switch c.String() {
case test.TestSlowCid1:
func (mock *mockService) IPFSUnpin(ctx context.Context, in *api.Pin, out *struct{}) error {
switch in.Cid.String() {
case test.SlowCid1.String():
time.Sleep(3 * time.Second)
case unpinCancelCid:
case unpinCancelCid.String():
return ErrUnpinCancelCid
}
return nil
@ -83,33 +82,35 @@ func (mock *mockService) IPFSUnpin(ctx context.Context, in api.PinSerial, out *s
func (mock *mockService) IPFSPinLs(ctx context.Context, in string, out *map[string]api.IPFSPinStatus) error {
m := map[string]api.IPFSPinStatus{
test.TestCid1: api.IPFSPinStatusRecursive,
test.Cid1.String(): api.IPFSPinStatusRecursive,
}
*out = m
return nil
}
func (mock *mockService) Pins(ctx context.Context, in struct{}, out *[]api.PinSerial) error {
*out = []api.PinSerial{
api.PinWithOpts(test.MustDecodeCid(test.TestCid1), pinOpts).ToSerial(),
api.PinWithOpts(test.MustDecodeCid(test.TestCid3), pinOpts).ToSerial(),
func (mock *mockService) Pins(ctx context.Context, in struct{}, out *[]*api.Pin) error {
*out = []*api.Pin{
api.PinWithOpts(test.Cid1, pinOpts),
api.PinWithOpts(test.Cid3, pinOpts),
}
return nil
}
func (mock *mockService) PinGet(ctx context.Context, in api.PinSerial, out *api.PinSerial) error {
switch in.Cid {
case test.ErrorCid:
func (mock *mockService) PinGet(ctx context.Context, in cid.Cid, out *api.Pin) error {
switch in.String() {
case test.ErrorCid.String():
return errors.New("expected error when using ErrorCid")
case test.TestCid1, test.TestCid2:
*out = api.PinWithOpts(test.MustDecodeCid(in.Cid), pinOpts).ToSerial()
case test.Cid1.String(), test.Cid2.String():
pin := api.PinWithOpts(in, pinOpts)
*out = *pin
return nil
}
*out = in
pin := api.PinCid(in)
*out = *pin
return nil
}
var sortPinInfoByCid = func(p []api.PinInfo) {
var sortPinInfoByCid = func(p []*api.PinInfo) {
sort.Slice(p, func(i, j int) bool {
return p[i].Cid.String() < p[j].Cid.String()
})
@ -119,7 +120,7 @@ func testSlowMapPinTracker(t testing.TB) *maptracker.MapPinTracker {
cfg := &maptracker.Config{}
cfg.Default()
cfg.ConcurrentPins = 1
mpt := maptracker.NewMapPinTracker(cfg, test.TestPeerID1, test.TestPeerName1)
mpt := maptracker.NewMapPinTracker(cfg, test.PeerID1, test.PeerName1)
mpt.SetClient(mockRPCClient(t))
return mpt
}
@ -128,7 +129,7 @@ func testMapPinTracker(t testing.TB) *maptracker.MapPinTracker {
cfg := &maptracker.Config{}
cfg.Default()
cfg.ConcurrentPins = 1
mpt := maptracker.NewMapPinTracker(cfg, test.TestPeerID1, test.TestPeerName1)
mpt := maptracker.NewMapPinTracker(cfg, test.PeerID1, test.PeerName1)
mpt.SetClient(test.NewMockRPCClient(t))
return mpt
}
@ -136,7 +137,7 @@ func testMapPinTracker(t testing.TB) *maptracker.MapPinTracker {
func testSlowStatelessPinTracker(t testing.TB) *stateless.Tracker {
cfg := &stateless.Config{}
cfg.Default()
mpt := stateless.New(cfg, test.TestPeerID1, test.TestPeerName1)
mpt := stateless.New(cfg, test.PeerID1, test.PeerName1)
mpt.SetClient(mockRPCClient(t))
return mpt
}
@ -144,14 +145,14 @@ func testSlowStatelessPinTracker(t testing.TB) *stateless.Tracker {
func testStatelessPinTracker(t testing.TB) *stateless.Tracker {
cfg := &stateless.Config{}
cfg.Default()
spt := stateless.New(cfg, test.TestPeerID1, test.TestPeerName1)
spt := stateless.New(cfg, test.PeerID1, test.PeerName1)
spt.SetClient(test.NewMockRPCClient(t))
return spt
}
func TestPinTracker_Track(t *testing.T) {
type args struct {
c api.Pin
c *api.Pin
tracker ipfscluster.PinTracker
}
tests := []struct {
@ -162,7 +163,7 @@ func TestPinTracker_Track(t *testing.T) {
{
"basic stateless track",
args{
api.PinWithOpts(test.MustDecodeCid(test.TestCid1), pinOpts),
api.PinWithOpts(test.Cid1, pinOpts),
testStatelessPinTracker(t),
},
false,
@ -170,7 +171,7 @@ func TestPinTracker_Track(t *testing.T) {
{
"basic map track",
args{
api.PinWithOpts(test.MustDecodeCid(test.TestCid1), pinOpts),
api.PinWithOpts(test.Cid1, pinOpts),
testMapPinTracker(t),
},
false,
@ -187,7 +188,7 @@ func TestPinTracker_Track(t *testing.T) {
func BenchmarkPinTracker_Track(b *testing.B) {
type args struct {
c api.Pin
c *api.Pin
tracker ipfscluster.PinTracker
}
tests := []struct {
@ -197,14 +198,14 @@ func BenchmarkPinTracker_Track(b *testing.B) {
{
"basic stateless track",
args{
api.PinWithOpts(test.MustDecodeCid(test.TestCid1), pinOpts),
api.PinWithOpts(test.Cid1, pinOpts),
testStatelessPinTracker(b),
},
},
{
"basic map track",
args{
api.PinWithOpts(test.MustDecodeCid(test.TestCid1), pinOpts),
api.PinWithOpts(test.Cid1, pinOpts),
testMapPinTracker(b),
},
},
@ -234,7 +235,7 @@ func TestPinTracker_Untrack(t *testing.T) {
{
"basic stateless untrack",
args{
test.MustDecodeCid(test.TestCid1),
test.Cid1,
testStatelessPinTracker(t),
},
false,
@ -242,7 +243,7 @@ func TestPinTracker_Untrack(t *testing.T) {
{
"basic map untrack",
args{
test.MustDecodeCid(test.TestCid1),
test.Cid1,
testMapPinTracker(t),
},
false,
@ -259,31 +260,31 @@ func TestPinTracker_Untrack(t *testing.T) {
func TestPinTracker_StatusAll(t *testing.T) {
type args struct {
c api.Pin
c *api.Pin
tracker ipfscluster.PinTracker
}
tests := []struct {
name string
args args
want []api.PinInfo
want []*api.PinInfo
}{
{
"basic stateless statusall",
args{
api.PinWithOpts(test.MustDecodeCid(test.TestCid1), pinOpts),
api.PinWithOpts(test.Cid1, pinOpts),
testStatelessPinTracker(t),
},
[]api.PinInfo{
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid1),
[]*api.PinInfo{
{
Cid: test.Cid1,
Status: api.TrackerStatusPinned,
},
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid2),
{
Cid: test.Cid2,
Status: api.TrackerStatusRemote,
},
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid3),
{
Cid: test.Cid3,
Status: api.TrackerStatusPinned,
},
},
@ -291,12 +292,12 @@ func TestPinTracker_StatusAll(t *testing.T) {
{
"basic map statusall",
args{
api.PinWithOpts(test.MustDecodeCid(test.TestCid1), pinOpts),
api.PinWithOpts(test.Cid1, pinOpts),
testMapPinTracker(t),
},
[]api.PinInfo{
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid1),
[]*api.PinInfo{
{
Cid: test.Cid1,
Status: api.TrackerStatusPinned,
},
},
@ -304,12 +305,12 @@ func TestPinTracker_StatusAll(t *testing.T) {
{
"slow stateless statusall",
args{
api.PinWithOpts(test.MustDecodeCid(test.TestCid1), pinOpts),
api.PinWithOpts(test.Cid1, pinOpts),
testSlowStatelessPinTracker(t),
},
[]api.PinInfo{
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid1),
[]*api.PinInfo{
{
Cid: test.Cid1,
Status: api.TrackerStatusPinned,
},
},
@ -317,12 +318,12 @@ func TestPinTracker_StatusAll(t *testing.T) {
{
"slow map statusall",
args{
api.PinWithOpts(test.MustDecodeCid(test.TestCid1), pinOpts),
api.PinWithOpts(test.Cid1, pinOpts),
testSlowMapPinTracker(t),
},
[]api.PinInfo{
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid1),
[]*api.PinInfo{
{
Cid: test.Cid1,
Status: api.TrackerStatusPinned,
},
},
@ -402,44 +403,44 @@ func TestPinTracker_Status(t *testing.T) {
{
"basic stateless status",
args{
test.MustDecodeCid(test.TestCid1),
test.Cid1,
testStatelessPinTracker(t),
},
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid1),
Cid: test.Cid1,
Status: api.TrackerStatusPinned,
},
},
{
"basic map status",
args{
test.MustDecodeCid(test.TestCid1),
test.Cid1,
testMapPinTracker(t),
},
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid1),
Cid: test.Cid1,
Status: api.TrackerStatusPinned,
},
},
{
"basic stateless status/unpinned",
args{
test.MustDecodeCid(test.TestCid4),
test.Cid4,
testStatelessPinTracker(t),
},
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid4),
Cid: test.Cid4,
Status: api.TrackerStatusUnpinned,
},
},
{
"basic map status/unpinned",
args{
test.MustDecodeCid(test.TestCid4),
test.Cid4,
testMapPinTracker(t),
},
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid4),
Cid: test.Cid4,
Status: api.TrackerStatusUnpinned,
},
},
@ -447,22 +448,22 @@ func TestPinTracker_Status(t *testing.T) {
{
"slow stateless status",
args{
test.MustDecodeCid(test.TestCid1),
test.Cid1,
testSlowStatelessPinTracker(t),
},
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid1),
Cid: test.Cid1,
Status: api.TrackerStatusPinned,
},
},
{
"slow map status",
args{
test.MustDecodeCid(test.TestCid1),
test.Cid1,
testSlowMapPinTracker(t),
},
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid1),
Cid: test.Cid1,
Status: api.TrackerStatusPinned,
},
},
@ -473,7 +474,7 @@ func TestPinTracker_Status(t *testing.T) {
case *maptracker.MapPinTracker:
// the Track preps the internal map of the MapPinTracker
// not required by the Stateless impl
pin := api.PinWithOpts(test.MustDecodeCid(test.TestCid1), pinOpts)
pin := api.PinWithOpts(test.Cid1, pinOpts)
if err := tt.args.tracker.Track(context.Background(), pin); err != nil {
t.Errorf("PinTracker.Track() error = %v", err)
}
@ -501,25 +502,25 @@ func TestPinTracker_SyncAll(t *testing.T) {
tests := []struct {
name string
args args
want []api.PinInfo
want []*api.PinInfo
wantErr bool
}{
{
"basic stateless syncall",
args{
[]cid.Cid{
test.MustDecodeCid(test.TestCid1),
test.MustDecodeCid(test.TestCid2),
test.Cid1,
test.Cid2,
},
testStatelessPinTracker(t),
},
[]api.PinInfo{
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid1),
[]*api.PinInfo{
{
Cid: test.Cid1,
Status: api.TrackerStatusPinned,
},
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid2),
{
Cid: test.Cid2,
Status: api.TrackerStatusPinned,
},
},
@ -529,18 +530,18 @@ func TestPinTracker_SyncAll(t *testing.T) {
"basic map syncall",
args{
[]cid.Cid{
test.MustDecodeCid(test.TestCid1),
test.MustDecodeCid(test.TestCid2),
test.Cid1,
test.Cid2,
},
testMapPinTracker(t),
},
[]api.PinInfo{
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid1),
[]*api.PinInfo{
{
Cid: test.Cid1,
Status: api.TrackerStatusPinned,
},
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid2),
{
Cid: test.Cid2,
Status: api.TrackerStatusPinned,
},
},
@ -550,18 +551,18 @@ func TestPinTracker_SyncAll(t *testing.T) {
"slow stateless syncall",
args{
[]cid.Cid{
test.MustDecodeCid(test.TestCid1),
test.MustDecodeCid(test.TestCid2),
test.Cid1,
test.Cid2,
},
testSlowStatelessPinTracker(t),
},
[]api.PinInfo{
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid1),
[]*api.PinInfo{
{
Cid: test.Cid1,
Status: api.TrackerStatusPinned,
},
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid2),
{
Cid: test.Cid2,
Status: api.TrackerStatusPinned,
},
},
@ -571,18 +572,18 @@ func TestPinTracker_SyncAll(t *testing.T) {
"slow map syncall",
args{
[]cid.Cid{
test.MustDecodeCid(test.TestCid1),
test.MustDecodeCid(test.TestCid2),
test.Cid1,
test.Cid2,
},
testSlowMapPinTracker(t),
},
[]api.PinInfo{
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid1),
[]*api.PinInfo{
{
Cid: test.Cid1,
Status: api.TrackerStatusPinned,
},
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid2),
{
Cid: test.Cid2,
Status: api.TrackerStatusPinned,
},
},
@ -638,11 +639,11 @@ func TestPinTracker_Sync(t *testing.T) {
{
"basic stateless sync",
args{
test.MustDecodeCid(test.TestCid1),
test.Cid1,
testStatelessPinTracker(t),
},
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid1),
Cid: test.Cid1,
Status: api.TrackerStatusPinned,
},
false,
@ -650,11 +651,11 @@ func TestPinTracker_Sync(t *testing.T) {
{
"basic map sync",
args{
test.MustDecodeCid(test.TestCid1),
test.Cid1,
testMapPinTracker(t),
},
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid1),
Cid: test.Cid1,
Status: api.TrackerStatusPinned,
},
false,
@ -662,11 +663,11 @@ func TestPinTracker_Sync(t *testing.T) {
{
"slow stateless sync",
args{
test.MustDecodeCid(test.TestCid1),
test.Cid1,
testSlowStatelessPinTracker(t),
},
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid1),
Cid: test.Cid1,
Status: api.TrackerStatusPinned,
},
false,
@ -674,11 +675,11 @@ func TestPinTracker_Sync(t *testing.T) {
{
"slow map sync",
args{
test.MustDecodeCid(test.TestCid1),
test.Cid1,
testSlowMapPinTracker(t),
},
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid1),
Cid: test.Cid1,
Status: api.TrackerStatusPinned,
},
false,
@ -689,7 +690,7 @@ func TestPinTracker_Sync(t *testing.T) {
switch tt.args.tracker.(type) {
case *maptracker.MapPinTracker:
// the Track preps the internal map of the MapPinTracker; not required by the Stateless impl
pin := api.PinWithOpts(test.MustDecodeCid(test.TestCid1), pinOpts)
pin := api.PinWithOpts(test.Cid1, pinOpts)
if err := tt.args.tracker.Track(context.Background(), pin); err != nil {
t.Errorf("PinTracker.Track() error = %v", err)
}
@ -717,31 +718,31 @@ func TestPinTracker_Sync(t *testing.T) {
func TestPinTracker_RecoverAll(t *testing.T) {
type args struct {
tracker ipfscluster.PinTracker
pin api.Pin // only used by maptracker
pin *api.Pin // only used by maptracker
}
tests := []struct {
name string
args args
want []api.PinInfo
want []*api.PinInfo
wantErr bool
}{
{
"basic stateless recoverall",
args{
testStatelessPinTracker(t),
api.Pin{},
&api.Pin{},
},
[]api.PinInfo{
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid1),
[]*api.PinInfo{
{
Cid: test.Cid1,
Status: api.TrackerStatusPinned,
},
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid2),
{
Cid: test.Cid2,
Status: api.TrackerStatusRemote,
},
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid3),
{
Cid: test.Cid3,
Status: api.TrackerStatusPinned,
},
},
@ -751,11 +752,11 @@ func TestPinTracker_RecoverAll(t *testing.T) {
"basic map recoverall",
args{
testMapPinTracker(t),
api.PinWithOpts(test.MustDecodeCid(test.TestCid1), pinOpts),
api.PinWithOpts(test.Cid1, pinOpts),
},
[]api.PinInfo{
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid1),
[]*api.PinInfo{
{
Cid: test.Cid1,
Status: api.TrackerStatusPinned,
},
},
@ -817,11 +818,11 @@ func TestPinTracker_Recover(t *testing.T) {
{
"basic stateless recover",
args{
test.MustDecodeCid(test.TestCid1),
test.Cid1,
testStatelessPinTracker(t),
},
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid1),
Cid: test.Cid1,
Status: api.TrackerStatusPinned,
},
false,
@ -829,11 +830,11 @@ func TestPinTracker_Recover(t *testing.T) {
{
"basic map recover",
args{
test.MustDecodeCid(test.TestCid1),
test.Cid1,
testMapPinTracker(t),
},
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid1),
Cid: test.Cid1,
Status: api.TrackerStatusPinned,
},
false,
@ -868,11 +869,11 @@ func TestUntrackTrack(t *testing.T) {
{
"basic stateless untrack track",
args{
test.MustDecodeCid(test.TestCid1),
test.Cid1,
testStatelessPinTracker(t),
},
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid1),
Cid: test.Cid1,
Status: api.TrackerStatusPinned,
},
false,
@ -880,11 +881,11 @@ func TestUntrackTrack(t *testing.T) {
{
"basic map untrack track",
args{
test.MustDecodeCid(test.TestCid1),
test.Cid1,
testMapPinTracker(t),
},
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid1),
Cid: test.Cid1,
Status: api.TrackerStatusPinned,
},
false,
@ -921,11 +922,11 @@ func TestTrackUntrackWithCancel(t *testing.T) {
{
"slow stateless tracker untrack w/ cancel",
args{
test.MustDecodeCid(test.TestSlowCid1),
test.SlowCid1,
testSlowStatelessPinTracker(t),
},
api.PinInfo{
Cid: test.MustDecodeCid(test.TestSlowCid1),
Cid: test.SlowCid1,
Status: api.TrackerStatusPinned,
},
false,
@ -933,11 +934,11 @@ func TestTrackUntrackWithCancel(t *testing.T) {
{
"slow map tracker untrack w/ cancel",
args{
test.MustDecodeCid(test.TestSlowCid1),
test.SlowCid1,
testSlowMapPinTracker(t),
},
api.PinInfo{
Cid: test.MustDecodeCid(test.TestSlowCid1),
Cid: test.SlowCid1,
Status: api.TrackerStatusPinned,
},
false,
@ -988,10 +989,10 @@ func TestTrackUntrackWithCancel(t *testing.T) {
func TestPinTracker_RemoteIgnoresError(t *testing.T) {
ctx := context.Background()
testF := func(t *testing.T, pt ipfscluster.PinTracker) {
remoteCid := test.MustDecodeCid(test.TestCid4)
remoteCid := test.Cid4
remote := api.PinWithOpts(remoteCid, pinOpts)
remote.Allocations = []peer.ID{test.TestPeerID2}
remote.Allocations = []peer.ID{test.PeerID2}
remote.ReplicationFactorMin = 1
remote.ReplicationFactorMax = 1

View File

@ -123,7 +123,7 @@ func (spt *Tracker) pin(op *optracker.Operation) error {
"",
"Cluster",
"IPFSPin",
op.Pin().ToSerial(),
op.Pin(),
&struct{}{},
)
if err != nil {
@ -142,7 +142,7 @@ func (spt *Tracker) unpin(op *optracker.Operation) error {
"",
"Cluster",
"IPFSUnpin",
op.Pin().ToSerial(),
op.Pin(),
&struct{}{},
)
if err != nil {
@ -152,7 +152,7 @@ func (spt *Tracker) unpin(op *optracker.Operation) error {
}
// Enqueue puts a new operation on the queue, unless ongoing exists.
func (spt *Tracker) enqueue(ctx context.Context, c api.Pin, typ optracker.OperationType) error {
func (spt *Tracker) enqueue(ctx context.Context, c *api.Pin, typ optracker.OperationType) error {
ctx, span := trace.StartSpan(ctx, "tracker/stateless/enqueue")
defer span.End()
@ -217,7 +217,7 @@ func (spt *Tracker) Shutdown(ctx context.Context) error {
// Track tells the StatelessPinTracker to start managing a Cid,
// possibly triggering Pin operations on the IPFS daemon.
func (spt *Tracker) Track(ctx context.Context, c api.Pin) error {
func (spt *Tracker) Track(ctx context.Context, c *api.Pin) error {
ctx, span := trace.StartSpan(ctx, "tracker/stateless/Track")
defer span.End()
@ -263,7 +263,7 @@ func (spt *Tracker) Untrack(ctx context.Context, c cid.Cid) error {
}
// StatusAll returns information for all Cids pinned to the local IPFS node.
func (spt *Tracker) StatusAll(ctx context.Context) []api.PinInfo {
func (spt *Tracker) StatusAll(ctx context.Context) []*api.PinInfo {
ctx, span := trace.StartSpan(ctx, "tracker/stateless/StatusAll")
defer span.End()
@ -280,7 +280,7 @@ func (spt *Tracker) StatusAll(ctx context.Context) []api.PinInfo {
pininfos[infop.Cid.String()] = infop
}
var pis []api.PinInfo
var pis []*api.PinInfo
for _, pi := range pininfos {
pis = append(pis, pi)
}
@ -288,7 +288,7 @@ func (spt *Tracker) StatusAll(ctx context.Context) []api.PinInfo {
}
// Status returns information for a Cid pinned to the local IPFS node.
func (spt *Tracker) Status(ctx context.Context, c cid.Cid) api.PinInfo {
func (spt *Tracker) Status(ctx context.Context, c cid.Cid) *api.PinInfo {
ctx, span := trace.StartSpan(ctx, "tracker/stateless/Status")
defer span.End()
@ -300,18 +300,18 @@ func (spt *Tracker) Status(ctx context.Context, c cid.Cid) api.PinInfo {
// check global state to see if cluster should even be caring about
// the provided cid
var gpinS api.PinSerial
var gpin api.Pin
err := spt.rpcClient.Call(
"",
"Cluster",
"PinGet",
api.PinCid(c).ToSerial(),
&gpinS,
c,
&gpin,
)
if err != nil {
if rpc.IsRPCError(err) {
logger.Error(err)
return api.PinInfo{
return &api.PinInfo{
Cid: c,
Peer: spt.peerID,
Status: api.TrackerStatusClusterError,
@ -320,7 +320,7 @@ func (spt *Tracker) Status(ctx context.Context, c cid.Cid) api.PinInfo {
}
}
// not part of global state. we should not care about
return api.PinInfo{
return &api.PinInfo{
Cid: c,
Peer: spt.peerID,
Status: api.TrackerStatusUnpinned,
@ -328,11 +328,9 @@ func (spt *Tracker) Status(ctx context.Context, c cid.Cid) api.PinInfo {
}
}
gpin := gpinS.ToPin()
// check if pin is a meta pin
if gpin.Type == api.MetaType {
return api.PinInfo{
return &api.PinInfo{
Cid: c,
Peer: spt.peerID,
Status: api.TrackerStatusSharded,
@ -342,7 +340,7 @@ func (spt *Tracker) Status(ctx context.Context, c cid.Cid) api.PinInfo {
// check if pin is a remote pin
if gpin.IsRemotePin(spt.peerID) {
return api.PinInfo{
return &api.PinInfo{
Cid: c,
Peer: spt.peerID,
Status: api.TrackerStatusRemote,
@ -356,22 +354,20 @@ func (spt *Tracker) Status(ctx context.Context, c cid.Cid) api.PinInfo {
"",
"Cluster",
"IPFSPinLsCid",
api.PinCid(c).ToSerial(),
c,
&ips,
)
if err != nil {
logger.Error(err)
return api.PinInfo{}
return nil
}
pi := api.PinInfo{
return &api.PinInfo{
Cid: c,
Peer: spt.peerID,
Status: ips.ToTrackerStatus(),
TS: time.Now(),
}
return pi
}
// SyncAll verifies that the statuses of all tracked Cids (from the shared state)
@ -382,7 +378,7 @@ func (spt *Tracker) Status(ctx context.Context, c cid.Cid) api.PinInfo {
// were updated or have errors. Cids in error states can be recovered
// with Recover().
// An error is returned if we are unable to contact the IPFS daemon.
func (spt *Tracker) SyncAll(ctx context.Context) ([]api.PinInfo, error) {
func (spt *Tracker) SyncAll(ctx context.Context) ([]*api.PinInfo, error) {
ctx, span := trace.StartSpan(ctx, "tracker/stateless/SyncAll")
defer span.End()
@ -409,7 +405,7 @@ func (spt *Tracker) SyncAll(ctx context.Context) ([]api.PinInfo, error) {
}
// Sync returns the updated local status for the given Cid.
func (spt *Tracker) Sync(ctx context.Context, c cid.Cid) (api.PinInfo, error) {
func (spt *Tracker) Sync(ctx context.Context, c cid.Cid) (*api.PinInfo, error) {
ctx, span := trace.StartSpan(ctx, "tracker/stateless/Sync")
defer span.End()
@ -421,18 +417,18 @@ func (spt *Tracker) Sync(ctx context.Context, c cid.Cid) (api.PinInfo, error) {
if oppi.Status == api.TrackerStatusUnpinError {
// check global state to see if cluster should even be caring about
// the provided cid
var gpin api.PinSerial
var gpin api.Pin
err := spt.rpcClient.Call(
"",
"Cluster",
"PinGet",
api.PinCid(c).ToSerial(),
c,
&gpin,
)
if err != nil {
if rpc.IsRPCError(err) {
logger.Error(err)
return api.PinInfo{
return &api.PinInfo{
Cid: c,
Peer: spt.peerID,
Status: api.TrackerStatusClusterError,
@ -442,7 +438,7 @@ func (spt *Tracker) Sync(ctx context.Context, c cid.Cid) (api.PinInfo, error) {
}
// it isn't in the global state
spt.optracker.CleanError(ctx, c)
return api.PinInfo{
return &api.PinInfo{
Cid: c,
Peer: spt.peerID,
Status: api.TrackerStatusUnpinned,
@ -450,9 +446,9 @@ func (spt *Tracker) Sync(ctx context.Context, c cid.Cid) (api.PinInfo, error) {
}, nil
}
// check if pin is a remote pin
if gpin.ToPin().IsRemotePin(spt.peerID) {
if gpin.IsRemotePin(spt.peerID) {
spt.optracker.CleanError(ctx, c)
return api.PinInfo{
return &api.PinInfo{
Cid: c,
Peer: spt.peerID,
Status: api.TrackerStatusRemote,
@ -468,12 +464,12 @@ func (spt *Tracker) Sync(ctx context.Context, c cid.Cid) (api.PinInfo, error) {
"",
"Cluster",
"IPFSPinLsCid",
api.PinCid(c).ToSerial(),
c,
&ips,
)
if err != nil {
logger.Error(err)
return api.PinInfo{
return &api.PinInfo{
Cid: c,
Peer: spt.peerID,
Status: api.TrackerStatusPinError,
@ -483,7 +479,7 @@ func (spt *Tracker) Sync(ctx context.Context, c cid.Cid) (api.PinInfo, error) {
}
if ips.ToTrackerStatus() == api.TrackerStatusPinned {
spt.optracker.CleanError(ctx, c)
pi := api.PinInfo{
pi := &api.PinInfo{
Cid: c,
Peer: spt.peerID,
Status: ips.ToTrackerStatus(),
@ -497,12 +493,12 @@ func (spt *Tracker) Sync(ctx context.Context, c cid.Cid) (api.PinInfo, error) {
}
// RecoverAll attempts to recover all items tracked by this peer.
func (spt *Tracker) RecoverAll(ctx context.Context) ([]api.PinInfo, error) {
func (spt *Tracker) RecoverAll(ctx context.Context) ([]*api.PinInfo, error) {
ctx, span := trace.StartSpan(ctx, "tracker/stateless/RecoverAll")
defer span.End()
statuses := spt.StatusAll(ctx)
resp := make([]api.PinInfo, 0)
resp := make([]*api.PinInfo, 0)
for _, st := range statuses {
r, err := spt.Recover(ctx, st.Cid)
if err != nil {
@ -516,7 +512,7 @@ func (spt *Tracker) RecoverAll(ctx context.Context) ([]api.PinInfo, error) {
// Recover will re-track or re-untrack a Cid in error state,
// possibly retriggering an IPFS pinning operation and returning
// only when it is done.
func (spt *Tracker) Recover(ctx context.Context, c cid.Cid) (api.PinInfo, error) {
func (spt *Tracker) Recover(ctx context.Context, c cid.Cid) (*api.PinInfo, error) {
ctx, span := trace.StartSpan(ctx, "tracker/stateless/Recover")
defer span.End()
@ -540,7 +536,7 @@ func (spt *Tracker) Recover(ctx context.Context, c cid.Cid) (api.PinInfo, error)
return spt.Status(ctx, c), nil
}
func (spt *Tracker) ipfsStatusAll(ctx context.Context) (map[string]api.PinInfo, error) {
func (spt *Tracker) ipfsStatusAll(ctx context.Context) (map[string]*api.PinInfo, error) {
ctx, span := trace.StartSpan(ctx, "tracker/stateless/ipfsStatusAll")
defer span.End()
@ -557,14 +553,14 @@ func (spt *Tracker) ipfsStatusAll(ctx context.Context) (map[string]api.PinInfo,
logger.Error(err)
return nil, err
}
pins := make(map[string]api.PinInfo, 0)
pins := make(map[string]*api.PinInfo, 0)
for cidstr, ips := range ipsMap {
c, err := cid.Decode(cidstr)
if err != nil {
logger.Error(err)
continue
}
p := api.PinInfo{
p := &api.PinInfo{
Cid: c,
Peer: spt.peerID,
Status: ips.ToTrackerStatus(),
@ -578,30 +574,26 @@ func (spt *Tracker) ipfsStatusAll(ctx context.Context) (map[string]api.PinInfo,
// localStatus returns a joint set of consensusState and ipfsStatus
// marking pins which should be meta or remote and leaving any ipfs pins that
// aren't in the consensusState out.
func (spt *Tracker) localStatus(ctx context.Context, incExtra bool) (map[string]api.PinInfo, error) {
func (spt *Tracker) localStatus(ctx context.Context, incExtra bool) (map[string]*api.PinInfo, error) {
ctx, span := trace.StartSpan(ctx, "tracker/stateless/localStatus")
defer span.End()
pininfos := make(map[string]api.PinInfo)
pininfos := make(map[string]*api.PinInfo)
// get shared state
var statePinsSerial []api.PinSerial
var statePins []*api.Pin
err := spt.rpcClient.CallContext(
ctx,
"",
"Cluster",
"Pins",
struct{}{},
&statePinsSerial,
&statePins,
)
if err != nil {
logger.Error(err)
return nil, err
}
var statePins []api.Pin
for _, p := range statePinsSerial {
statePins = append(statePins, p.ToPin())
}
// get statuses from ipfs node first
localpis, err := spt.ipfsStatusAll(ctx)
@ -614,7 +606,7 @@ func (spt *Tracker) localStatus(ctx context.Context, incExtra bool) (map[string]
pCid := p.Cid.String()
if p.Type == api.MetaType && incExtra {
// add pin to pininfos with sharded status
pininfos[pCid] = api.PinInfo{
pininfos[pCid] = &api.PinInfo{
Cid: p.Cid,
Peer: spt.peerID,
Status: api.TrackerStatusSharded,
@ -625,7 +617,7 @@ func (spt *Tracker) localStatus(ctx context.Context, incExtra bool) (map[string]
if p.IsRemotePin(spt.peerID) && incExtra {
// add pin to pininfos with a status of remote
pininfos[pCid] = api.PinInfo{
pininfos[pCid] = &api.PinInfo{
Cid: p.Cid,
Peer: spt.peerID,
Status: api.TrackerStatusRemote,
@ -641,7 +633,7 @@ func (spt *Tracker) localStatus(ctx context.Context, incExtra bool) (map[string]
return pininfos, nil
}
func (spt *Tracker) getErrorsAll(ctx context.Context) []api.PinInfo {
func (spt *Tracker) getErrorsAll(ctx context.Context) []*api.PinInfo {
return spt.optracker.Filter(ctx, optracker.PhaseError)
}
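
With PinGet and IPFSPinLsCid now taking a plain cid.Cid, the two RPC hops that Status performs above can be sketched from the caller side like this (an illustrative helper, not code from this commit; the client setup is assumed):

package example

import (
	"context"

	cid "github.com/ipfs/go-cid"
	rpc "github.com/libp2p/go-libp2p-gorpc"

	"github.com/ipfs/ipfs-cluster/api"
)

// isClusterPinned mirrors the logic in Status: first ask the cluster
// (PinGet) whether it tracks the cid at all, then ask the IPFS
// connector (IPFSPinLsCid) whether the daemon actually has it pinned.
func isClusterPinned(ctx context.Context, c *rpc.Client, h cid.Cid) (bool, error) {
	var gpin api.Pin
	if err := c.CallContext(ctx, "", "Cluster", "PinGet", h, &gpin); err != nil {
		return false, err // not in the shared state, or an RPC failure
	}

	var ips api.IPFSPinStatus
	if err := c.CallContext(ctx, "", "Cluster", "IPFSPinLsCid", h, &ips); err != nil {
		return false, err
	}
	return ips.ToTrackerStatus() == api.TrackerStatusPinned, nil
}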

View File

@ -15,8 +15,8 @@ import (
)
var (
pinCancelCid = test.TestCid3
unpinCancelCid = test.TestCid2
pinCancelCid = test.Cid3
unpinCancelCid = test.Cid2
ErrPinCancelCid = errors.New("should not have received rpc.IPFSPin operation")
ErrUnpinCancelCid = errors.New("should not have received rpc.IPFSUnpin operation")
pinOpts = api.PinOptions{
@ -39,23 +39,21 @@ func mockRPCClient(t *testing.T) *rpc.Client {
return c
}
func (mock *mockService) IPFSPin(ctx context.Context, in api.PinSerial, out *struct{}) error {
c := in.ToPin().Cid
switch c.String() {
case test.TestSlowCid1:
func (mock *mockService) IPFSPin(ctx context.Context, in *api.Pin, out *struct{}) error {
switch in.Cid.String() {
case test.SlowCid1.String():
time.Sleep(2 * time.Second)
case pinCancelCid:
case pinCancelCid.String():
return ErrPinCancelCid
}
return nil
}
func (mock *mockService) IPFSUnpin(ctx context.Context, in api.PinSerial, out *struct{}) error {
c := in.ToPin().Cid
switch c.String() {
case test.TestSlowCid1:
func (mock *mockService) IPFSUnpin(ctx context.Context, in *api.Pin, out *struct{}) error {
switch in.Cid.String() {
case test.SlowCid1.String():
time.Sleep(2 * time.Second)
case unpinCancelCid:
case unpinCancelCid.String():
return ErrUnpinCancelCid
}
return nil
@ -63,15 +61,15 @@ func (mock *mockService) IPFSUnpin(ctx context.Context, in api.PinSerial, out *s
func (mock *mockService) IPFSPinLs(ctx context.Context, in string, out *map[string]api.IPFSPinStatus) error {
m := map[string]api.IPFSPinStatus{
test.TestCid1: api.IPFSPinStatusRecursive,
test.Cid1.String(): api.IPFSPinStatusRecursive,
}
*out = m
return nil
}
func (mock *mockService) IPFSPinLsCid(ctx context.Context, in api.PinSerial, out *api.IPFSPinStatus) error {
switch in.Cid {
case test.TestCid1, test.TestCid2:
func (mock *mockService) IPFSPinLsCid(ctx context.Context, in cid.Cid, out *api.IPFSPinStatus) error {
switch in.String() {
case test.Cid1.String(), test.Cid2.String():
*out = api.IPFSPinStatusRecursive
default:
*out = api.IPFSPinStatusUnpinned
@ -79,20 +77,20 @@ func (mock *mockService) IPFSPinLsCid(ctx context.Context, in api.PinSerial, out
return nil
}
func (mock *mockService) Pins(ctx context.Context, in struct{}, out *[]api.PinSerial) error {
*out = []api.PinSerial{
api.PinWithOpts(test.MustDecodeCid(test.TestCid1), pinOpts).ToSerial(),
api.PinWithOpts(test.MustDecodeCid(test.TestCid3), pinOpts).ToSerial(),
func (mock *mockService) Pins(ctx context.Context, in struct{}, out *[]*api.Pin) error {
*out = []*api.Pin{
api.PinWithOpts(test.Cid1, pinOpts),
api.PinWithOpts(test.Cid3, pinOpts),
}
return nil
}
func (mock *mockService) PinGet(ctx context.Context, in api.PinSerial, out *api.PinSerial) error {
switch in.Cid {
case test.ErrorCid:
func (mock *mockService) PinGet(ctx context.Context, in cid.Cid, out *api.Pin) error {
switch in.String() {
case test.ErrorCid.String():
return errors.New("expected error when using ErrorCid")
case test.TestCid1, test.TestCid2:
*out = api.PinWithOpts(test.MustDecodeCid(in.Cid), pinOpts).ToSerial()
case test.Cid1.String(), test.Cid2.String():
*out = *api.PinWithOpts(in, pinOpts)
return nil
default:
return errors.New("not found")
@ -103,7 +101,7 @@ func testSlowStatelessPinTracker(t *testing.T) *Tracker {
cfg := &Config{}
cfg.Default()
cfg.ConcurrentPins = 1
mpt := New(cfg, test.TestPeerID1, test.TestPeerName1)
mpt := New(cfg, test.PeerID1, test.PeerName1)
mpt.SetClient(mockRPCClient(t))
return mpt
}
@ -112,7 +110,7 @@ func testStatelessPinTracker(t testing.TB) *Tracker {
cfg := &Config{}
cfg.Default()
cfg.ConcurrentPins = 1
spt := New(cfg, test.TestPeerID1, test.TestPeerName1)
spt := New(cfg, test.PeerID1, test.PeerName1)
spt.SetClient(test.NewMockRPCClient(t))
return spt
}
@ -141,7 +139,7 @@ func TestUntrackTrack(t *testing.T) {
spt := testStatelessPinTracker(t)
defer spt.Shutdown(ctx)
h1 := test.MustDecodeCid(test.TestCid1)
h1 := test.Cid1
// LocalPin
c := api.PinWithOpts(h1, pinOpts)
@ -164,7 +162,7 @@ func TestTrackUntrackWithCancel(t *testing.T) {
spt := testSlowStatelessPinTracker(t)
defer spt.Shutdown(ctx)
slowPinCid := test.MustDecodeCid(test.TestSlowCid1)
slowPinCid := test.SlowCid1
// LocalPin
slowPin := api.PinWithOpts(slowPinCid, pinOpts)
@ -209,8 +207,8 @@ func TestTrackUntrackWithNoCancel(t *testing.T) {
spt := testSlowStatelessPinTracker(t)
defer spt.Shutdown(ctx)
slowPinCid := test.MustDecodeCid(test.TestSlowCid1)
fastPinCid := test.MustDecodeCid(pinCancelCid)
slowPinCid := test.SlowCid1
fastPinCid := pinCancelCid
// SlowLocalPin
slowPin := api.PinWithOpts(slowPinCid, pinOpts)
@ -260,7 +258,7 @@ func TestUntrackTrackWithCancel(t *testing.T) {
spt := testSlowStatelessPinTracker(t)
defer spt.Shutdown(ctx)
slowPinCid := test.MustDecodeCid(test.TestSlowCid1)
slowPinCid := test.SlowCid1
// LocalPin
slowPin := api.PinWithOpts(slowPinCid, pinOpts)
@ -310,8 +308,8 @@ func TestUntrackTrackWithNoCancel(t *testing.T) {
spt := testStatelessPinTracker(t)
defer spt.Shutdown(ctx)
slowPinCid := test.MustDecodeCid(test.TestSlowCid1)
fastPinCid := test.MustDecodeCid(unpinCancelCid)
slowPinCid := test.SlowCid1
fastPinCid := unpinCancelCid
// SlowLocalPin
slowPin := api.PinWithOpts(slowPinCid, pinOpts)
@ -361,7 +359,7 @@ func TestUntrackTrackWithNoCancel(t *testing.T) {
}
}
var sortPinInfoByCid = func(p []api.PinInfo) {
var sortPinInfoByCid = func(p []*api.PinInfo) {
sort.Slice(p, func(i, j int) bool {
return p[i].Cid.String() < p[j].Cid.String()
})
@ -375,25 +373,25 @@ func TestStatelessTracker_SyncAll(t *testing.T) {
tests := []struct {
name string
args args
want []api.PinInfo
want []*api.PinInfo
wantErr bool
}{
{
"basic stateless syncall",
args{
[]cid.Cid{
test.MustDecodeCid(test.TestCid1),
test.MustDecodeCid(test.TestCid2),
test.Cid1,
test.Cid2,
},
testStatelessPinTracker(t),
},
[]api.PinInfo{
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid1),
[]*api.PinInfo{
{
Cid: test.Cid1,
Status: api.TrackerStatusPinned,
},
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid2),
{
Cid: test.Cid2,
Status: api.TrackerStatusPinned,
},
},
@ -403,18 +401,18 @@ func TestStatelessTracker_SyncAll(t *testing.T) {
"slow stateless syncall",
args{
[]cid.Cid{
test.MustDecodeCid(test.TestCid1),
test.MustDecodeCid(test.TestCid2),
test.Cid1,
test.Cid2,
},
testSlowStatelessPinTracker(t),
},
[]api.PinInfo{
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid1),
[]*api.PinInfo{
{
Cid: test.Cid1,
Status: api.TrackerStatusPinned,
},
api.PinInfo{
Cid: test.MustDecodeCid(test.TestCid2),
{
Cid: test.Cid2,
Status: api.TrackerStatusPinned,
},
},

View File

@ -8,7 +8,7 @@ import (
// IsRemotePin determines whether a Pin's ReplicationFactor has
// been met, so as to either pin or unpin it from the peer.
func IsRemotePin(c api.Pin, pid peer.ID) bool {
func IsRemotePin(c *api.Pin, pid peer.ID) bool {
if c.ReplicationFactorMax < 0 {
return false
}
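
Only the first early return of IsRemotePin is visible in this hunk, but it already encodes one invariant: a negative ReplicationFactorMax means "pin everywhere", so such a pin is never remote for any peer. A hypothetical usage sketch (the util import path is assumed from the repository layout):

package example

import (
	peer "github.com/libp2p/go-libp2p-peer"

	"github.com/ipfs/ipfs-cluster/api"
	"github.com/ipfs/ipfs-cluster/pintracker/util"
	"github.com/ipfs/ipfs-cluster/test"
)

func isRemoteForPeer(pid peer.ID) bool {
	pin := api.PinCid(test.Cid1)
	pin.ReplicationFactorMax = -1 // replicate everywhere
	// With a negative max replication factor the helper returns
	// false before it ever looks at allocations.
	return util.IsRemotePin(pin, pid)
}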

View File

@ -99,18 +99,18 @@ func (pm *Manager) RmPeer(pid peer.ID) error {
// if the peer has dns addresses, return only those, otherwise
// return all. In all cases, encapsulate the peer ID.
func (pm *Manager) filteredPeerAddrs(p peer.ID) []ma.Multiaddr {
func (pm *Manager) filteredPeerAddrs(p peer.ID) []api.Multiaddr {
all := pm.host.Peerstore().Addrs(p)
peerAddrs := []ma.Multiaddr{}
peerDNSAddrs := []ma.Multiaddr{}
peerAddrs := []api.Multiaddr{}
peerDNSAddrs := []api.Multiaddr{}
peerPart, _ := ma.NewMultiaddr(fmt.Sprintf("/ipfs/%s", peer.IDB58Encode(p)))
for _, a := range all {
encAddr := a.Encapsulate(peerPart)
if madns.Matches(encAddr) {
peerDNSAddrs = append(peerDNSAddrs, encAddr)
peerDNSAddrs = append(peerDNSAddrs, api.NewMultiaddrWithValue(encAddr))
} else {
peerAddrs = append(peerAddrs, encAddr)
peerAddrs = append(peerAddrs, api.NewMultiaddrWithValue(encAddr))
}
}
@ -125,7 +125,7 @@ func (pm *Manager) filteredPeerAddrs(p peer.ID) []ma.Multiaddr {
// /ipfs/<peerID> part) for the given set of peers. For peers for which
// we know DNS multiaddresses, we only return those. Otherwise, we return
// all the multiaddresses known for that peer.
func (pm *Manager) PeersAddresses(peers []peer.ID) []ma.Multiaddr {
func (pm *Manager) PeersAddresses(peers []peer.ID) []api.Multiaddr {
if pm.host == nil {
return nil
}
@ -134,7 +134,7 @@ func (pm *Manager) PeersAddresses(peers []peer.ID) []ma.Multiaddr {
return nil
}
var addrs []ma.Multiaddr
var addrs []api.Multiaddr
for _, p := range peers {
if p == pm.host.ID() {
continue
@ -200,7 +200,7 @@ func (pm *Manager) LoadPeerstore() (addrs []ma.Multiaddr) {
// SavePeerstore stores a slice of multiaddresses in the peerstore file, one
// per line.
func (pm *Manager) SavePeerstore(addrs []ma.Multiaddr) {
func (pm *Manager) SavePeerstore(addrs []api.Multiaddr) {
if pm.peerstorePath == "" {
return
}
@ -220,7 +220,7 @@ func (pm *Manager) SavePeerstore(addrs []ma.Multiaddr) {
defer f.Close()
for _, a := range addrs {
f.Write([]byte(fmt.Sprintf("%s\n", a.String())))
f.Write([]byte(fmt.Sprintf("%s\n", a.Value().String())))
}
}
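
The peerstore manager now traffics in api.Multiaddr, a serializable wrapper around a raw multiaddr; NewMultiaddrWithValue and Value() are the two conversion points used above. A small round-trip sketch with an arbitrary address:

package example

import (
	"fmt"

	"github.com/ipfs/ipfs-cluster/api"
	ma "github.com/multiformats/go-multiaddr"
)

func roundTrip() error {
	raw, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/9096") // arbitrary example address
	if err != nil {
		return err
	}

	wrapped := api.NewMultiaddrWithValue(raw) // what filteredPeerAddrs stores
	fmt.Println(wrapped.Value().String())     // what SavePeerstore writes, one per line
	return nil
}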

View File

@ -7,6 +7,8 @@ import (
"go.opencensus.io/trace"
cid "github.com/ipfs/go-cid"
"github.com/ipfs/ipfs-cluster/api"
)
@ -26,56 +28,57 @@ type RPCAPI struct {
*/
// ID runs Cluster.ID()
func (rpcapi *RPCAPI) ID(ctx context.Context, in struct{}, out *api.IDSerial) error {
id := rpcapi.c.ID(ctx).ToSerial()
*out = id
func (rpcapi *RPCAPI) ID(ctx context.Context, in struct{}, out *api.ID) error {
id := rpcapi.c.ID(ctx)
*out = *id
return nil
}
// Pin runs Cluster.Pin().
func (rpcapi *RPCAPI) Pin(ctx context.Context, in api.PinSerial, out *struct{}) error {
return rpcapi.c.Pin(ctx, in.ToPin())
func (rpcapi *RPCAPI) Pin(ctx context.Context, in *api.Pin, out *struct{}) error {
return rpcapi.c.Pin(ctx, in)
}
// Unpin runs Cluster.Unpin().
func (rpcapi *RPCAPI) Unpin(ctx context.Context, in api.PinSerial, out *struct{}) error {
c := in.DecodeCid()
return rpcapi.c.Unpin(ctx, c)
func (rpcapi *RPCAPI) Unpin(ctx context.Context, in *api.Pin, out *struct{}) error {
return rpcapi.c.Unpin(ctx, in.Cid)
}
// PinPath resolves path into a cid and runs Cluster.Pin().
func (rpcapi *RPCAPI) PinPath(ctx context.Context, in api.PinPath, out *api.PinSerial) error {
func (rpcapi *RPCAPI) PinPath(ctx context.Context, in *api.PinPath, out *api.Pin) error {
pin, err := rpcapi.c.PinPath(ctx, in)
*out = pin.ToSerial()
if err != nil {
return err
}
*out = *pin
return nil
}
// UnpinPath resolves path into a cid and runs Cluster.Unpin().
func (rpcapi *RPCAPI) UnpinPath(ctx context.Context, in string, out *api.PinSerial) error {
func (rpcapi *RPCAPI) UnpinPath(ctx context.Context, in string, out *api.Pin) error {
pin, err := rpcapi.c.UnpinPath(ctx, in)
*out = pin.ToSerial()
if err != nil {
return err
}
*out = *pin
return nil
}
// Pins runs Cluster.Pins().
func (rpcapi *RPCAPI) Pins(ctx context.Context, in struct{}, out *[]api.PinSerial) error {
func (rpcapi *RPCAPI) Pins(ctx context.Context, in struct{}, out *[]*api.Pin) error {
cidList := rpcapi.c.Pins(ctx)
cidSerialList := make([]api.PinSerial, 0, len(cidList))
for _, c := range cidList {
cidSerialList = append(cidSerialList, c.ToSerial())
}
*out = cidSerialList
*out = cidList
return nil
}
// PinGet runs Cluster.PinGet().
func (rpcapi *RPCAPI) PinGet(ctx context.Context, in api.PinSerial, out *api.PinSerial) error {
cidarg := in.ToPin()
pin, err := rpcapi.c.PinGet(ctx, cidarg.Cid)
if err == nil {
*out = pin.ToSerial()
}
func (rpcapi *RPCAPI) PinGet(ctx context.Context, in cid.Cid, out *api.Pin) error {
pin, err := rpcapi.c.PinGet(ctx, in)
if err != nil {
return err
}
*out = *pin
return nil
}
// Version runs Cluster.Version().
@ -87,29 +90,29 @@ func (rpcapi *RPCAPI) Version(ctx context.Context, in struct{}, out *api.Version
}
// Peers runs Cluster.Peers().
func (rpcapi *RPCAPI) Peers(ctx context.Context, in struct{}, out *[]api.IDSerial) error {
peers := rpcapi.c.Peers(ctx)
var sPeers []api.IDSerial
for _, p := range peers {
sPeers = append(sPeers, p.ToSerial())
}
*out = sPeers
func (rpcapi *RPCAPI) Peers(ctx context.Context, in struct{}, out *[]*api.ID) error {
*out = rpcapi.c.Peers(ctx)
return nil
}
// PeerAdd runs Cluster.PeerAdd().
func (rpcapi *RPCAPI) PeerAdd(ctx context.Context, in string, out *api.IDSerial) error {
pid, _ := peer.IDB58Decode(in)
id, err := rpcapi.c.PeerAdd(ctx, pid)
*out = id.ToSerial()
func (rpcapi *RPCAPI) PeerAdd(ctx context.Context, in peer.ID, out *api.ID) error {
id, err := rpcapi.c.PeerAdd(ctx, in)
if err != nil {
return err
}
*out = *id
return nil
}
// ConnectGraph runs Cluster.GetConnectGraph().
func (rpcapi *RPCAPI) ConnectGraph(ctx context.Context, in struct{}, out *api.ConnectGraphSerial) error {
func (rpcapi *RPCAPI) ConnectGraph(ctx context.Context, in struct{}, out *api.ConnectGraph) error {
graph, err := rpcapi.c.ConnectGraph()
*out = graph.ToSerial()
if err != nil {
return err
}
*out = graph
return nil
}
// PeerRemove runs Cluster.PeerRm().
@ -118,112 +121,130 @@ func (rpcapi *RPCAPI) PeerRemove(ctx context.Context, in peer.ID, out *struct{})
}
// Join runs Cluster.Join().
func (rpcapi *RPCAPI) Join(ctx context.Context, in api.MultiaddrSerial, out *struct{}) error {
addr := in.ToMultiaddr()
err := rpcapi.c.Join(ctx, addr)
return err
func (rpcapi *RPCAPI) Join(ctx context.Context, in api.Multiaddr, out *struct{}) error {
return rpcapi.c.Join(ctx, in.Value())
}
// StatusAll runs Cluster.StatusAll().
func (rpcapi *RPCAPI) StatusAll(ctx context.Context, in struct{}, out *[]api.GlobalPinInfoSerial) error {
func (rpcapi *RPCAPI) StatusAll(ctx context.Context, in struct{}, out *[]*api.GlobalPinInfo) error {
pinfos, err := rpcapi.c.StatusAll(ctx)
*out = GlobalPinInfoSliceToSerial(pinfos)
if err != nil {
return err
}
*out = pinfos
return nil
}
// StatusAllLocal runs Cluster.StatusAllLocal().
func (rpcapi *RPCAPI) StatusAllLocal(ctx context.Context, in struct{}, out *[]api.PinInfoSerial) error {
func (rpcapi *RPCAPI) StatusAllLocal(ctx context.Context, in struct{}, out *[]*api.PinInfo) error {
pinfos := rpcapi.c.StatusAllLocal(ctx)
*out = pinInfoSliceToSerial(pinfos)
*out = pinfos
return nil
}
// Status runs Cluster.Status().
func (rpcapi *RPCAPI) Status(ctx context.Context, in api.PinSerial, out *api.GlobalPinInfoSerial) error {
c := in.DecodeCid()
pinfo, err := rpcapi.c.Status(ctx, c)
*out = pinfo.ToSerial()
func (rpcapi *RPCAPI) Status(ctx context.Context, in cid.Cid, out *api.GlobalPinInfo) error {
pinfo, err := rpcapi.c.Status(ctx, in)
if err != nil {
return err
}
*out = *pinfo
return nil
}
// StatusLocal runs Cluster.StatusLocal().
func (rpcapi *RPCAPI) StatusLocal(ctx context.Context, in api.PinSerial, out *api.PinInfoSerial) error {
c := in.DecodeCid()
pinfo := rpcapi.c.StatusLocal(ctx, c)
*out = pinfo.ToSerial()
func (rpcapi *RPCAPI) StatusLocal(ctx context.Context, in cid.Cid, out *api.PinInfo) error {
pinfo := rpcapi.c.StatusLocal(ctx, in)
*out = *pinfo
return nil
}
// SyncAll runs Cluster.SyncAll().
func (rpcapi *RPCAPI) SyncAll(ctx context.Context, in struct{}, out *[]api.GlobalPinInfoSerial) error {
func (rpcapi *RPCAPI) SyncAll(ctx context.Context, in struct{}, out *[]*api.GlobalPinInfo) error {
pinfos, err := rpcapi.c.SyncAll(ctx)
*out = GlobalPinInfoSliceToSerial(pinfos)
if err != nil {
return err
}
*out = pinfos
return nil
}
// SyncAllLocal runs Cluster.SyncAllLocal().
func (rpcapi *RPCAPI) SyncAllLocal(ctx context.Context, in struct{}, out *[]api.PinInfoSerial) error {
func (rpcapi *RPCAPI) SyncAllLocal(ctx context.Context, in struct{}, out *[]*api.PinInfo) error {
pinfos, err := rpcapi.c.SyncAllLocal(ctx)
*out = pinInfoSliceToSerial(pinfos)
if err != nil {
return err
}
*out = pinfos
return nil
}
// Sync runs Cluster.Sync().
func (rpcapi *RPCAPI) Sync(ctx context.Context, in api.PinSerial, out *api.GlobalPinInfoSerial) error {
c := in.DecodeCid()
pinfo, err := rpcapi.c.Sync(ctx, c)
*out = pinfo.ToSerial()
func (rpcapi *RPCAPI) Sync(ctx context.Context, in cid.Cid, out *api.GlobalPinInfo) error {
pinfo, err := rpcapi.c.Sync(ctx, in)
if err != nil {
return err
}
*out = *pinfo
return nil
}
// SyncLocal runs Cluster.SyncLocal().
func (rpcapi *RPCAPI) SyncLocal(ctx context.Context, in api.PinSerial, out *api.PinInfoSerial) error {
c := in.DecodeCid()
pinfo, err := rpcapi.c.SyncLocal(ctx, c)
*out = pinfo.ToSerial()
func (rpcapi *RPCAPI) SyncLocal(ctx context.Context, in cid.Cid, out *api.PinInfo) error {
pinfo, err := rpcapi.c.SyncLocal(ctx, in)
if err != nil {
return err
}
*out = *pinfo
return nil
}
// RecoverAllLocal runs Cluster.RecoverAllLocal().
func (rpcapi *RPCAPI) RecoverAllLocal(ctx context.Context, in struct{}, out *[]api.PinInfoSerial) error {
func (rpcapi *RPCAPI) RecoverAllLocal(ctx context.Context, in struct{}, out *[]*api.PinInfo) error {
pinfos, err := rpcapi.c.RecoverAllLocal(ctx)
*out = pinInfoSliceToSerial(pinfos)
if err != nil {
return err
}
*out = pinfos
return nil
}
// Recover runs Cluster.Recover().
func (rpcapi *RPCAPI) Recover(ctx context.Context, in api.PinSerial, out *api.GlobalPinInfoSerial) error {
c := in.DecodeCid()
pinfo, err := rpcapi.c.Recover(ctx, c)
*out = pinfo.ToSerial()
func (rpcapi *RPCAPI) Recover(ctx context.Context, in cid.Cid, out *api.GlobalPinInfo) error {
pinfo, err := rpcapi.c.Recover(ctx, in)
if err != nil {
return err
}
*out = *pinfo
return nil
}
// RecoverLocal runs Cluster.RecoverLocal().
func (rpcapi *RPCAPI) RecoverLocal(ctx context.Context, in api.PinSerial, out *api.PinInfoSerial) error {
c := in.DecodeCid()
pinfo, err := rpcapi.c.RecoverLocal(ctx, c)
*out = pinfo.ToSerial()
func (rpcapi *RPCAPI) RecoverLocal(ctx context.Context, in cid.Cid, out *api.PinInfo) error {
pinfo, err := rpcapi.c.RecoverLocal(ctx, in)
if err != nil {
return err
}
*out = *pinfo
return nil
}
// BlockAllocate returns allocations for blocks. This is used in the adders.
// It's different from pin allocations when ReplicationFactor < 0.
func (rpcapi *RPCAPI) BlockAllocate(ctx context.Context, in api.PinSerial, out *[]string) error {
pin := in.ToPin()
err := rpcapi.c.setupPin(ctx, &pin)
func (rpcapi *RPCAPI) BlockAllocate(ctx context.Context, in *api.Pin, out *[]peer.ID) error {
err := rpcapi.c.setupPin(ctx, in)
if err != nil {
return err
}
// Return the current peer list.
if pin.ReplicationFactorMin < 0 {
if in.ReplicationFactorMin < 0 {
// Returned metrics are Valid and belong to current
// Cluster peers.
metrics := rpcapi.c.monitor.LatestMetrics(ctx, pingMetricName)
peers := make([]string, len(metrics), len(metrics))
peers := make([]peer.ID, len(metrics), len(metrics))
for i, m := range metrics {
peers[i] = peer.IDB58Encode(m.Peer)
peers[i] = m.Peer
}
*out = peers
@ -232,9 +253,9 @@ func (rpcapi *RPCAPI) BlockAllocate(ctx context.Context, in api.PinSerial, out *
allocs, err := rpcapi.c.allocate(
ctx,
pin.Cid,
pin.ReplicationFactorMin,
pin.ReplicationFactorMax,
in.Cid,
in.ReplicationFactorMin,
in.ReplicationFactorMax,
[]peer.ID{}, // blacklist
[]peer.ID{}, // prio list
)
@ -243,15 +264,18 @@ func (rpcapi *RPCAPI) BlockAllocate(ctx context.Context, in api.PinSerial, out *
return err
}
*out = api.PeersToStrings(allocs)
*out = allocs
return nil
}
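
BlockAllocate now takes *api.Pin and returns []peer.ID directly, so the adders no longer convert peer IDs to and from strings. A hedged sketch of a caller, reusing the go-libp2p-gorpc calling pattern that appears elsewhere in this diff (the PinOptions fields are assumed from their use in the tests above):

package example

import (
	"context"

	rpc "github.com/libp2p/go-libp2p-gorpc"
	peer "github.com/libp2p/go-libp2p-peer"

	"github.com/ipfs/ipfs-cluster/api"
	"github.com/ipfs/ipfs-cluster/test"
)

// allocationsFor asks the cluster where the blocks of a new pin should
// go. With ReplicationFactorMin < 0 the RPC returns the full peer set.
func allocationsFor(ctx context.Context, c *rpc.Client) ([]peer.ID, error) {
	pin := api.PinWithOpts(test.Cid1, api.PinOptions{
		ReplicationFactorMin: -1,
		ReplicationFactorMax: -1,
	})

	var allocs []peer.ID
	err := c.CallContext(ctx, "", "Cluster", "BlockAllocate", pin, &allocs)
	return allocs, err
}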
// SendInformerMetric runs Cluster.sendInformerMetric().
func (rpcapi *RPCAPI) SendInformerMetric(ctx context.Context, in struct{}, out *api.Metric) error {
m, err := rpcapi.c.sendInformerMetric(ctx)
*out = m
if err != nil {
return err
}
*out = *m
return nil
}
/*
@ -259,54 +283,54 @@ func (rpcapi *RPCAPI) SendInformerMetric(ctx context.Context, in struct{}, out *
*/
// Track runs PinTracker.Track().
func (rpcapi *RPCAPI) Track(ctx context.Context, in api.PinSerial, out *struct{}) error {
func (rpcapi *RPCAPI) Track(ctx context.Context, in *api.Pin, out *struct{}) error {
ctx, span := trace.StartSpan(ctx, "rpc/tracker/Track")
defer span.End()
return rpcapi.c.tracker.Track(ctx, in.ToPin())
return rpcapi.c.tracker.Track(ctx, in)
}
// Untrack runs PinTracker.Untrack().
func (rpcapi *RPCAPI) Untrack(ctx context.Context, in api.PinSerial, out *struct{}) error {
func (rpcapi *RPCAPI) Untrack(ctx context.Context, in *api.Pin, out *struct{}) error {
ctx, span := trace.StartSpan(ctx, "rpc/tracker/Untrack")
defer span.End()
c := in.DecodeCid()
return rpcapi.c.tracker.Untrack(ctx, c)
return rpcapi.c.tracker.Untrack(ctx, in.Cid)
}
// TrackerStatusAll runs PinTracker.StatusAll().
func (rpcapi *RPCAPI) TrackerStatusAll(ctx context.Context, in struct{}, out *[]api.PinInfoSerial) error {
func (rpcapi *RPCAPI) TrackerStatusAll(ctx context.Context, in struct{}, out *[]*api.PinInfo) error {
ctx, span := trace.StartSpan(ctx, "rpc/tracker/StatusAll")
defer span.End()
*out = pinInfoSliceToSerial(rpcapi.c.tracker.StatusAll(ctx))
*out = rpcapi.c.tracker.StatusAll(ctx)
return nil
}
// TrackerStatus runs PinTracker.Status().
func (rpcapi *RPCAPI) TrackerStatus(ctx context.Context, in api.PinSerial, out *api.PinInfoSerial) error {
func (rpcapi *RPCAPI) TrackerStatus(ctx context.Context, in cid.Cid, out *api.PinInfo) error {
ctx, span := trace.StartSpan(ctx, "rpc/tracker/Status")
defer span.End()
c := in.DecodeCid()
pinfo := rpcapi.c.tracker.Status(ctx, c)
*out = pinfo.ToSerial()
pinfo := rpcapi.c.tracker.Status(ctx, in)
*out = *pinfo
return nil
}
// TrackerRecoverAll runs PinTracker.RecoverAll().
func (rpcapi *RPCAPI) TrackerRecoverAll(ctx context.Context, in struct{}, out *[]api.PinInfoSerial) error {
func (rpcapi *RPCAPI) TrackerRecoverAll(ctx context.Context, in struct{}, out *[]*api.PinInfo) error {
ctx, span := trace.StartSpan(ctx, "rpc/tracker/RecoverAll")
defer span.End()
pinfos, err := rpcapi.c.tracker.RecoverAll(ctx)
*out = pinInfoSliceToSerial(pinfos)
if err != nil {
return err
}
*out = pinfos
return nil
}
// TrackerRecover runs PinTracker.Recover().
func (rpcapi *RPCAPI) TrackerRecover(ctx context.Context, in api.PinSerial, out *api.PinInfoSerial) error {
func (rpcapi *RPCAPI) TrackerRecover(ctx context.Context, in cid.Cid, out *api.PinInfo) error {
ctx, span := trace.StartSpan(ctx, "rpc/tracker/Recover")
defer span.End()
c := in.DecodeCid()
pinfo, err := rpcapi.c.tracker.Recover(ctx, c)
*out = pinfo.ToSerial()
pinfo, err := rpcapi.c.tracker.Recover(ctx, in)
*out = *pinfo
return err
}
@ -315,33 +339,35 @@ func (rpcapi *RPCAPI) TrackerRecover(ctx context.Context, in api.PinSerial, out
*/
// IPFSPin runs IPFSConnector.Pin().
func (rpcapi *RPCAPI) IPFSPin(ctx context.Context, in api.PinSerial, out *struct{}) error {
func (rpcapi *RPCAPI) IPFSPin(ctx context.Context, in *api.Pin, out *struct{}) error {
ctx, span := trace.StartSpan(ctx, "rpc/ipfsconn/IPFSPin")
defer span.End()
c := in.DecodeCid()
depth := in.ToPin().MaxDepth
return rpcapi.c.ipfs.Pin(ctx, c, depth)
return rpcapi.c.ipfs.Pin(ctx, in.Cid, in.MaxDepth)
}
// IPFSUnpin runs IPFSConnector.Unpin().
func (rpcapi *RPCAPI) IPFSUnpin(ctx context.Context, in api.PinSerial, out *struct{}) error {
c := in.DecodeCid()
return rpcapi.c.ipfs.Unpin(ctx, c)
func (rpcapi *RPCAPI) IPFSUnpin(ctx context.Context, in *api.Pin, out *struct{}) error {
return rpcapi.c.ipfs.Unpin(ctx, in.Cid)
}
// IPFSPinLsCid runs IPFSConnector.PinLsCid().
func (rpcapi *RPCAPI) IPFSPinLsCid(ctx context.Context, in api.PinSerial, out *api.IPFSPinStatus) error {
c := in.DecodeCid()
b, err := rpcapi.c.ipfs.PinLsCid(ctx, c)
*out = b
func (rpcapi *RPCAPI) IPFSPinLsCid(ctx context.Context, in cid.Cid, out *api.IPFSPinStatus) error {
b, err := rpcapi.c.ipfs.PinLsCid(ctx, in)
if err != nil {
return err
}
*out = b
return nil
}
// IPFSPinLs runs IPFSConnector.PinLs().
func (rpcapi *RPCAPI) IPFSPinLs(ctx context.Context, in string, out *map[string]api.IPFSPinStatus) error {
m, err := rpcapi.c.ipfs.PinLs(ctx, in)
*out = m
if err != nil {
return err
}
*out = m
return nil
}
// IPFSConnectSwarms runs IPFSConnector.ConnectSwarms().
@ -353,35 +379,46 @@ func (rpcapi *RPCAPI) IPFSConnectSwarms(ctx context.Context, in struct{}, out *s
// IPFSConfigKey runs IPFSConnector.ConfigKey().
func (rpcapi *RPCAPI) IPFSConfigKey(ctx context.Context, in string, out *interface{}) error {
res, err := rpcapi.c.ipfs.ConfigKey(in)
*out = res
if err != nil {
return err
}
*out = res
return nil
}
// IPFSRepoStat runs IPFSConnector.RepoStat().
func (rpcapi *RPCAPI) IPFSRepoStat(ctx context.Context, in struct{}, out *api.IPFSRepoStat) error {
res, err := rpcapi.c.ipfs.RepoStat(ctx)
*out = res
if err != nil {
return err
}
*out = *res
return err
}
// IPFSSwarmPeers runs IPFSConnector.SwarmPeers().
func (rpcapi *RPCAPI) IPFSSwarmPeers(ctx context.Context, in struct{}, out *api.SwarmPeersSerial) error {
func (rpcapi *RPCAPI) IPFSSwarmPeers(ctx context.Context, in struct{}, out *[]peer.ID) error {
res, err := rpcapi.c.ipfs.SwarmPeers(ctx)
*out = res.ToSerial()
if err != nil {
return err
}
*out = res
return nil
}
// IPFSBlockPut runs IPFSConnector.BlockPut().
func (rpcapi *RPCAPI) IPFSBlockPut(ctx context.Context, in api.NodeWithMeta, out *struct{}) error {
func (rpcapi *RPCAPI) IPFSBlockPut(ctx context.Context, in *api.NodeWithMeta, out *struct{}) error {
return rpcapi.c.ipfs.BlockPut(ctx, in)
}
// IPFSBlockGet runs IPFSConnector.BlockGet().
func (rpcapi *RPCAPI) IPFSBlockGet(ctx context.Context, in api.PinSerial, out *[]byte) error {
c := in.DecodeCid()
res, err := rpcapi.c.ipfs.BlockGet(ctx, c)
*out = res
func (rpcapi *RPCAPI) IPFSBlockGet(ctx context.Context, in cid.Cid, out *[]byte) error {
res, err := rpcapi.c.ipfs.BlockGet(ctx, in)
if err != nil {
return err
}
*out = res
return nil
}
/*
@ -389,19 +426,17 @@ func (rpcapi *RPCAPI) IPFSBlockGet(ctx context.Context, in api.PinSerial, out *[
*/
// ConsensusLogPin runs Consensus.LogPin().
func (rpcapi *RPCAPI) ConsensusLogPin(ctx context.Context, in api.PinSerial, out *struct{}) error {
func (rpcapi *RPCAPI) ConsensusLogPin(ctx context.Context, in *api.Pin, out *struct{}) error {
ctx, span := trace.StartSpan(ctx, "rpc/consensus/LogPin")
defer span.End()
c := in.ToPin()
return rpcapi.c.consensus.LogPin(ctx, c)
return rpcapi.c.consensus.LogPin(ctx, in)
}
// ConsensusLogUnpin runs Consensus.LogUnpin().
func (rpcapi *RPCAPI) ConsensusLogUnpin(ctx context.Context, in api.PinSerial, out *struct{}) error {
func (rpcapi *RPCAPI) ConsensusLogUnpin(ctx context.Context, in *api.Pin, out *struct{}) error {
ctx, span := trace.StartSpan(ctx, "rpc/consensus/LogUnpin")
defer span.End()
c := in.ToPin()
return rpcapi.c.consensus.LogUnpin(ctx, c)
return rpcapi.c.consensus.LogUnpin(ctx, in)
}
// ConsensusAddPeer runs Consensus.AddPeer().
@ -421,8 +456,11 @@ func (rpcapi *RPCAPI) ConsensusRmPeer(ctx context.Context, in peer.ID, out *stru
// ConsensusPeers runs Consensus.Peers().
func (rpcapi *RPCAPI) ConsensusPeers(ctx context.Context, in struct{}, out *[]peer.ID) error {
peers, err := rpcapi.c.consensus.Peers(ctx)
*out = peers
if err != nil {
return err
}
*out = peers
return nil
}
/*
@ -430,13 +468,12 @@ func (rpcapi *RPCAPI) ConsensusPeers(ctx context.Context, in struct{}, out *[]pe
*/
// PeerMonitorLogMetric runs PeerMonitor.LogMetric().
func (rpcapi *RPCAPI) PeerMonitorLogMetric(ctx context.Context, in api.Metric, out *struct{}) error {
rpcapi.c.monitor.LogMetric(ctx, in)
return nil
func (rpcapi *RPCAPI) PeerMonitorLogMetric(ctx context.Context, in *api.Metric, out *struct{}) error {
return rpcapi.c.monitor.LogMetric(ctx, in)
}
// PeerMonitorLatestMetrics runs PeerMonitor.LatestMetrics().
func (rpcapi *RPCAPI) PeerMonitorLatestMetrics(ctx context.Context, in string, out *[]api.Metric) error {
func (rpcapi *RPCAPI) PeerMonitorLatestMetrics(ctx context.Context, in string, out *[]*api.Metric) error {
*out = rpcapi.c.monitor.LatestMetrics(ctx, in)
return nil
}

View File

@ -68,21 +68,22 @@ func CopyPIDsToIfaces(in []peer.ID) []interface{} {
return ifaces
}
// CopyIDSerialsToIfaces converts an api.IDSerial slice to an empty interface
// CopyIDsToIfaces converts an api.ID slice to an empty interface
// slice using pointers to each element of the original slice.
// Useful to handle gorpc.MultiCall() replies.
func CopyIDSerialsToIfaces(in []api.IDSerial) []interface{} {
func CopyIDsToIfaces(in []*api.ID) []interface{} {
ifaces := make([]interface{}, len(in), len(in))
for i := range in {
ifaces[i] = &in[i]
in[i] = &api.ID{}
ifaces[i] = in[i]
}
return ifaces
}
// CopyIDSerialSliceToIfaces converts an api.IDSerial slice of slices
// CopyIDSliceToIfaces converts an api.ID slice of slices
// to an empty interface slice using pointers to each element of the
// original slice. Useful to handle gorpc.MultiCall() replies.
func CopyIDSerialSliceToIfaces(in [][]api.IDSerial) []interface{} {
func CopyIDSliceToIfaces(in [][]*api.ID) []interface{} {
ifaces := make([]interface{}, len(in), len(in))
for i := range in {
ifaces[i] = &in[i]
@ -90,21 +91,22 @@ func CopyIDSerialSliceToIfaces(in [][]api.IDSerial) []interface{} {
return ifaces
}
// CopyPinInfoSerialToIfaces converts an api.PinInfoSerial slice to
// CopyPinInfoToIfaces converts an api.PinInfo slice to
// an empty interface slice using pointers to each element of
// the original slice. Useful to handle gorpc.MultiCall() replies.
func CopyPinInfoSerialToIfaces(in []api.PinInfoSerial) []interface{} {
func CopyPinInfoToIfaces(in []*api.PinInfo) []interface{} {
ifaces := make([]interface{}, len(in), len(in))
for i := range in {
ifaces[i] = &in[i]
in[i] = &api.PinInfo{}
ifaces[i] = in[i]
}
return ifaces
}
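// Illustrative sketch (not part of this changeset): intended use of the
// pointer-based helper around a MultiCall-style fan-out. The destination
// slice starts as nil pointers; CopyPinInfoToIfaces allocates every element
// and returns interfaces pointing at them, so replies decoded into ifaces[i]
// become visible through pinfos[i]. "multiCall" is a stand-in and assumed.
func examplePinInfoReplies(n int, multiCall func([]interface{})) []*api.PinInfo {
	pinfos := make([]*api.PinInfo, n)     // every element is nil at this point
	ifaces := CopyPinInfoToIfaces(pinfos) // allocates each *api.PinInfo and wires it in
	multiCall(ifaces)                     // replies land in ifaces[i]
	return pinfos                         // populated through the shared pointers
}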
// CopyPinInfoSerialSliceToIfaces converts an api.PinInfoSerial slice of slices
// CopyPinInfoSliceToIfaces converts an api.PinInfo slice of slices
// to an empty interface slice using pointers to each element of the original
// slice. Useful to handle gorpc.MultiCall() replies.
func CopyPinInfoSerialSliceToIfaces(in [][]api.PinInfoSerial) []interface{} {
func CopyPinInfoSliceToIfaces(in [][]*api.PinInfo) []interface{} {
ifaces := make([]interface{}, len(in), len(in))
for i := range in {
ifaces[i] = &in[i]

View File

@ -15,7 +15,7 @@ test_expect_success IPFS,CLUSTER "pin data to cluster with ctl" '
'
test_expect_success IPFS,CLUSTER "unpin data from cluster with ctl" '
cid=`ipfs-cluster-ctl --enc=json pin ls | jq -r ".[] | .cid" | head -1`
cid=`ipfs-cluster-ctl --enc=json pin ls | jq -r ".[] | .cid | .[\"/\"]" | head -1`
ipfs-cluster-ctl pin rm "$cid" &&
!(ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid") &&
ipfs-cluster-ctl status "$cid" | grep -q -i "UNPINNED"
@ -29,7 +29,7 @@ test_expect_success IPFS,CLUSTER "wait for data to pin to cluster with ctl" '
'
test_expect_success IPFS,CLUSTER "wait for data to unpin from cluster with ctl" '
cid=`ipfs-cluster-ctl --enc=json pin ls | jq -r ".[] | .cid" | head -1`
cid=`ipfs-cluster-ctl --enc=json pin ls | jq -r ".[] | .cid | .[\"/\"]" | head -1`
ipfs-cluster-ctl pin rm --wait "$cid" | grep -q -i "UNPINNED" &&
!(ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid") &&
ipfs-cluster-ctl status "$cid" | grep -q -i "UNPINNED"
@ -43,7 +43,7 @@ test_expect_success IPFS,CLUSTER "wait for data to pin to cluster with ctl with
'
test_expect_success IPFS,CLUSTER "wait for data to unpin from cluster with ctl with timeout" '
cid=`ipfs-cluster-ctl --enc=json pin ls | jq -r ".[] | .cid" | head -1`
cid=`ipfs-cluster-ctl --enc=json pin ls | jq -r ".[] | .cid | .[\"/\"]" | head -1`
ipfs-cluster-ctl pin rm --wait --wait-timeout 2s "$cid" | grep -q -i "UNPINNED" &&
!(ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid") &&
ipfs-cluster-ctl status "$cid" | grep -q -i "UNPINNED"

View File

@ -24,7 +24,7 @@ test_expect_success IPFS,CLUSTER,JQ "state export saves the correct state to exp
cluster_kill && sleep 5 &&
ipfs-cluster-service --debug --config "test-config" state export -f export.json &&
[ -f export.json ] &&
jq ".[].cid" export.json | grep -q "$cid"
jq -r ".[] | .cid | .[\"/\"]" export.json | grep -q "$cid"
'
test_clean_ipfs

View File

@ -1,6 +1,6 @@
[
{
"cid": "QmbrCtydGyPeHiLURSPMqrvE5mCgMCwFYq3UD4XLCeAYw6",
"cid": { "/": "QmbrCtydGyPeHiLURSPMqrvE5mCgMCwFYq3UD4XLCeAYw6"},
"name": "",
"allocations": [],
"replication_factor_min": -1,

View File

@ -64,11 +64,11 @@ func New(dstore ds.Datastore, namespace string, handle codec.Handle) (*State, er
}
// Add adds a new Pin or replaces an existing one.
func (st *State) Add(ctx context.Context, c api.Pin) error {
func (st *State) Add(ctx context.Context, c *api.Pin) error {
_, span := trace.StartSpan(ctx, "state/dsstate/Add")
defer span.End()
ps, err := st.serializePin(&c)
ps, err := st.serializePin(c)
if err != nil {
return err
}
@ -91,7 +91,7 @@ func (st *State) Rm(ctx context.Context, c cid.Cid) error {
// Get returns a Pin from the store and whether it
// was present. When not present, a default pin
// is returned.
func (st *State) Get(ctx context.Context, c cid.Cid) (api.Pin, bool) {
func (st *State) Get(ctx context.Context, c cid.Cid) (*api.Pin, bool) {
_, span := trace.StartSpan(ctx, "state/dsstate/Get")
defer span.End()
@ -103,7 +103,7 @@ func (st *State) Get(ctx context.Context, c cid.Cid) (api.Pin, bool) {
if err != nil {
return api.PinCid(c), false
}
return *p, true
return p, true
}
// Has returns whether a Cid is stored.
@ -120,7 +120,7 @@ func (st *State) Has(ctx context.Context, c cid.Cid) bool {
// List returns the unsorted list of all Pins that have been added to the
// datastore.
func (st *State) List(ctx context.Context) []api.Pin {
func (st *State) List(ctx context.Context) []*api.Pin {
_, span := trace.StartSpan(ctx, "state/dsstate/List")
defer span.End()
@ -130,11 +130,11 @@ func (st *State) List(ctx context.Context) []api.Pin {
results, err := st.ds.Query(q)
if err != nil {
return []api.Pin{}
return []*api.Pin{}
}
defer results.Close()
var pins []api.Pin
var pins []*api.Pin
for r := range results.Next() {
if r.Error != nil {
@ -155,7 +155,7 @@ func (st *State) List(ctx context.Context) []api.Pin {
continue
}
pins = append(pins, *p)
pins = append(pins, p)
}
return pins
}

View File

@ -16,15 +16,15 @@ import (
// objects which objects are pinned. This component should be thread safe.
type State interface {
// Add adds a pin to the State
Add(context.Context, api.Pin) error
Add(context.Context, *api.Pin) error
// Rm removes a pin from the State
Rm(context.Context, cid.Cid) error
// List lists all the pins in the state
List(context.Context) []api.Pin
List(context.Context) []*api.Pin
// Has returns true if the state is holding information for a Cid
Has(context.Context, cid.Cid) bool
// Get returns the information attached to this pin
Get(context.Context, cid.Cid) (api.Pin, bool)
Get(context.Context, cid.Cid) (*api.Pin, bool)
// Migrate restores the serialized format of an outdated state to the current version
Migrate(ctx context.Context, r io.Reader) error
// Return the version of this state
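// Illustrative sketch (not part of this changeset): exercising the
// pointer-based State. Add, Get and List now exchange *api.Pin pointers
// directly instead of values or Serial types. The state "st" and cid "c"
// are assumed to be supplied by the caller.
func exampleStateUsage(ctx context.Context, st State, c cid.Cid) ([]*api.Pin, error) {
	pin := api.PinCid(c)          // build a *api.Pin with default options
	pin.ReplicationFactorMin = -1 // replicate everywhere
	pin.ReplicationFactorMax = -1
	if err := st.Add(ctx, pin); err != nil {
		return nil, err
	}
	if stored, ok := st.Get(ctx, c); ok {
		_ = stored // stored is a *api.Pin; no Serial conversion needed
	}
	return st.List(ctx), nil
}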

View File

@ -50,7 +50,7 @@ func NewMapState() state.State {
}
// Add adds a Pin to the internal map.
func (st *MapState) Add(ctx context.Context, c api.Pin) error {
func (st *MapState) Add(ctx context.Context, c *api.Pin) error {
ctx, span := trace.StartSpan(ctx, "state/map/Add")
defer span.End()
return st.dst.Add(ctx, c)
@ -69,7 +69,7 @@ func (st *MapState) Rm(ctx context.Context, c cid.Cid) error {
// fields initialized, regardless of the
// presence of the provided Cid in the state.
// To check the presence, use MapState.Has(cid.Cid).
func (st *MapState) Get(ctx context.Context, c cid.Cid) (api.Pin, bool) {
func (st *MapState) Get(ctx context.Context, c cid.Cid) (*api.Pin, bool) {
ctx, span := trace.StartSpan(ctx, "state/map/Get")
defer span.End()
@ -84,7 +84,7 @@ func (st *MapState) Has(ctx context.Context, c cid.Cid) bool {
}
// List provides the list of tracked Pins.
func (st *MapState) List(ctx context.Context) []api.Pin {
func (st *MapState) List(ctx context.Context) []*api.Pin {
ctx, span := trace.StartSpan(ctx, "state/map/List")
defer span.End()
return st.dst.List(ctx)

View File

@ -16,7 +16,7 @@ import (
var testCid1, _ = cid.Decode("QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmq")
var testPeerID1, _ = peer.IDB58Decode("QmXZrtE5jQwXNqCJMfHUTQkvhQ4ZAnqMnmzFMJfLewuabc")
var c = api.Pin{
var c = &api.Pin{
Cid: testCid1,
Type: api.DataType,
Allocations: []peer.ID{testPeerID1},

View File

@ -10,6 +10,8 @@ import (
"errors"
"io"
cid "github.com/ipfs/go-cid"
"github.com/ipfs/ipfs-cluster/api"
msgpack "github.com/multiformats/go-multicodec/msgpack"
@ -200,18 +202,26 @@ func (st *mapStateV5) next() migrateable {
logger.Infof("migrating %s: %s", k, v.Cid)
// we need to convert because we added codec struct fields
// and thus serialization is not the same.
p := api.PinSerial{}
p.Cid = v.Cid
p.Type = v.Type
p.Allocations = v.Allocations
p := &api.Pin{}
c, err := cid.Decode(v.Cid)
if err != nil {
logger.Error(err)
}
p.Cid = c
p.Type = api.PinType(v.Type)
p.Allocations = api.StringsToPeers(v.Allocations)
p.MaxDepth = v.MaxDepth
p.Reference = v.Reference
r, err := cid.Decode(v.Reference)
if err == nil {
p.Reference = &r
}
p.ReplicationFactorMax = v.ReplicationFactorMax
p.ReplicationFactorMin = v.ReplicationFactorMin
p.Name = v.Name
p.ShardSize = v.ShardSize
v6.Add(context.Background(), p.ToPin())
v6.Add(context.Background(), p)
}
return v6.(*MapState)
}

View File

@ -7,46 +7,39 @@ import (
// Common variables used all around tests.
var (
TestCid1 = "QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmq"
TestCid2 = "QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmma"
TestCid3 = "QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmb"
TestCid4 = "zb2rhiKhUepkTMw7oFfBUnChAN7ABAvg2hXUwmTBtZ6yxuc57"
TestCid4Data = "Cid4Data" // Cid resulting from block put NOT ipfs add
TestSlowCid1 = "QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmd"
TestCidResolved = "zb2rhiKhUepkTMw7oFfBUnChAN7ABAvg2hXUwmTBtZ6yxuabc"
Cid1, _ = cid.Decode("QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmq")
Cid2, _ = cid.Decode("QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmma")
Cid3, _ = cid.Decode("QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmb")
Cid4, _ = cid.Decode("zb2rhiKhUepkTMw7oFfBUnChAN7ABAvg2hXUwmTBtZ6yxuc57")
Cid4Data = "Cid4Data" // Cid resulting from block put NOT ipfs add
SlowCid1, _ = cid.Decode("QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmd")
CidResolved, _ = cid.Decode("zb2rhiKhUepkTMw7oFfBUnChAN7ABAvg2hXUwmTBtZ6yxuabc")
// ErrorCid is meant to be used as a Cid which causes errors, i.e. the
// ipfs mock fails when pinning this CID.
ErrorCid = "QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmc"
TestPeerID1, _ = peer.IDB58Decode("QmXZrtE5jQwXNqCJMfHUTQkvhQ4ZAnqMnmzFMJfLewuabc")
TestPeerID2, _ = peer.IDB58Decode("QmUZ13osndQ5uL4tPWHXe3iBgBgq9gfewcBMSCAuMBsDJ6")
TestPeerID3, _ = peer.IDB58Decode("QmPGDFvBkgWhvzEK9qaTWrWurSwqXNmhnK3hgELPdZZNPa")
TestPeerID4, _ = peer.IDB58Decode("QmZ8naDy5mEz4GLuQwjWt9MPYqHTBbsm8tQBrNSjiq6zBc")
TestPeerID5, _ = peer.IDB58Decode("QmZVAo3wd8s5eTTy2kPYs34J9PvfxpKPuYsePPYGjgRRjg")
TestPeerID6, _ = peer.IDB58Decode("QmR8Vu6kZk7JvAN2rWVWgiduHatgBq2bb15Yyq8RRhYSbx")
ErrorCid, _ = cid.Decode("QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmc")
PeerID1, _ = peer.IDB58Decode("QmXZrtE5jQwXNqCJMfHUTQkvhQ4ZAnqMnmzFMJfLewuabc")
PeerID2, _ = peer.IDB58Decode("QmUZ13osndQ5uL4tPWHXe3iBgBgq9gfewcBMSCAuMBsDJ6")
PeerID3, _ = peer.IDB58Decode("QmPGDFvBkgWhvzEK9qaTWrWurSwqXNmhnK3hgELPdZZNPa")
PeerID4, _ = peer.IDB58Decode("QmZ8naDy5mEz4GLuQwjWt9MPYqHTBbsm8tQBrNSjiq6zBc")
PeerID5, _ = peer.IDB58Decode("QmZVAo3wd8s5eTTy2kPYs34J9PvfxpKPuYsePPYGjgRRjg")
PeerID6, _ = peer.IDB58Decode("QmR8Vu6kZk7JvAN2rWVWgiduHatgBq2bb15Yyq8RRhYSbx")
TestPeerName1 = "TestPeer1"
TestPeerName2 = "TestPeer2"
TestPeerName3 = "TestPeer3"
TestPeerName4 = "TestPeer4"
TestPeerName5 = "TestPeer5"
TestPeerName6 = "TestPeer6"
PeerName1 = "TestPeer1"
PeerName2 = "TestPeer2"
PeerName3 = "TestPeer3"
PeerName4 = "TestPeer4"
PeerName5 = "TestPeer5"
PeerName6 = "TestPeer6"
TestPathIPFS1 = "/ipfs/QmaNJ5acV31sx8jq626qTpAWW4DXKw34aGhx53dECLvXbY"
TestPathIPFS2 = "/ipfs/QmbUNM297ZwxB8CfFAznK7H9YMesDoY6Tt5bPgt5MSCB2u/im.gif"
TestPathIPFS3 = "/ipfs/QmbUNM297ZwxB8CfFAznK7H9YMesDoY6Tt5bPgt5MSCB2u/im.gif/"
TestPathIPNS1 = "/ipns/QmbmSAQNnfGcBAB8M8AsSPxd1TY7cpT9hZ398kXAScn2Ka"
TestPathIPNS2 = "/ipns/QmbmSAQNnfGcBAB8M8AsSPxd1TY7cpT9hZ398kXAScn2Ka/"
TestPathIPLD1 = "/ipld/QmaNJ5acV31sx8jq626qTpAWW4DXKw34aGhx53dECLvXbY"
TestPathIPLD2 = "/ipld/QmaNJ5acV31sx8jq626qTpAWW4DXKw34aGhx53dECLvXbY/"
PathIPFS1 = "/ipfs/QmaNJ5acV31sx8jq626qTpAWW4DXKw34aGhx53dECLvXbY"
PathIPFS2 = "/ipfs/QmbUNM297ZwxB8CfFAznK7H9YMesDoY6Tt5bPgt5MSCB2u/im.gif"
PathIPFS3 = "/ipfs/QmbUNM297ZwxB8CfFAznK7H9YMesDoY6Tt5bPgt5MSCB2u/im.gif/"
PathIPNS1 = "/ipns/QmbmSAQNnfGcBAB8M8AsSPxd1TY7cpT9hZ398kXAScn2Ka"
PathIPNS2 = "/ipns/QmbmSAQNnfGcBAB8M8AsSPxd1TY7cpT9hZ398kXAScn2Ka/"
PathIPLD1 = "/ipld/QmaNJ5acV31sx8jq626qTpAWW4DXKw34aGhx53dECLvXbY"
PathIPLD2 = "/ipld/QmaNJ5acV31sx8jq626qTpAWW4DXKw34aGhx53dECLvXbY/"
TestInvalidPath1 = "/invalidkeytype/QmaNJ5acV31sx8jq626qTpAWW4DXKw34aGhx53dECLvXbY/"
TestInvalidPath2 = "/ipfs/invalidhash"
TestInvalidPath3 = "/ipfs/"
InvalidPath1 = "/invalidkeytype/QmaNJ5acV31sx8jq626qTpAWW4DXKw34aGhx53dECLvXbY/"
InvalidPath2 = "/ipfs/invalidhash"
InvalidPath3 = "/ipfs/"
)
// MustDecodeCid provides a test helper that ignores
// errors from cid.Decode.
func MustDecodeCid(v string) cid.Cid {
c, _ := cid.Decode(v)
return c
}

View File

@ -139,7 +139,7 @@ func (m *IpfsMock) handler(w http.ResponseWriter, r *http.Request) {
switch endp {
case "id":
resp := mockIDResp{
ID: TestPeerID1.Pretty(),
ID: PeerID1.Pretty(),
Addresses: []string{
"/ip4/0.0.0.0/tcp/1234",
},
@ -151,7 +151,7 @@ func (m *IpfsMock) handler(w http.ResponseWriter, r *http.Request) {
if !ok {
goto ERROR
}
if arg == ErrorCid {
if arg == ErrorCid.String() {
goto ERROR
}
c, err := cid.Decode(arg)
@ -226,10 +226,10 @@ func (m *IpfsMock) handler(w http.ResponseWriter, r *http.Request) {
w.Write(j)
case "swarm/peers":
peer1 := mockIpfsPeer{
Peer: TestPeerID4.Pretty(),
Peer: PeerID4.Pretty(),
}
peer2 := mockIpfsPeer{
Peer: TestPeerID5.Pretty(),
Peer: PeerID5.Pretty(),
}
resp := mockSwarmPeersResp{
Peers: []mockIpfsPeer{peer1, peer2},
@ -303,7 +303,7 @@ func (m *IpfsMock) handler(w http.ResponseWriter, r *http.Request) {
j, _ := json.Marshal(resp)
w.Write(j)
case "resolve":
w.Write([]byte("{\"Path\":\"" + "/ipfs/" + TestCidResolved + "\"}"))
w.Write([]byte("{\"Path\":\"" + "/ipfs/" + CidResolved.String() + "\"}"))
case "config/show":
resp := mockConfigResp{
Datastore: struct {

View File

@ -39,86 +39,86 @@ func NewMockRPCClientWithHost(t testing.TB, h host.Host) *rpc.Client {
return c
}
func (mock *mockService) Pin(ctx context.Context, in api.PinSerial, out *struct{}) error {
if in.Cid == ErrorCid {
func (mock *mockService) Pin(ctx context.Context, in *api.Pin, out *struct{}) error {
if in.Cid.Equals(ErrorCid) {
return ErrBadCid
}
return nil
}
func (mock *mockService) Unpin(ctx context.Context, in api.PinSerial, out *struct{}) error {
if in.Cid == ErrorCid {
func (mock *mockService) Unpin(ctx context.Context, in *api.Pin, out *struct{}) error {
if in.Cid.Equals(ErrorCid) {
return ErrBadCid
}
return nil
}
func (mock *mockService) PinPath(ctx context.Context, in api.PinPath, out *api.PinSerial) error {
func (mock *mockService) PinPath(ctx context.Context, in *api.PinPath, out *api.Pin) error {
_, err := gopath.ParsePath(in.Path)
if err != nil {
return err
}
*out = api.PinWithOpts(MustDecodeCid(TestCidResolved), in.PinOptions).ToSerial()
*out = *api.PinWithOpts(CidResolved, in.PinOptions)
return nil
}
func (mock *mockService) UnpinPath(ctx context.Context, in string, out *api.PinSerial) error {
func (mock *mockService) UnpinPath(ctx context.Context, in string, out *api.Pin) error {
_, err := gopath.ParsePath(in)
if err != nil {
return err
}
*out = api.PinCid(MustDecodeCid(TestCidResolved)).ToSerial()
*out = *api.PinCid(CidResolved)
return nil
}
func (mock *mockService) Pins(ctx context.Context, in struct{}, out *[]api.PinSerial) error {
func (mock *mockService) Pins(ctx context.Context, in struct{}, out *[]*api.Pin) error {
opts := api.PinOptions{
ReplicationFactorMin: -1,
ReplicationFactorMax: -1,
}
*out = []api.PinSerial{
api.PinWithOpts(MustDecodeCid(TestCid1), opts).ToSerial(),
api.PinCid(MustDecodeCid(TestCid2)).ToSerial(),
api.PinWithOpts(MustDecodeCid(TestCid3), opts).ToSerial(),
*out = []*api.Pin{
api.PinWithOpts(Cid1, opts),
api.PinCid(Cid2),
api.PinWithOpts(Cid3, opts),
}
return nil
}
func (mock *mockService) PinGet(ctx context.Context, in api.PinSerial, out *api.PinSerial) error {
switch in.Cid {
case ErrorCid:
func (mock *mockService) PinGet(ctx context.Context, in cid.Cid, out *api.Pin) error {
switch in.String() {
case ErrorCid.String():
return errors.New("this is an expected error when using ErrorCid")
case TestCid1, TestCid3:
p := api.PinCid(MustDecodeCid(in.Cid)).ToSerial()
case Cid1.String(), Cid3.String():
p := api.PinCid(in)
p.ReplicationFactorMin = -1
p.ReplicationFactorMax = -1
*out = p
*out = *p
return nil
case TestCid2: // This is a remote pin
p := api.PinCid(MustDecodeCid(in.Cid)).ToSerial()
case Cid2.String(): // This is a remote pin
p := api.PinCid(in)
p.ReplicationFactorMin = 1
p.ReplicationFactorMax = 1
*out = p
*out = *p
default:
return errors.New("not found")
}
return nil
}
func (mock *mockService) ID(ctx context.Context, in struct{}, out *api.IDSerial) error {
func (mock *mockService) ID(ctx context.Context, in struct{}, out *api.ID) error {
//_, pubkey, _ := crypto.GenerateKeyPair(
// DefaultConfigCrypto,
// DefaultConfigKeyLength)
*out = api.IDSerial{
ID: TestPeerID1.Pretty(),
addr, _ := api.NewMultiaddr("/ip4/127.0.0.1/tcp/4001/ipfs/" + PeerID1.Pretty())
*out = api.ID{
ID: PeerID1,
//PublicKey: pubkey,
Version: "0.0.mock",
IPFS: api.IPFSIDSerial{
ID: TestPeerID1.Pretty(),
Addresses: api.MultiaddrsSerial{
api.MultiaddrSerial("/ip4/127.0.0.1/tcp/4001/ipfs/" + TestPeerID1.Pretty()),
},
IPFS: api.IPFSID{
ID: PeerID1,
Addresses: []api.Multiaddr{addr},
},
}
return nil
@ -131,16 +131,16 @@ func (mock *mockService) Version(ctx context.Context, in struct{}, out *api.Vers
return nil
}
func (mock *mockService) Peers(ctx context.Context, in struct{}, out *[]api.IDSerial) error {
id := api.IDSerial{}
mock.ID(ctx, in, &id)
func (mock *mockService) Peers(ctx context.Context, in struct{}, out *[]*api.ID) error {
id := &api.ID{}
mock.ID(ctx, in, id)
*out = []api.IDSerial{id}
*out = []*api.ID{id}
return nil
}
func (mock *mockService) PeerAdd(ctx context.Context, in string, out *api.IDSerial) error {
id := api.IDSerial{}
func (mock *mockService) PeerAdd(ctx context.Context, in peer.ID, out *api.ID) error {
id := api.ID{}
mock.ID(ctx, struct{}{}, &id)
*out = id
return nil
@ -150,130 +150,127 @@ func (mock *mockService) PeerRemove(ctx context.Context, in peer.ID, out *struct
return nil
}
func (mock *mockService) ConnectGraph(ctx context.Context, in struct{}, out *api.ConnectGraphSerial) error {
*out = api.ConnectGraphSerial{
ClusterID: TestPeerID1.Pretty(),
IPFSLinks: map[string][]string{
TestPeerID4.Pretty(): []string{TestPeerID5.Pretty(), TestPeerID6.Pretty()},
TestPeerID5.Pretty(): []string{TestPeerID4.Pretty(), TestPeerID6.Pretty()},
TestPeerID6.Pretty(): []string{TestPeerID4.Pretty(), TestPeerID5.Pretty()},
func (mock *mockService) ConnectGraph(ctx context.Context, in struct{}, out *api.ConnectGraph) error {
*out = api.ConnectGraph{
ClusterID: PeerID1,
IPFSLinks: map[string][]peer.ID{
peer.IDB58Encode(PeerID4): []peer.ID{PeerID5, PeerID6},
peer.IDB58Encode(PeerID5): []peer.ID{PeerID4, PeerID6},
peer.IDB58Encode(PeerID6): []peer.ID{PeerID4, PeerID5},
},
ClusterLinks: map[string][]string{
TestPeerID1.Pretty(): []string{TestPeerID2.Pretty(), TestPeerID3.Pretty()},
TestPeerID2.Pretty(): []string{TestPeerID1.Pretty(), TestPeerID3.Pretty()},
TestPeerID3.Pretty(): []string{TestPeerID1.Pretty(), TestPeerID2.Pretty()},
ClusterLinks: map[string][]peer.ID{
peer.IDB58Encode(PeerID1): []peer.ID{PeerID2, PeerID3},
peer.IDB58Encode(PeerID2): []peer.ID{PeerID1, PeerID3},
peer.IDB58Encode(PeerID3): []peer.ID{PeerID1, PeerID2},
},
ClustertoIPFS: map[string]string{
TestPeerID1.Pretty(): TestPeerID4.Pretty(),
TestPeerID2.Pretty(): TestPeerID5.Pretty(),
TestPeerID3.Pretty(): TestPeerID6.Pretty(),
ClustertoIPFS: map[string]peer.ID{
peer.IDB58Encode(PeerID1): PeerID4,
peer.IDB58Encode(PeerID2): PeerID5,
peer.IDB58Encode(PeerID3): PeerID6,
},
}
return nil
}
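// Illustrative sketch (not part of this changeset): the ConnectGraph maps are
// keyed by the peer ID's base58 string (presumably so they marshal as plain
// JSON objects) while values remain peer.IDs. A hypothetical lookup:
func exampleClusterNeighbours(g api.ConnectGraph, p peer.ID) []peer.ID {
	// Encode the peer ID to match the string keys used by the graph maps.
	return g.ClusterLinks[peer.IDB58Encode(p)]
}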
func (mock *mockService) StatusAll(ctx context.Context, in struct{}, out *[]api.GlobalPinInfoSerial) error {
c1, _ := cid.Decode(TestCid1)
c2, _ := cid.Decode(TestCid2)
c3, _ := cid.Decode(TestCid3)
*out = globalPinInfoSliceToSerial([]api.GlobalPinInfo{
func (mock *mockService) StatusAll(ctx context.Context, in struct{}, out *[]*api.GlobalPinInfo) error {
pid := peer.IDB58Encode(PeerID1)
*out = []*api.GlobalPinInfo{
{
Cid: c1,
PeerMap: map[peer.ID]api.PinInfo{
TestPeerID1: {
Cid: c1,
Peer: TestPeerID1,
Cid: Cid1,
PeerMap: map[string]*api.PinInfo{
pid: {
Cid: Cid1,
Peer: PeerID1,
Status: api.TrackerStatusPinned,
TS: time.Now(),
},
},
},
{
Cid: c2,
PeerMap: map[peer.ID]api.PinInfo{
TestPeerID1: {
Cid: c2,
Peer: TestPeerID1,
Cid: Cid2,
PeerMap: map[string]*api.PinInfo{
pid: {
Cid: Cid2,
Peer: PeerID1,
Status: api.TrackerStatusPinning,
TS: time.Now(),
},
},
},
{
Cid: c3,
PeerMap: map[peer.ID]api.PinInfo{
TestPeerID1: {
Cid: c3,
Peer: TestPeerID1,
Cid: Cid3,
PeerMap: map[string]*api.PinInfo{
pid: {
Cid: Cid3,
Peer: PeerID1,
Status: api.TrackerStatusPinError,
TS: time.Now(),
},
},
},
})
}
return nil
}
func (mock *mockService) StatusAllLocal(ctx context.Context, in struct{}, out *[]api.PinInfoSerial) error {
func (mock *mockService) StatusAllLocal(ctx context.Context, in struct{}, out *[]*api.PinInfo) error {
return mock.TrackerStatusAll(ctx, in, out)
}
func (mock *mockService) Status(ctx context.Context, in api.PinSerial, out *api.GlobalPinInfoSerial) error {
if in.Cid == ErrorCid {
func (mock *mockService) Status(ctx context.Context, in cid.Cid, out *api.GlobalPinInfo) error {
if in.Equals(ErrorCid) {
return ErrBadCid
}
c1, _ := cid.Decode(TestCid1)
*out = api.GlobalPinInfo{
Cid: c1,
PeerMap: map[peer.ID]api.PinInfo{
TestPeerID1: {
Cid: c1,
Peer: TestPeerID1,
Cid: in,
PeerMap: map[string]*api.PinInfo{
peer.IDB58Encode(PeerID1): {
Cid: in,
Peer: PeerID1,
Status: api.TrackerStatusPinned,
TS: time.Now(),
},
},
}.ToSerial()
}
return nil
}
func (mock *mockService) StatusLocal(ctx context.Context, in api.PinSerial, out *api.PinInfoSerial) error {
func (mock *mockService) StatusLocal(ctx context.Context, in cid.Cid, out *api.PinInfo) error {
return mock.TrackerStatus(ctx, in, out)
}
func (mock *mockService) SyncAll(ctx context.Context, in struct{}, out *[]api.GlobalPinInfoSerial) error {
func (mock *mockService) SyncAll(ctx context.Context, in struct{}, out *[]*api.GlobalPinInfo) error {
return mock.StatusAll(ctx, in, out)
}
func (mock *mockService) SyncAllLocal(ctx context.Context, in struct{}, out *[]api.PinInfoSerial) error {
func (mock *mockService) SyncAllLocal(ctx context.Context, in struct{}, out *[]*api.PinInfo) error {
return mock.StatusAllLocal(ctx, in, out)
}
func (mock *mockService) Sync(ctx context.Context, in api.PinSerial, out *api.GlobalPinInfoSerial) error {
func (mock *mockService) Sync(ctx context.Context, in cid.Cid, out *api.GlobalPinInfo) error {
return mock.Status(ctx, in, out)
}
func (mock *mockService) SyncLocal(ctx context.Context, in api.PinSerial, out *api.PinInfoSerial) error {
func (mock *mockService) SyncLocal(ctx context.Context, in cid.Cid, out *api.PinInfo) error {
return mock.StatusLocal(ctx, in, out)
}
func (mock *mockService) RecoverAllLocal(ctx context.Context, in struct{}, out *[]api.PinInfoSerial) error {
func (mock *mockService) RecoverAllLocal(ctx context.Context, in struct{}, out *[]*api.PinInfo) error {
return mock.TrackerRecoverAll(ctx, in, out)
}
func (mock *mockService) Recover(ctx context.Context, in api.PinSerial, out *api.GlobalPinInfoSerial) error {
func (mock *mockService) Recover(ctx context.Context, in cid.Cid, out *api.GlobalPinInfo) error {
return mock.Status(ctx, in, out)
}
func (mock *mockService) RecoverLocal(ctx context.Context, in api.PinSerial, out *api.PinInfoSerial) error {
func (mock *mockService) RecoverLocal(ctx context.Context, in cid.Cid, out *api.PinInfo) error {
return mock.TrackerRecover(ctx, in, out)
}
func (mock *mockService) BlockAllocate(ctx context.Context, in api.PinSerial, out *[]string) error {
func (mock *mockService) BlockAllocate(ctx context.Context, in *api.Pin, out *[]peer.ID) error {
if in.ReplicationFactorMin > 1 {
return errors.New("replMin too high: can only mock-allocate to 1")
}
*out = []string{""} // local peer
*out = in.Allocations
return nil
}
@ -283,99 +280,94 @@ func (mock *mockService) SendInformerMetric(ctx context.Context, in struct{}, ou
/* Tracker methods */
func (mock *mockService) Track(ctx context.Context, in api.PinSerial, out *struct{}) error {
func (mock *mockService) Track(ctx context.Context, in *api.Pin, out *struct{}) error {
return nil
}
func (mock *mockService) Untrack(ctx context.Context, in api.PinSerial, out *struct{}) error {
func (mock *mockService) Untrack(ctx context.Context, in *api.Pin, out *struct{}) error {
return nil
}
func (mock *mockService) TrackerStatusAll(ctx context.Context, in struct{}, out *[]api.PinInfoSerial) error {
c1, _ := cid.Decode(TestCid1)
c3, _ := cid.Decode(TestCid3)
*out = pinInfoSliceToSerial([]api.PinInfo{
func (mock *mockService) TrackerStatusAll(ctx context.Context, in struct{}, out *[]*api.PinInfo) error {
*out = []*api.PinInfo{
{
Cid: c1,
Peer: TestPeerID1,
Cid: Cid1,
Peer: PeerID1,
Status: api.TrackerStatusPinned,
TS: time.Now(),
},
{
Cid: c3,
Peer: TestPeerID1,
Cid: Cid3,
Peer: PeerID1,
Status: api.TrackerStatusPinError,
TS: time.Now(),
},
})
}
return nil
}
func (mock *mockService) TrackerStatus(ctx context.Context, in api.PinSerial, out *api.PinInfoSerial) error {
if in.Cid == ErrorCid {
func (mock *mockService) TrackerStatus(ctx context.Context, in cid.Cid, out *api.PinInfo) error {
if in.Equals(ErrorCid) {
return ErrBadCid
}
c1, _ := cid.Decode(TestCid1)
*out = api.PinInfo{
Cid: c1,
Peer: TestPeerID2,
Cid: in,
Peer: PeerID2,
Status: api.TrackerStatusPinned,
TS: time.Now(),
}.ToSerial()
}
return nil
}
func (mock *mockService) TrackerRecoverAll(ctx context.Context, in struct{}, out *[]api.PinInfoSerial) error {
*out = make([]api.PinInfoSerial, 0, 0)
func (mock *mockService) TrackerRecoverAll(ctx context.Context, in struct{}, out *[]*api.PinInfo) error {
*out = make([]*api.PinInfo, 0, 0)
return nil
}
func (mock *mockService) TrackerRecover(ctx context.Context, in api.PinSerial, out *api.PinInfoSerial) error {
in2 := in.ToPin()
func (mock *mockService) TrackerRecover(ctx context.Context, in cid.Cid, out *api.PinInfo) error {
*out = api.PinInfo{
Cid: in2.Cid,
Peer: TestPeerID1,
Cid: in,
Peer: PeerID1,
Status: api.TrackerStatusPinned,
TS: time.Now(),
}.ToSerial()
}
return nil
}
/* PeerMonitor methods */
// PeerMonitorLogMetric runs PeerMonitor.LogMetric().
func (mock *mockService) PeerMonitorLogMetric(ctx context.Context, in api.Metric, out *struct{}) error {
func (mock *mockService) PeerMonitorLogMetric(ctx context.Context, in *api.Metric, out *struct{}) error {
return nil
}
// PeerMonitorLatestMetrics runs PeerMonitor.LatestMetrics().
func (mock *mockService) PeerMonitorLatestMetrics(ctx context.Context, in string, out *[]api.Metric) error {
m := api.Metric{
func (mock *mockService) PeerMonitorLatestMetrics(ctx context.Context, in string, out *[]*api.Metric) error {
m := &api.Metric{
Name: "test",
Peer: TestPeerID1,
Peer: PeerID1,
Value: "0",
Valid: true,
}
m.SetTTL(2 * time.Second)
last := []api.Metric{m}
last := []*api.Metric{m}
*out = last
return nil
}
/* IPFSConnector methods */
func (mock *mockService) IPFSPin(ctx context.Context, in api.PinSerial, out *struct{}) error {
func (mock *mockService) IPFSPin(ctx context.Context, in *api.Pin, out *struct{}) error {
return nil
}
func (mock *mockService) IPFSUnpin(ctx context.Context, in api.PinSerial, out *struct{}) error {
func (mock *mockService) IPFSUnpin(ctx context.Context, in *api.Pin, out *struct{}) error {
return nil
}
func (mock *mockService) IPFSPinLsCid(ctx context.Context, in api.PinSerial, out *api.IPFSPinStatus) error {
if in.Cid == TestCid1 || in.Cid == TestCid3 {
func (mock *mockService) IPFSPinLsCid(ctx context.Context, in cid.Cid, out *api.IPFSPinStatus) error {
if in.Equals(Cid1) || in.Equals(Cid3) {
*out = api.IPFSPinStatusRecursive
} else {
*out = api.IPFSPinStatusUnpinned
@ -385,8 +377,8 @@ func (mock *mockService) IPFSPinLsCid(ctx context.Context, in api.PinSerial, out
func (mock *mockService) IPFSPinLs(ctx context.Context, in string, out *map[string]api.IPFSPinStatus) error {
m := map[string]api.IPFSPinStatus{
TestCid1: api.IPFSPinStatusRecursive,
TestCid3: api.IPFSPinStatusRecursive,
Cid1.String(): api.IPFSPinStatusRecursive,
Cid3.String(): api.IPFSPinStatusRecursive,
}
*out = m
return nil
@ -396,8 +388,8 @@ func (mock *mockService) IPFSConnectSwarms(ctx context.Context, in struct{}, out
return nil
}
func (mock *mockService) IPFSSwarmPeers(ctx context.Context, in struct{}, out *api.SwarmPeersSerial) error {
*out = []string{TestPeerID2.Pretty(), TestPeerID3.Pretty()}
func (mock *mockService) IPFSSwarmPeers(ctx context.Context, in struct{}, out *[]peer.ID) error {
*out = []peer.ID{PeerID2, PeerID3}
return nil
}
@ -421,7 +413,7 @@ func (mock *mockService) IPFSRepoStat(ctx context.Context, in struct{}, out *api
return nil
}
func (mock *mockService) IPFSBlockPut(ctx context.Context, in api.NodeWithMeta, out *struct{}) error {
func (mock *mockService) IPFSBlockPut(ctx context.Context, in *api.NodeWithMeta, out *struct{}) error {
return nil
}
@ -434,24 +426,6 @@ func (mock *mockService) ConsensusRmPeer(ctx context.Context, in peer.ID, out *s
}
func (mock *mockService) ConsensusPeers(ctx context.Context, in struct{}, out *[]peer.ID) error {
*out = []peer.ID{TestPeerID1, TestPeerID2, TestPeerID3}
*out = []peer.ID{PeerID1, PeerID2, PeerID3}
return nil
}
// FIXME: dup from util.go
func globalPinInfoSliceToSerial(gpi []api.GlobalPinInfo) []api.GlobalPinInfoSerial {
gpis := make([]api.GlobalPinInfoSerial, len(gpi), len(gpi))
for i, v := range gpi {
gpis[i] = v.ToSerial()
}
return gpis
}
// FIXME: dup from util.go
func pinInfoSliceToSerial(pi []api.PinInfo) []api.PinInfoSerial {
pis := make([]api.PinInfoSerial, len(pi), len(pi))
for i, v := range pi {
pis[i] = v.ToSerial()
}
return pis
}

View File

@ -9,6 +9,8 @@ import (
"testing"
files "github.com/ipfs/go-ipfs-files"
cid "github.com/ipfs/go-cid"
)
const shardingTestDir = "shardTesting"
@ -59,8 +61,8 @@ var (
}
// Used for testing blockput/blockget
TestShardCid = "zdpuAoiNm1ntWx6jpgcReTiCWFHJSTpvTw4bAAn9p6yDnznqh"
TestShardData, _ = hex.DecodeString("a16130d82a58230012209273fd63ec94bed5abb219b2d9cb010cabe4af7b0177292d4335eff50464060a")
ShardCid, _ = cid.Decode("zdpuAoiNm1ntWx6jpgcReTiCWFHJSTpvTw4bAAn9p6yDnznqh")
ShardData, _ = hex.DecodeString("a16130d82a58230012209273fd63ec94bed5abb219b2d9cb010cabe4af7b0177292d4335eff50464060a")
)
// ShardingTestHelper helps generating files and folders to test adding and

29
util.go
View File

@ -7,7 +7,6 @@ import (
"github.com/ipfs/ipfs-cluster/api"
cid "github.com/ipfs/go-cid"
host "github.com/libp2p/go-libp2p-host"
peer "github.com/libp2p/go-libp2p-peer"
ma "github.com/multiformats/go-multiaddr"
)
@ -52,34 +51,6 @@ func PeersFromMultiaddrs(addrs []ma.Multiaddr) []peer.ID {
// return addrs
// }
// If we have connections open to that PID and they are using a different addr
// then we return the one we are using, otherwise the one provided
func getRemoteMultiaddr(h host.Host, pid peer.ID, addr ma.Multiaddr) ma.Multiaddr {
conns := h.Network().ConnsToPeer(pid)
if len(conns) > 0 {
return api.MustLibp2pMultiaddrJoin(conns[0].RemoteMultiaddr(), pid)
}
return api.MustLibp2pMultiaddrJoin(addr, pid)
}
func pinInfoSliceToSerial(pi []api.PinInfo) []api.PinInfoSerial {
pis := make([]api.PinInfoSerial, len(pi), len(pi))
for i, v := range pi {
pis[i] = v.ToSerial()
}
return pis
}
// GlobalPinInfoSliceToSerial is a helper function for serializing a slice of
// api.GlobalPinInfos.
func GlobalPinInfoSliceToSerial(gpi []api.GlobalPinInfo) []api.GlobalPinInfoSerial {
gpis := make([]api.GlobalPinInfoSerial, len(gpi), len(gpi))
for i, v := range gpi {
gpis[i] = v.ToSerial()
}
return gpis
}
func logError(fmtstr string, args ...interface{}) error {
msg := fmt.Sprintf(fmtstr, args...)
logger.Error(msg)