ipfs-cluster/cluster_test.go
Hector Sanjuan 1eade4ae58 Fix #732: Introduce native pin/update
This introduces a pin/update operation which allows pinning a new item to the
cluster while indicating that said pin is an update to an already-existing pin.

When this is the case, all the configuration for the existing pin is copied to
the new one (including allocations). The IPFS connector will then trigger
pin/update directly in IPFS, allowing efficient pinning based on
DAG differences. Since the allocations are the same for both pins,
the pin/update can proceed.

PinUpdate does not unpin the previous pin (it is not possible to do this
atomically in cluster as it happens in IPFS). The user can do it manually
after the pin/update is done.

Internally, after much deliberation on the optimal way to do this, I opted for
adding a `PinUpdate` option to the `PinOptions` type (it carries the CID to
update from). In order to carry this option from the REST API to the IPFS
Connector, it is serialized in the Protobuf (and stored in the datastore).
There is no other simple way to do this, since the Pin object is the piece of
information that is sent around.

Additionally, making it a PinOption plays well with the Pin/PinPath APIs, which
need few changes. Effectively, you are pinning a new thing; you are just
indicating that it should be configured from an existing one.
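
As a rough sketch of the intended usage from the Go API (`cl`, `existingCid` and
`newCid` are placeholders here, and it is assumed that the `PinUpdate` field
simply holds the cid.Cid of the existing pin):

    // Pin newCid, copying options and allocations from the existing pin of existingCid.
    opts := api.PinOptions{PinUpdate: existingCid}
    _, err := cl.Pin(ctx, newCid, opts)
    if err != nil {
        // handle the error
    }
    // The previous pin is left in place; unpin it manually once it is no longer needed.
    _, err = cl.Unpin(ctx, existingCid)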

Fixes #732
2019-08-09 16:11:52 +02:00

924 lines
22 KiB
Go

package ipfscluster

import (
	"context"
	"errors"
	"fmt"
	"mime/multipart"
	"os"
	"path/filepath"
	"sync"
	"testing"
	"time"

	"github.com/ipfs/ipfs-cluster/adder/sharding"
	"github.com/ipfs/ipfs-cluster/allocator/ascendalloc"
	"github.com/ipfs/ipfs-cluster/api"
	"github.com/ipfs/ipfs-cluster/config"
	"github.com/ipfs/ipfs-cluster/informer/numpin"
	"github.com/ipfs/ipfs-cluster/monitor/pubsubmon"
	"github.com/ipfs/ipfs-cluster/state"
	"github.com/ipfs/ipfs-cluster/test"
	"github.com/ipfs/ipfs-cluster/version"

	cid "github.com/ipfs/go-cid"
	gopath "github.com/ipfs/go-path"
	peer "github.com/libp2p/go-libp2p-core/peer"
	rpc "github.com/libp2p/go-libp2p-gorpc"
)
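
// mockComponent provides minimal SetClient/Shutdown implementations which the
// mock cluster components below embed.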
type mockComponent struct {
	rpcClient *rpc.Client
}

func (c *mockComponent) Shutdown(ctx context.Context) error {
	return nil
}

func (c *mockComponent) SetClient(client *rpc.Client) {
	c.rpcClient = client
	return
}

type mockAPI struct {
	mockComponent
}

type mockProxy struct {
	mockComponent
}
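
// mockConnector is an in-memory stand-in for the IPFS connector: it records
// pins and blocks in sync.Maps rather than talking to an IPFS daemon.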
type mockConnector struct {
	mockComponent
	pins   sync.Map
	blocks sync.Map
}

func (ipfs *mockConnector) ID(ctx context.Context) (*api.IPFSID, error) {
	return &api.IPFSID{
		ID: test.PeerID1,
	}, nil
}

func (ipfs *mockConnector) Pin(ctx context.Context, pin *api.Pin) error {
	ipfs.pins.Store(pin.Cid.String(), pin.MaxDepth)
	return nil
}

func (ipfs *mockConnector) Unpin(ctx context.Context, c cid.Cid) error {
	ipfs.pins.Delete(c.String())
	return nil
}

func (ipfs *mockConnector) PinLsCid(ctx context.Context, c cid.Cid) (api.IPFSPinStatus, error) {
	dI, ok := ipfs.pins.Load(c.String())
	if !ok {
		return api.IPFSPinStatusUnpinned, nil
	}
	depth := dI.(int)
	if depth == 0 {
		return api.IPFSPinStatusDirect, nil
	}
	return api.IPFSPinStatusRecursive, nil
}

func (ipfs *mockConnector) PinLs(ctx context.Context, filter string) (map[string]api.IPFSPinStatus, error) {
	m := make(map[string]api.IPFSPinStatus)
	var st api.IPFSPinStatus
	ipfs.pins.Range(func(k, v interface{}) bool {
		switch v.(int) {
		case 0:
			st = api.IPFSPinStatusDirect
		default:
			st = api.IPFSPinStatusRecursive
		}
		m[k.(string)] = st
		return true
	})
	return m, nil
}

func (ipfs *mockConnector) SwarmPeers(ctx context.Context) ([]peer.ID, error) {
	return []peer.ID{test.PeerID4, test.PeerID5}, nil
}

func (ipfs *mockConnector) RepoStat(ctx context.Context) (*api.IPFSRepoStat, error) {
	return &api.IPFSRepoStat{RepoSize: 100, StorageMax: 1000}, nil
}

func (ipfs *mockConnector) Resolve(ctx context.Context, path string) (cid.Cid, error) {
	_, err := gopath.ParsePath(path)
	if err != nil {
		return cid.Undef, err
	}
	return test.CidResolved, nil
}

func (ipfs *mockConnector) ConnectSwarms(ctx context.Context) error       { return nil }
func (ipfs *mockConnector) ConfigKey(keypath string) (interface{}, error) { return nil, nil }

func (ipfs *mockConnector) BlockPut(ctx context.Context, nwm *api.NodeWithMeta) error {
	ipfs.blocks.Store(nwm.Cid.String(), nwm.Data)
	return nil
}

func (ipfs *mockConnector) BlockGet(ctx context.Context, c cid.Cid) ([]byte, error) {
	d, ok := ipfs.blocks.Load(c.String())
	if !ok {
		return nil, errors.New("block not found")
	}
	return d.([]byte), nil
}

type mockTracer struct {
	mockComponent
}
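
// testingCluster builds a single-peer cluster wired to the mock components
// above, using the consensus backend selected for the test run, and returns
// it once it reports ready.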
func testingCluster(t *testing.T) (*Cluster, *mockAPI, *mockConnector, PinTracker) {
	ident, clusterCfg, _, _, _, badgerCfg, raftCfg, crdtCfg, maptrackerCfg, statelesstrackerCfg, psmonCfg, _, _ := testingConfigs()
	ctx := context.Background()
	host, pubsub, dht := createHost(t, ident.PrivateKey, clusterCfg.Secret, clusterCfg.ListenAddr)
	folder := filepath.Join(testsFolder, host.ID().Pretty())
	cleanState()
	clusterCfg.SetBaseDir(folder)
	raftCfg.DataFolder = folder
	badgerCfg.Folder = filepath.Join(folder, "badger")
	api := &mockAPI{}
	proxy := &mockProxy{}
	ipfs := &mockConnector{}
	tracker := makePinTracker(t, ident.ID, maptrackerCfg, statelesstrackerCfg, clusterCfg.Peername)
	tracer := &mockTracer{}
	store := makeStore(t, badgerCfg)
	cons := makeConsensus(t, store, host, pubsub, dht, raftCfg, false, crdtCfg)
	var peersF func(context.Context) ([]peer.ID, error)
	if consensus == "raft" {
		peersF = cons.Peers
	}
	psmonCfg.CheckInterval = 2 * time.Second
	mon, err := pubsubmon.New(ctx, psmonCfg, pubsub, peersF)
	if err != nil {
		t.Fatal(err)
	}
	alloc := ascendalloc.NewAllocator()
	numpinCfg := &numpin.Config{}
	numpinCfg.Default()
	inf, _ := numpin.NewInformer(numpinCfg)
	ReadyTimeout = raftCfg.WaitForLeaderTimeout + 1*time.Second
	cl, err := NewCluster(
		ctx,
		host,
		dht,
		clusterCfg,
		store,
		cons,
		[]API{api, proxy},
		ipfs,
		tracker,
		mon,
		alloc,
		inf,
		tracer,
	)
	if err != nil {
		t.Fatal("cannot create cluster:", err)
	}
	<-cl.Ready()
	return cl, api, ipfs, tracker
}

func cleanState() {
	os.RemoveAll(testsFolder)
}
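
// testClusterShutdown exercises repeated Shutdown calls on a cluster and then
// creates and shuts down a fresh one; being lowercase, it is a helper rather
// than a directly-run test.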
func testClusterShutdown(t *testing.T) {
	ctx := context.Background()
	cl, _, _, _ := testingCluster(t)
	err := cl.Shutdown(ctx)
	if err != nil {
		t.Error("cluster shutdown failed:", err)
	}
	cl.Shutdown(ctx)
	cl, _, _, _ = testingCluster(t)
	err = cl.Shutdown(ctx)
	if err != nil {
		t.Error("cluster shutdown failed:", err)
	}
}

func TestClusterStateSync(t *testing.T) {
	ctx := context.Background()
	cleanState()
	cl, _, _, _ := testingCluster(t)
	defer cleanState()
	defer cl.Shutdown(ctx)
	c := test.Cid1
	_, err := cl.Pin(ctx, c, api.PinOptions{})
	if err != nil {
		t.Fatal("pin should have worked:", err)
	}
	err = cl.StateSync(ctx)
	if err != nil {
		t.Fatal("sync after pinning should have worked:", err)
	}
	// Modify state on the side so the sync does not
	// happen on an empty slate
	st, err := cl.consensus.State(ctx)
	if err != nil {
		t.Fatal(err)
	}
	st.(state.State).Rm(ctx, c)
	err = cl.StateSync(ctx)
	if err != nil {
		t.Fatal("sync with recover should have worked:", err)
	}
}

func TestClusterID(t *testing.T) {
	ctx := context.Background()
	cl, _, _, _ := testingCluster(t)
	defer cleanState()
	defer cl.Shutdown(ctx)
	id := cl.ID(ctx)
	if len(id.Addresses) == 0 {
		t.Error("expected more addresses")
	}
	if id.ID == "" {
		t.Error("expected a cluster ID")
	}
	if id.Version != version.Version.String() {
		t.Error("version should match current version")
	}
	//if id.PublicKey == nil {
	//	t.Error("publicKey should not be empty")
	//}
}

func TestClusterPin(t *testing.T) {
	ctx := context.Background()
	cl, _, _, _ := testingCluster(t)
	defer cleanState()
	defer cl.Shutdown(ctx)
	c := test.Cid1
	res, err := cl.Pin(ctx, c, api.PinOptions{})
	if err != nil {
		t.Fatal("pin should have worked:", err)
	}
	if res.Type != api.DataType {
		t.Error("unexpected pin type")
	}
	switch consensus {
	case "crdt":
		return
	case "raft":
		// test an error case
		cl.consensus.Shutdown(ctx)
		opts := api.PinOptions{
			ReplicationFactorMax: 1,
			ReplicationFactorMin: 1,
		}
		_, err = cl.Pin(ctx, c, opts)
		if err == nil {
			t.Error("expected an error but things worked")
		}
	}
}

func TestClusterPinPath(t *testing.T) {
	ctx := context.Background()
	cl, _, _, _ := testingCluster(t)
	defer cleanState()
	defer cl.Shutdown(ctx)
	pin, err := cl.PinPath(ctx, test.PathIPFS2, api.PinOptions{})
	if err != nil {
		t.Fatal("pin should have worked:", err)
	}
	if !pin.Cid.Equals(test.CidResolved) {
		t.Error("expected a different cid, found", pin.Cid.String())
	}
	// test an error case
	_, err = cl.PinPath(ctx, test.InvalidPath1, api.PinOptions{})
	if err == nil {
		t.Error("expected an error but things worked")
	}
}

func TestAddFile(t *testing.T) {
	ctx := context.Background()
	cl, _, _, _ := testingCluster(t)
	defer cleanState()
	defer cl.Shutdown(ctx)
	sth := test.NewShardingTestHelper()
	defer sth.Clean(t)

	t.Run("local", func(t *testing.T) {
		params := api.DefaultAddParams()
		params.Shard = false
		params.Name = "testlocal"
		mfr, closer := sth.GetTreeMultiReader(t)
		defer closer.Close()
		r := multipart.NewReader(mfr, mfr.Boundary())
		c, err := cl.AddFile(r, params)
		if err != nil {
			t.Fatal(err)
		}
		if c.String() != test.ShardingDirBalancedRootCID {
			t.Fatal("unexpected root CID for local add")
		}
		pinDelay()
		pin := cl.StatusLocal(ctx, c)
		if pin.Error != "" {
			t.Fatal(pin.Error)
		}
		if pin.Status != api.TrackerStatusPinned {
			t.Error("cid should be pinned")
		}
		cl.Unpin(ctx, c) // unpin so we can pin the shard in next test
		pinDelay()
	})

	t.Run("shard", func(t *testing.T) {
		params := api.DefaultAddParams()
		params.Shard = true
		params.Name = "testshard"
		mfr, closer := sth.GetTreeMultiReader(t)
		defer closer.Close()
		r := multipart.NewReader(mfr, mfr.Boundary())
		c, err := cl.AddFile(r, params)
		if err != nil {
			t.Fatal(err)
		}
		if c.String() != test.ShardingDirBalancedRootCID {
			t.Fatal("unexpected root CID for local add")
		}
		pinDelay()
		// We know that this produces 14 shards.
		sharding.VerifyShards(t, c, cl, cl.ipfs, 14)
	})
}

func TestUnpinShard(t *testing.T) {
	ctx := context.Background()
	cl, _, _, _ := testingCluster(t)
	defer cleanState()
	defer cl.Shutdown(ctx)
	sth := test.NewShardingTestHelper()
	defer sth.Clean(t)
	params := api.DefaultAddParams()
	params.Shard = true
	params.Name = "testshard"
	mfr, closer := sth.GetTreeMultiReader(t)
	defer closer.Close()
	r := multipart.NewReader(mfr, mfr.Boundary())
	root, err := cl.AddFile(r, params)
	if err != nil {
		t.Fatal(err)
	}
	pinDelay()

	// We know that this produces 14 shards.
	sharding.VerifyShards(t, root, cl, cl.ipfs, 14)

	// skipping errors, VerifyShards has checked
	pinnedCids := []cid.Cid{}
	pinnedCids = append(pinnedCids, root)
	metaPin, _ := cl.PinGet(ctx, root)
	cDag, _ := cl.PinGet(ctx, *metaPin.Reference)
	pinnedCids = append(pinnedCids, cDag.Cid)
	cDagBlock, _ := cl.ipfs.BlockGet(ctx, cDag.Cid)
	cDagNode, _ := sharding.CborDataToNode(cDagBlock, "cbor")
	for _, l := range cDagNode.Links() {
		pinnedCids = append(pinnedCids, l.Cid)
	}

	t.Run("unpin clusterdag should fail", func(t *testing.T) {
		_, err := cl.Unpin(ctx, cDag.Cid)
		if err == nil {
			t.Fatal("should not allow unpinning the cluster DAG directly")
		}
		t.Log(err)
	})

	t.Run("unpin shard should fail", func(t *testing.T) {
		_, err := cl.Unpin(ctx, cDagNode.Links()[0].Cid)
		if err == nil {
			t.Fatal("should not allow unpinning shards directly")
		}
		t.Log(err)
	})

	t.Run("normal unpin", func(t *testing.T) {
		res, err := cl.Unpin(ctx, root)
		if err != nil {
			t.Fatal(err)
		}
		if res.Type != api.MetaType {
			t.Fatal("unexpected root pin type")
		}
		pinDelay()
		for _, c := range pinnedCids {
			st := cl.StatusLocal(ctx, c)
			if st.Status != api.TrackerStatusUnpinned {
				t.Errorf("%s should have been unpinned but is %s", c, st.Status)
			}
			st2, err := cl.ipfs.PinLsCid(context.Background(), c)
			if err != nil {
				t.Fatal(err)
			}
			if st2 != api.IPFSPinStatusUnpinned {
				t.Errorf("%s should have been unpinned in ipfs but is %d", c, st2)
			}
		}
	})
}
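
// The tests below exercise the old manual sharded-pin API and are currently
// commented out.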
// func singleShardedPin(t *testing.T, cl *Cluster) {
// 	cShard, _ := cid.Decode(test.ShardCid)
// 	cCdag, _ := cid.Decode(test.CdagCid)
// 	cMeta, _ := cid.Decode(test.MetaRootCid)
// 	pinMeta(t, cl, []cid.Cid{cShard}, cCdag, cMeta)
// }

// func pinMeta(t *testing.T, cl *Cluster, shardCids []cid.Cid, cCdag, cMeta cid.Cid) {
// 	for _, cShard := range shardCids {
// 		shardPin := api.Pin{
// 			Cid:      cShard,
// 			Type:     api.ShardType,
// 			MaxDepth: 1,
// 			PinOptions: api.PinOptions{
// 				ReplicationFactorMin: -1,
// 				ReplicationFactorMax: -1,
// 			},
// 		}
// 		err := cl.Pin(shardPin)
// 		if err != nil {
// 			t.Fatal("shard pin should have worked:", err)
// 		}
// 	}
// 	parents := cid.NewSet()
// 	parents.Add(cMeta)
// 	cdagPin := api.Pin{
// 		Cid:      cCdag,
// 		Type:     api.ClusterDAGType,
// 		MaxDepth: 0,
// 		PinOptions: api.PinOptions{
// 			ReplicationFactorMin: -1,
// 			ReplicationFactorMax: -1,
// 		},
// 	}
// 	err := cl.Pin(cdagPin)
// 	if err != nil {
// 		t.Fatal("pin should have worked:", err)
// 	}
// 	metaPin := api.Pin{
// 		Cid:        cMeta,
// 		Type:       api.MetaType,
// 		Clusterdag: cCdag,
// 	}
// 	err = cl.Pin(metaPin)
// 	if err != nil {
// 		t.Fatal("pin should have worked:", err)
// 	}
// }

// func TestClusterPinMeta(t *testing.T) {
// 	cl, _, _, _ := testingCluster(t)
// 	defer cleanState()
// 	defer cl.Shutdown()
// 	singleShardedPin(t, cl)
// }

// func TestClusterUnpinShardFail(t *testing.T) {
// 	cl, _, _, _ := testingCluster(t)
// 	defer cleanState()
// 	defer cl.Shutdown()
// 	singleShardedPin(t, cl)
// 	// verify pins
// 	if len(cl.Pins()) != 3 {
// 		t.Fatal("should have 3 pins")
// 	}
// 	// Unpinning metadata should fail
// 	cShard, _ := cid.Decode(test.ShardCid)
// 	cCdag, _ := cid.Decode(test.CdagCid)
// 	err := cl.Unpin(cShard)
// 	if err == nil {
// 		t.Error("should error when unpinning shard")
// 	}
// 	err = cl.Unpin(cCdag)
// 	if err == nil {
// 		t.Error("should error when unpinning cluster dag")
// 	}
// }

// func TestClusterUnpinMeta(t *testing.T) {
// 	cl, _, _, _ := testingCluster(t)
// 	defer cleanState()
// 	defer cl.Shutdown()
// 	singleShardedPin(t, cl)
// 	// verify pins
// 	if len(cl.Pins()) != 3 {
// 		t.Fatal("should have 3 pins")
// 	}
// 	// Unpinning from root should work
// 	cMeta, _ := cid.Decode(test.MetaRootCid)
// 	err := cl.Unpin(cMeta)
// 	if err != nil {
// 		t.Error(err)
// 	}
// }

// func pinTwoParentsOneShard(t *testing.T, cl *Cluster) {
// 	singleShardedPin(t, cl)
// 	cShard, _ := cid.Decode(test.ShardCid)
// 	cShard2, _ := cid.Decode(test.ShardCid2)
// 	cCdag2, _ := cid.Decode(test.CdagCid2)
// 	cMeta2, _ := cid.Decode(test.MetaRootCid2)
// 	pinMeta(t, cl, []cid.Cid{cShard, cShard2}, cCdag2, cMeta2)
// 	shardPin, err := cl.PinGet(cShard)
// 	if err != nil {
// 		t.Fatal("pin should be in state")
// 	}
// 	if shardPin.Parents.Len() != 2 {
// 		t.Fatal("unexpected parent set in shared shard")
// 	}
// 	shardPin2, err := cl.PinGet(cShard2)
// 	if shardPin2.Parents.Len() != 1 {
// 		t.Fatal("unexpected parent set in unshared shard")
// 	}
// 	if err != nil {
// 		t.Fatal("pin should be in state")
// 	}
// }

// func TestClusterPinShardTwoParents(t *testing.T) {
// 	cl, _, _, _ := testingCluster(t)
// 	defer cleanState()
// 	defer cl.Shutdown()
// 	pinTwoParentsOneShard(t, cl)
// 	cShard, _ := cid.Decode(test.ShardCid)
// 	shardPin, err := cl.PinGet(cShard)
// 	if err != nil {
// 		t.Fatal("double pinned shard should be pinned")
// 	}
// 	if shardPin.Parents == nil || shardPin.Parents.Len() != 2 {
// 		t.Fatal("double pinned shard should have two parents")
// 	}
// }

// func TestClusterUnpinShardSecondParent(t *testing.T) {
// 	cl, _, _, _ := testingCluster(t)
// 	defer cleanState()
// 	defer cl.Shutdown()
// 	pinTwoParentsOneShard(t, cl)
// 	if len(cl.Pins()) != 6 {
// 		t.Fatal("should have 6 pins")
// 	}
// 	cMeta2, _ := cid.Decode(test.MetaRootCid2)
// 	err := cl.Unpin(cMeta2)
// 	if err != nil {
// 		t.Error(err)
// 	}
// 	pinDelay()
// 	if len(cl.Pins()) != 3 {
// 		t.Fatal("should have 3 pins")
// 	}
// 	cShard, _ := cid.Decode(test.ShardCid)
// 	cCdag, _ := cid.Decode(test.CdagCid)
// 	shardPin, err := cl.PinGet(cShard)
// 	if err != nil {
// 		t.Fatal("double pinned shard node should still be pinned")
// 	}
// 	if shardPin.Parents == nil || shardPin.Parents.Len() != 1 ||
// 		!shardPin.Parents.Has(cCdag) {
// 		t.Fatalf("shard node should have single original parent %v", shardPin.Parents.Keys())
// 	}
// }

// func TestClusterUnpinShardFirstParent(t *testing.T) {
// 	cl, _, _, _ := testingCluster(t)
// 	defer cleanState()
// 	defer cl.Shutdown()
// 	pinTwoParentsOneShard(t, cl)
// 	if len(cl.Pins()) != 6 {
// 		t.Fatal("should have 6 pins")
// 	}
// 	cMeta, _ := cid.Decode(test.MetaRootCid)
// 	err := cl.Unpin(cMeta)
// 	if err != nil {
// 		t.Error(err)
// 	}
// 	if len(cl.Pins()) != 4 {
// 		t.Fatal("should have 4 pins")
// 	}
// 	cShard, _ := cid.Decode(test.ShardCid)
// 	cShard2, _ := cid.Decode(test.ShardCid2)
// 	cCdag2, _ := cid.Decode(test.CdagCid2)
// 	shardPin, err := cl.PinGet(cShard)
// 	if err != nil {
// 		t.Fatal("double pinned shard node should still be pinned")
// 	}
// 	if shardPin.Parents == nil || shardPin.Parents.Len() != 1 ||
// 		!shardPin.Parents.Has(cCdag2) {
// 		t.Fatal("shard node should have single original parent")
// 	}
// 	_, err = cl.PinGet(cShard2)
// 	if err != nil {
// 		t.Fatal("other shard should still be pinned too")
// 	}
// }

// func TestClusterPinTwoMethodsFail(t *testing.T) {
// 	cl, _, _, _ := testingCluster(t)
// 	defer cleanState()
// 	defer cl.Shutdown()
// 	// First pin normally then sharding pin fails
// 	c, _ := cid.Decode(test.MetaRootCid)
// 	err := cl.Pin(api.PinCid(c))
// 	if err != nil {
// 		t.Fatal("pin should have worked:", err)
// 	}
// 	cCdag, _ := cid.Decode(test.CdagCid)
// 	cMeta, _ := cid.Decode(test.MetaRootCid)
// 	metaPin := api.Pin{
// 		Cid:        cMeta,
// 		Type:       api.MetaType,
// 		Clusterdag: cCdag,
// 	}
// 	err = cl.Pin(metaPin)
// 	if err == nil {
// 		t.Fatal("pin should have failed:", err)
// 	}
// 	err = cl.Unpin(c)
// 	if err != nil {
// 		t.Fatal("unpin should have worked:", err)
// 	}
// 	singleShardedPin(t, cl)
// 	err = cl.Pin(api.PinCid(c))
// 	if err == nil {
// 		t.Fatal("pin should have failed:", err)
// 	}
// }

// func TestClusterRePinShard(t *testing.T) {
// 	cl, _, _, _ := testingCluster(t)
// 	defer cleanState()
// 	defer cl.Shutdown()
// 	cCdag, _ := cid.Decode(test.CdagCid)
// 	cShard, _ := cid.Decode(test.ShardCid)
// 	shardPin := api.Pin{
// 		Cid:                  cShard,
// 		Type:                 api.ShardType,
// 		ReplicationFactorMin: -1,
// 		ReplicationFactorMax: -1,
// 		Recursive:            true,
// 	}
// 	err := cl.Pin(shardPin)
// 	if err != nil {
// 		t.Fatal("shard pin should have worked:", err)
// 	}
// 	parents := cid.NewSet()
// 	parents.Add(cCdag)
// 	shardPin.Parents = parents
// 	err = cl.Pin(shardPin)
// 	if err != nil {
// 		t.Fatal("repinning shard pin with different parents should have worked:", err)
// 	}
// 	shardPin.ReplicationFactorMin = 3
// 	shardPin.ReplicationFactorMax = 5
// 	err = cl.Pin(shardPin)
// 	if err == nil {
// 		t.Fatal("repinning shard pin with different repl factors should have failed:", err)
// 	}
// }

func TestClusterPins(t *testing.T) {
	ctx := context.Background()
	cl, _, _, _ := testingCluster(t)
	defer cleanState()
	defer cl.Shutdown(ctx)
	c := test.Cid1
	_, err := cl.Pin(ctx, c, api.PinOptions{})
	if err != nil {
		t.Fatal("pin should have worked:", err)
	}
	pinDelay()
	pins, err := cl.Pins(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if len(pins) != 1 {
		t.Fatal("pin should be part of the state")
	}
	if !pins[0].Cid.Equals(c) || pins[0].ReplicationFactorMin != -1 || pins[0].ReplicationFactorMax != -1 {
		t.Error("the Pin does not look as expected")
	}
}

func TestClusterPinGet(t *testing.T) {
	ctx := context.Background()
	cl, _, _, _ := testingCluster(t)
	defer cleanState()
	defer cl.Shutdown(ctx)
	c := test.Cid1
	_, err := cl.Pin(ctx, c, api.PinOptions{})
	if err != nil {
		t.Fatal("pin should have worked:", err)
	}
	pin, err := cl.PinGet(ctx, c)
	if err != nil {
		t.Fatal(err)
	}
	if !pin.Cid.Equals(c) || pin.ReplicationFactorMin != -1 || pin.ReplicationFactorMax != -1 {
		t.Error("the Pin does not look as expected")
	}
	_, err = cl.PinGet(ctx, test.Cid2)
	if err == nil {
		t.Fatal("expected an error")
	}
}

func TestClusterUnpin(t *testing.T) {
	ctx := context.Background()
	cl, _, _, _ := testingCluster(t)
	defer cleanState()
	defer cl.Shutdown(ctx)
	c := test.Cid1
	// Unpin should error without pin being committed to state
	_, err := cl.Unpin(ctx, c)
	if err == nil {
		t.Error("unpin should have failed")
	}
	// Unpin after pin should succeed
	_, err = cl.Pin(ctx, c, api.PinOptions{})
	if err != nil {
		t.Fatal("pin should have worked:", err)
	}
	res, err := cl.Unpin(ctx, c)
	if err != nil {
		t.Error("unpin should have worked:", err)
	}
	if res.Type != api.DataType {
		t.Error("unexpected pin type returned")
	}
	// test another error case
	cl.consensus.Shutdown(ctx)
	_, err = cl.Unpin(ctx, c)
	if err == nil {
		t.Error("expected an error but things worked")
	}
}

func TestClusterUnpinPath(t *testing.T) {
	ctx := context.Background()
	cl, _, _, _ := testingCluster(t)
	defer cleanState()
	defer cl.Shutdown(ctx)
	// Unpin should error without pin being committed to state
	_, err := cl.UnpinPath(ctx, test.PathIPFS2)
	if err == nil {
		t.Error("unpin with path should have failed")
	}
	// Unpin after pin should succeed
	pin, err := cl.PinPath(ctx, test.PathIPFS2, api.PinOptions{})
	if err != nil {
		t.Fatal("pin with path should have worked:", err)
	}
	if !pin.Cid.Equals(test.CidResolved) {
		t.Error("expected a different cid, found", pin.Cid.String())
	}
	pin, err = cl.UnpinPath(ctx, test.PathIPFS2)
	if err != nil {
		t.Error("unpin with path should have worked:", err)
	}
	if !pin.Cid.Equals(test.CidResolved) {
		t.Error("expected a different cid, found", pin.Cid.String())
	}
}

func TestClusterPeers(t *testing.T) {
	ctx := context.Background()
	cl, _, _, _ := testingCluster(t)
	defer cleanState()
	defer cl.Shutdown(ctx)
	peers := cl.Peers(ctx)
	if len(peers) != 1 {
		t.Fatal("expected 1 peer")
	}
	ident := &config.Identity{}
	err := ident.LoadJSON(testingIdentity)
	if err != nil {
		t.Fatal(err)
	}
	if peers[0].ID != ident.ID {
		fmt.Println(peers[0].ID)
		fmt.Println(ident.ID)
		t.Error("bad member")
	}
}

func TestVersion(t *testing.T) {
	ctx := context.Background()
	cl, _, _, _ := testingCluster(t)
	defer cleanState()
	defer cl.Shutdown(ctx)
	if cl.Version() != version.Version.String() {
		t.Error("bad Version()")
	}
}

func TestClusterRecoverAllLocal(t *testing.T) {
	ctx := context.Background()
	cl, _, _, _ := testingCluster(t)
	defer cleanState()
	defer cl.Shutdown(ctx)
	_, err := cl.Pin(ctx, test.Cid1, api.PinOptions{})
	if err != nil {
		t.Fatal("pin should have worked:", err)
	}
	pinDelay()
	recov, err := cl.RecoverAllLocal(ctx)
	if err != nil {
		t.Error("did not expect an error")
	}
	if len(recov) != 1 {
		t.Fatalf("there should be only one pin, got = %d", len(recov))
	}
	if recov[0].Status != api.TrackerStatusPinned {
		t.Errorf("the pin should have been recovered, got = %v", recov[0].Status)
	}
}