Move testing mocks to subpackage so they can be re-used

Related to #18

License: MIT
Signed-off-by: Hector Sanjuan <hector@protocol.ai>
Hector Sanjuan 2017-02-09 16:29:17 +01:00
parent c0697599ac
commit 0e7091c6cb
20 changed files with 265 additions and 212 deletions
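The mocks and fixtures that used to be unexported helpers in the root package (mockRPCClient, ipfsMock, the testCid*/testPeerID values) now live in an importable test subpackage. A minimal sketch of how a test in another package could reuse them; the example package name and test body are illustrative, only the test.* identifiers come from this commit:

package example_test

import (
	"testing"

	rpc "github.com/hsanjuan/go-libp2p-gorpc"

	"github.com/ipfs/ipfs-cluster/test"
)

// TestWithSharedMocks sketches a test living outside the root package that
// reuses the shared fixtures and the mock RPC client introduced here.
func TestWithSharedMocks(t *testing.T) {
	var client *rpc.Client = test.NewMockRPCClient(t)
	_ = client // hand this to a component under test via its SetClient()

	// Shared CIDs and peer IDs replace the old package-local testCid/testPeerID.
	if test.TestCid1 == "" {
		t.Fatal("expected a populated test CID")
	}
	t.Log("test peer:", test.TestPeerID1.Pretty())
}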

View File

@ -6,10 +6,9 @@ install:
- go get github.com/mattn/goveralls
- make deps
script:
- make test
- make service
- make ctl
- "$GOPATH/bin/goveralls -coverprofile=coverage.out -service=travis-ci -repotoken $COVERALLS_TOKEN"
- ./coverage.sh
env:
global:
secure: M3K3y9+D933tCda7+blW3qqVV8fA6PBDRdJoQvmQc1f0XYbWinJ+bAziFp6diKkF8sMQ+cPwLMONYJuaNT2h7/PkG+sIwF0PuUo5VVCbhGmSDrn2qOjmSnfawNs8wW31f44FQA8ICka1EFZcihohoIMf0e5xZ0tXA9jqw+ngPJiRnv4zyzC3r6t4JMAZcbS9w4KTYpIev5Yj72eCvk6lGjadSVCDVXo2sVs27tNt+BSgtMXiH6Sv8GLOnN2kFspGITgivHgB/jtU6QVtFXB+cbBJJAs3lUYnzmQZ5INecbjweYll07ilwFiCVNCX67+L15gpymKGJbQggloIGyTWrAOa2TMaB/bvblzwwQZ8wE5P3Rss5L0TFkUAcdU+3BUHM+TwV4e8F9x10v1PjgWNBRJQzd1sjKKgGUBCeyCY7VeYDKn9AXI5llISgY/AAfCZwm2cbckMHZZJciMjm+U3Q1FCF+rfhlvUcMG1VEj8r9cGpmWIRjFYVm0NmpUDDNjlC3/lUfTCOOJJyM254EUw63XxabbK6EtDN1yQe8kYRcXH//2rtEwgtMBgqHVY+OOkekzGz8Ra3EBkh6jXrAQL3zKu/GwRlK7/a1OU5MQ7dWcTjbx1AQ6Zfyjg5bZ+idqPgMbqM9Zn2+OaSby8HEEXS0QeZVooDVf/6wdYO4MQ/0A=

View File

@ -55,7 +55,7 @@ deps: gx
$(gx_bin) --verbose install --global
$(gx-go_bin) rewrite
test: deps
go test -tags silent -v -covermode count -coverprofile=coverage.out .
go test -tags silent -v -covermode count -coverprofile=coverage.out ./...
rw: gx
$(gx-go_bin) rewrite
rwundo: gx

View File

@ -21,7 +21,8 @@ import (
// Cluster is the main IPFS cluster component. It provides
// the go-API for it and orchestrates the components that make up the system.
type Cluster struct {
ctx context.Context
ctx context.Context
cancel func()
id peer.ID
config *Config
@ -38,7 +39,6 @@ type Cluster struct {
shutdownLock sync.Mutex
shutdown bool
shutdownCh chan struct{}
doneCh chan struct{}
readyCh chan struct{}
wg sync.WaitGroup
@ -53,7 +53,7 @@ type Cluster struct {
// this call returns (consensus may still be bootstrapping). Use Cluster.Ready()
// if you need to wait until the peer is fully up.
func NewCluster(cfg *Config, api API, ipfs IPFSConnector, state State, tracker PinTracker) (*Cluster, error) {
ctx := context.Background()
ctx, cancel := context.WithCancel(context.Background())
host, err := makeHost(ctx, cfg)
if err != nil {
return nil, err
@ -65,17 +65,17 @@ func NewCluster(cfg *Config, api API, ipfs IPFSConnector, state State, tracker P
}
c := &Cluster{
ctx: ctx,
id: host.ID(),
config: cfg,
host: host,
api: api,
ipfs: ipfs,
state: state,
tracker: tracker,
shutdownCh: make(chan struct{}, 1),
doneCh: make(chan struct{}, 1),
readyCh: make(chan struct{}, 1),
ctx: ctx,
cancel: cancel,
id: host.ID(),
config: cfg,
host: host,
api: api,
ipfs: ipfs,
state: state,
tracker: tracker,
doneCh: make(chan struct{}, 1),
readyCh: make(chan struct{}, 1),
}
c.setupPeerManager()
@ -163,17 +163,8 @@ func (c *Cluster) stateSyncWatcher() {
// run provides a cancellable context and launches some goroutines
// before signaling readyCh
func (c *Cluster) run() {
c.wg.Add(1)
// cancellable context
go func() {
defer c.wg.Done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
c.ctx = ctx
go c.stateSyncWatcher()
go c.bootstrapAndReady()
<-c.shutdownCh
}()
go c.stateSyncWatcher()
go c.bootstrapAndReady()
}
func (c *Cluster) bootstrapAndReady() {
@ -258,6 +249,9 @@ func (c *Cluster) Shutdown() error {
c.peerManager.resetPeers()
}
// Cancel contexts
c.cancel()
if con := c.consensus; con != nil {
if err := con.Shutdown(); err != nil {
logger.Errorf("error stopping consensus: %s", err)
@ -280,7 +274,6 @@ func (c *Cluster) Shutdown() error {
logger.Errorf("error stopping PinTracker: %s", err)
return err
}
c.shutdownCh <- struct{}{}
c.wg.Wait()
c.host.Close() // Shutdown all network services
c.shutdown = true
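The goroutine that used to own the cancellable context and block on shutdownCh is gone: the context and its cancel function are created once in NewCluster and stored on the struct, and Shutdown simply calls c.cancel() before waiting on the WaitGroup. A reduced, self-contained sketch of the same lifecycle pattern (the Component type and the timings are illustrative, not cluster code):

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// Component mirrors the lifecycle used by Cluster after this change: the
// cancellable context is created in the constructor and its cancel function
// is kept so Shutdown can stop every context-aware goroutine.
type Component struct {
	ctx    context.Context
	cancel func()
	wg     sync.WaitGroup
}

func NewComponent() *Component {
	ctx, cancel := context.WithCancel(context.Background())
	c := &Component{ctx: ctx, cancel: cancel}
	c.wg.Add(1)
	go c.watcher() // replaces the old run()/shutdownCh goroutine
	return c
}

func (c *Component) watcher() {
	defer c.wg.Done()
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-c.ctx.Done():
			return
		case <-ticker.C:
			// periodic work, e.g. a state sync pass
		}
	}
}

func (c *Component) Shutdown() {
	c.cancel()  // cancel contexts instead of signaling a shutdown channel
	c.wg.Wait() // wait for goroutines to exit
}

func main() {
	c := NewComponent()
	time.Sleep(300 * time.Millisecond)
	c.Shutdown()
	fmt.Println("stopped cleanly")
}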

View File

@ -5,6 +5,8 @@ import (
"testing"
"github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/state/mapstate"
"github.com/ipfs/ipfs-cluster/test"
rpc "github.com/hsanjuan/go-libp2p-gorpc"
cid "github.com/ipfs/go-cid"
@ -37,7 +39,7 @@ func (ipfs *mockConnector) ID() (api.IPFSID, error) {
return api.IPFSID{}, errors.New("")
}
return api.IPFSID{
ID: testPeerID,
ID: test.TestPeerID1,
}, nil
}
@ -62,7 +64,7 @@ func (ipfs *mockConnector) PinLsCid(c *cid.Cid) (api.IPFSPinStatus, error) {
return api.IPFSPinStatusRecursive, nil
}
func (ipfs *mockConnector) PinLs() (map[string]api.IPFSPinStatus, error) {
func (ipfs *mockConnector) PinLs(filter string) (map[string]api.IPFSPinStatus, error) {
if ipfs.returnError {
return nil, errors.New("")
}
@ -70,11 +72,11 @@ func (ipfs *mockConnector) PinLs() (map[string]api.IPFSPinStatus, error) {
return m, nil
}
func testingCluster(t *testing.T) (*Cluster, *mockAPI, *mockConnector, *MapState, *MapPinTracker) {
func testingCluster(t *testing.T) (*Cluster, *mockAPI, *mockConnector, *mapstate.MapState, *MapPinTracker) {
api := &mockAPI{}
ipfs := &mockConnector{}
cfg := testingConfig()
st := NewMapState()
st := mapstate.NewMapState()
tracker := NewMapPinTracker(cfg)
cl, err := NewCluster(
@ -114,7 +116,7 @@ func TestClusterStateSync(t *testing.T) {
t.Fatal("expected an error as there is no state to sync")
}
c, _ := cid.Decode(testCid)
c, _ := cid.Decode(test.TestCid1)
err = cl.Pin(c)
if err != nil {
t.Fatal("pin should have worked:", err)
@ -158,7 +160,7 @@ func TestClusterPin(t *testing.T) {
defer cleanRaft()
defer cl.Shutdown()
c, _ := cid.Decode(testCid)
c, _ := cid.Decode(test.TestCid1)
err := cl.Pin(c)
if err != nil {
t.Fatal("pin should have worked:", err)
@ -177,7 +179,7 @@ func TestClusterUnpin(t *testing.T) {
defer cleanRaft()
defer cl.Shutdown()
c, _ := cid.Decode(testCid)
c, _ := cid.Decode(test.TestCid1)
err := cl.Unpin(c)
if err != nil {
t.Fatal("pin should have worked:", err)

View File

@ -6,36 +6,39 @@ import (
"testing"
"time"
"github.com/ipfs/ipfs-cluster/state/mapstate"
"github.com/ipfs/ipfs-cluster/test"
cid "github.com/ipfs/go-cid"
peer "github.com/libp2p/go-libp2p-peer"
)
func TestApplyToPin(t *testing.T) {
op := &clusterLogOp{
Arg: testCid,
Arg: test.TestCid1,
Type: LogOpPin,
ctx: context.Background(),
rpcClient: mockRPCClient(t),
rpcClient: test.NewMockRPCClient(t),
}
st := NewMapState()
st := mapstate.NewMapState()
op.ApplyTo(st)
pins := st.ListPins()
if len(pins) != 1 || pins[0].String() != testCid {
if len(pins) != 1 || pins[0].String() != test.TestCid1 {
t.Error("the state was not modified correctly")
}
}
func TestApplyToUnpin(t *testing.T) {
op := &clusterLogOp{
Arg: testCid,
Arg: test.TestCid1,
Type: LogOpUnpin,
ctx: context.Background(),
rpcClient: mockRPCClient(t),
rpcClient: test.NewMockRPCClient(t),
}
st := NewMapState()
c, _ := cid.Decode(testCid)
st := mapstate.NewMapState()
c, _ := cid.Decode(test.TestCid1)
st.AddPin(c)
op.ApplyTo(st)
pins := st.ListPins()
@ -52,10 +55,10 @@ func TestApplyToBadState(t *testing.T) {
}()
op := &clusterLogOp{
Arg: testCid,
Arg: test.TestCid1,
Type: LogOpUnpin,
ctx: context.Background(),
rpcClient: mockRPCClient(t),
rpcClient: test.NewMockRPCClient(t),
}
var st interface{}
@ -73,10 +76,10 @@ func TestApplyToBadCid(t *testing.T) {
Arg: "agadfaegf",
Type: LogOpPin,
ctx: context.Background(),
rpcClient: mockRPCClient(t),
rpcClient: test.NewMockRPCClient(t),
}
st := NewMapState()
st := mapstate.NewMapState()
op.ApplyTo(st)
}
@ -92,12 +95,12 @@ func testingConsensus(t *testing.T) *Consensus {
if err != nil {
t.Fatal("cannot create host:", err)
}
st := NewMapState()
st := mapstate.NewMapState()
cc, err := NewConsensus([]peer.ID{cfg.ID}, h, cfg.ConsensusDataFolder, st)
if err != nil {
t.Fatal("cannot create Consensus:", err)
}
cc.SetClient(mockRPCClient(t))
cc.SetClient(test.NewMockRPCClient(t))
<-cc.Ready()
return cc
}
@ -124,7 +127,7 @@ func TestConsensusPin(t *testing.T) {
defer cleanRaft() // Remember defer runs in LIFO order
defer cc.Shutdown()
c, _ := cid.Decode(testCid)
c, _ := cid.Decode(test.TestCid1)
err := cc.LogPin(c)
if err != nil {
t.Error("the operation did not make it to the log:", err)
@ -137,7 +140,7 @@ func TestConsensusPin(t *testing.T) {
}
pins := st.ListPins()
if len(pins) != 1 || pins[0].String() != testCid {
if len(pins) != 1 || pins[0].String() != test.TestCid1 {
t.Error("the added pin should be in the state")
}
}
@ -147,7 +150,7 @@ func TestConsensusUnpin(t *testing.T) {
defer cleanRaft()
defer cc.Shutdown()
c, _ := cid.Decode(testCid2)
c, _ := cid.Decode(test.TestCid2)
err := cc.LogUnpin(c)
if err != nil {
t.Error("the operation did not make it to the log:", err)

coverage.sh Executable file (28 lines added)
View File

@ -0,0 +1,28 @@
#!/bin/bash
if [ -z $COVERALLS_TOKEN ]
then
exit 1
fi
echo "mode: count" > fullcov.out
dirs=$(find ./* -maxdepth 10 -type d )
dirs=". $dirs"
for dir in $dirs;
do
if ls "$dir"/*.go &> /dev/null;
then
go test -coverprofile=profile.out -covermode=count -tags silent "$dir"
if [ $? -ne 0 ];
then
exit 1
fi
if [ -f profile.out ]
then
cat profile.out | grep -v "^mode: count" >> fullcov.out
fi
fi
done
$HOME/gopath/bin/goveralls -coverprofile=fullcov.out -service=travis-ci -repotoken $COVERALLS_TOKEN
rm -rf ./profile.out
rm -rf ./fullcov.out

View File

@ -13,6 +13,7 @@ import (
"github.com/urfave/cli"
ipfscluster "github.com/ipfs/ipfs-cluster"
"github.com/ipfs/ipfs-cluster/state/mapstate"
)
// ProgramName of this application
@ -234,7 +235,7 @@ func run(c *cli.Context) error {
proxy, err := ipfscluster.NewIPFSHTTPConnector(cfg)
checkErr("creating IPFS Connector component", err)
state := ipfscluster.NewMapState()
state := mapstate.NewMapState()
tracker := ipfscluster.NewMapPinTracker(cfg)
cluster, err := ipfscluster.NewCluster(
cfg,

View File

@ -422,10 +422,10 @@ func (ipfs *IPFSHTTPConnector) Unpin(hash *cid.Cid) error {
return nil
}
// PinLs performs a "pin ls" request against the configured IPFS daemon and
// returns a map of cid strings and their status.
func (ipfs *IPFSHTTPConnector) PinLs() (map[string]api.IPFSPinStatus, error) {
body, err := ipfs.get("pin/ls")
// PinLs performs a "pin ls --type typeFilter" request against the configured
// IPFS daemon and returns a map of cid strings and their status.
func (ipfs *IPFSHTTPConnector) PinLs(typeFilter string) (map[string]api.IPFSPinStatus, error) {
body, err := ipfs.get("pin/ls?type=" + typeFilter)
// Some error talking to the daemon
if err != nil {
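With the extra argument, callers can restrict the listing server-side ("recursive", "direct", "indirect" or "all") instead of filtering the result themselves. A hedged sketch against the updated IPFSConnector interface; the example package and the listRecursivePins helper are illustrative, only PinLs and IPFSPinStatus.IsPinned come from this changeset:

package example

import (
	ipfscluster "github.com/ipfs/ipfs-cluster"
)

// listRecursivePins passes the new type filter through to "pin ls --type
// recursive" and returns only the CIDs the daemon reports as pinned.
func listRecursivePins(ipfs ipfscluster.IPFSConnector) ([]string, error) {
	ipsMap, err := ipfs.PinLs("recursive")
	if err != nil {
		return nil, err
	}
	pinned := make([]string, 0, len(ipsMap))
	for cidStr, status := range ipsMap {
		if status.IsPinned() {
			pinned = append(pinned, cidStr)
		}
	}
	return pinned, nil
}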

View File

@ -8,27 +8,28 @@ import (
"testing"
"github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/test"
cid "github.com/ipfs/go-cid"
ma "github.com/multiformats/go-multiaddr"
)
func testIPFSConnectorConfig(mock *ipfsMock) *Config {
func testIPFSConnectorConfig(mock *test.IpfsMock) *Config {
cfg := testingConfig()
addr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", mock.addr, mock.port))
addr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", mock.Addr, mock.Port))
cfg.IPFSNodeAddr = addr
return cfg
}
func testIPFSConnector(t *testing.T) (*IPFSHTTPConnector, *ipfsMock) {
mock := newIpfsMock()
func testIPFSConnector(t *testing.T) (*IPFSHTTPConnector, *test.IpfsMock) {
mock := test.NewIpfsMock()
cfg := testIPFSConnectorConfig(mock)
ipfs, err := NewIPFSHTTPConnector(cfg)
if err != nil {
t.Fatal("creating an IPFSConnector should work: ", err)
}
ipfs.SetClient(mockRPCClient(t))
ipfs.SetClient(test.NewMockRPCClient(t))
return ipfs, mock
}
@ -45,7 +46,7 @@ func TestIPFSID(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if id.ID != testPeerID {
if id.ID != test.TestPeerID1 {
t.Error("expected testPeerID")
}
if len(id.Addresses) != 1 {
@ -68,7 +69,7 @@ func TestIPFSPin(t *testing.T) {
ipfs, mock := testIPFSConnector(t)
defer mock.Close()
defer ipfs.Shutdown()
c, _ := cid.Decode(testCid)
c, _ := cid.Decode(test.TestCid1)
err := ipfs.Pin(c)
if err != nil {
t.Error("expected success pinning cid")
@ -81,7 +82,7 @@ func TestIPFSPin(t *testing.T) {
t.Error("cid should have been pinned")
}
c2, _ := cid.Decode(errorCid)
c2, _ := cid.Decode(test.ErrorCid)
err = ipfs.Pin(c2)
if err == nil {
t.Error("expected error pinning cid")
@ -92,7 +93,7 @@ func TestIPFSUnpin(t *testing.T) {
ipfs, mock := testIPFSConnector(t)
defer mock.Close()
defer ipfs.Shutdown()
c, _ := cid.Decode(testCid)
c, _ := cid.Decode(test.TestCid1)
err := ipfs.Unpin(c)
if err != nil {
t.Error("expected success unpinning non-pinned cid")
@ -108,8 +109,8 @@ func TestIPFSPinLsCid(t *testing.T) {
ipfs, mock := testIPFSConnector(t)
defer mock.Close()
defer ipfs.Shutdown()
c, _ := cid.Decode(testCid)
c2, _ := cid.Decode(testCid2)
c, _ := cid.Decode(test.TestCid1)
c2, _ := cid.Decode(test.TestCid2)
ipfs.Pin(c)
ips, err := ipfs.PinLsCid(c)
@ -127,12 +128,12 @@ func TestIPFSPinLs(t *testing.T) {
ipfs, mock := testIPFSConnector(t)
defer mock.Close()
defer ipfs.Shutdown()
c, _ := cid.Decode(testCid)
c2, _ := cid.Decode(testCid2)
c, _ := cid.Decode(test.TestCid1)
c2, _ := cid.Decode(test.TestCid2)
ipfs.Pin(c)
ipfs.Pin(c2)
ipsMap, err := ipfs.PinLs()
ipsMap, err := ipfs.PinLs("")
if err != nil {
t.Error("should not error")
}
@ -141,7 +142,7 @@ func TestIPFSPinLs(t *testing.T) {
t.Fatal("the map does not contain expected keys")
}
if !ipsMap[testCid].IsPinned() || !ipsMap[testCid2].IsPinned() {
if !ipsMap[test.TestCid1].IsPinned() || !ipsMap[test.TestCid2].IsPinned() {
t.Error("c1 and c2 should appear pinned")
}
}
@ -193,7 +194,7 @@ func TestIPFSProxyPin(t *testing.T) {
res, err := http.Get(fmt.Sprintf("http://%s:%s/api/v0/pin/add?arg=%s",
host,
port,
testCid))
test.TestCid1))
if err != nil {
t.Fatal("should have succeeded: ", err)
}
@ -208,7 +209,7 @@ func TestIPFSProxyPin(t *testing.T) {
t.Fatal(err)
}
if len(resp.Pins) != 1 || resp.Pins[0] != testCid {
if len(resp.Pins) != 1 || resp.Pins[0] != test.TestCid1 {
t.Error("wrong response")
}
res.Body.Close()
@ -217,7 +218,7 @@ func TestIPFSProxyPin(t *testing.T) {
res, err = http.Get(fmt.Sprintf("http://%s:%s/api/v0/pin/add?arg=%s",
host,
port,
errorCid))
test.ErrorCid))
if err != nil {
t.Fatal("request should work: ", err)
}
@ -232,7 +233,7 @@ func TestIPFSProxyPin(t *testing.T) {
t.Fatal(err)
}
if respErr.Message != errBadCid.Error() {
if respErr.Message != test.ErrBadCid.Error() {
t.Error("wrong response")
}
res.Body.Close()
@ -249,7 +250,7 @@ func TestIPFSProxyUnpin(t *testing.T) {
res, err := http.Get(fmt.Sprintf("http://%s:%s/api/v0/pin/rm?arg=%s",
host,
port,
testCid))
test.TestCid1))
if err != nil {
t.Fatal("should have succeeded: ", err)
}
@ -265,7 +266,7 @@ func TestIPFSProxyUnpin(t *testing.T) {
t.Fatal(err)
}
if len(resp.Pins) != 1 || resp.Pins[0] != testCid {
if len(resp.Pins) != 1 || resp.Pins[0] != test.TestCid1 {
t.Error("wrong response")
}
res.Body.Close()
@ -274,7 +275,7 @@ func TestIPFSProxyUnpin(t *testing.T) {
res, err = http.Get(fmt.Sprintf("http://%s:%s/api/v0/pin/rm?arg=%s",
host,
port,
errorCid))
test.ErrorCid))
if err != nil {
t.Fatal("request should work: ", err)
}
@ -289,7 +290,7 @@ func TestIPFSProxyUnpin(t *testing.T) {
t.Fatal(err)
}
if respErr.Message != errBadCid.Error() {
if respErr.Message != test.ErrBadCid.Error() {
t.Error("wrong response")
}
res.Body.Close()
@ -306,7 +307,7 @@ func TestIPFSProxyPinLs(t *testing.T) {
res, err := http.Get(fmt.Sprintf("http://%s:%s/api/v0/pin/ls?arg=%s",
host,
port,
testCid))
test.TestCid1))
if err != nil {
t.Fatal("should have succeeded: ", err)
}
@ -322,7 +323,7 @@ func TestIPFSProxyPinLs(t *testing.T) {
t.Fatal(err)
}
_, ok := resp.Keys[testCid]
_, ok := resp.Keys[test.TestCid1]
if len(resp.Keys) != 1 || !ok {
t.Error("wrong response")
}

View File

@ -43,7 +43,7 @@ type IPFSConnector interface {
Pin(*cid.Cid) error
Unpin(*cid.Cid) error
PinLsCid(*cid.Cid) (api.IPFSPinStatus, error)
PinLs() (map[string]api.IPFSPinStatus, error)
PinLs(typeFilter string) (map[string]api.IPFSPinStatus, error)
}
// Peered represents a component which needs to be aware of the peers

View File

@ -9,6 +9,8 @@ import (
"time"
"github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/state/mapstate"
"github.com/ipfs/ipfs-cluster/test"
cid "github.com/ipfs/go-cid"
crypto "github.com/libp2p/go-libp2p-crypto"
@ -16,15 +18,6 @@ import (
ma "github.com/multiformats/go-multiaddr"
)
var (
testCid1 = "QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmq"
testCid = testCid1
testCid2 = "QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmma"
testCid3 = "QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmb"
errorCid = "QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmc"
testPeerID, _ = peer.IDB58Decode("QmXZrtE5jQwXNqCJMfHUTQkvhQ4ZAnqMnmzFMJfLewuabc")
)
//TestClusters*
var (
// number of clusters to create
@ -58,12 +51,12 @@ func randomBytes() []byte {
return bs
}
func createComponents(t *testing.T, i int) (*Config, *RESTAPI, *IPFSHTTPConnector, *MapState, *MapPinTracker, *ipfsMock) {
mock := newIpfsMock()
func createComponents(t *testing.T, i int) (*Config, *RESTAPI, *IPFSHTTPConnector, *mapstate.MapState, *MapPinTracker, *test.IpfsMock) {
mock := test.NewIpfsMock()
clusterAddr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", clusterPort+i))
apiAddr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", apiPort+i))
proxyAddr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", ipfsProxyPort+i))
nodeAddr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", mock.addr, mock.port))
nodeAddr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", mock.Addr, mock.Port))
priv, pub, err := crypto.GenerateKeyPair(crypto.RSA, 2048)
checkErr(t, err)
pid, err := peer.IDFromPublicKey(pub)
@ -84,33 +77,33 @@ func createComponents(t *testing.T, i int) (*Config, *RESTAPI, *IPFSHTTPConnecto
checkErr(t, err)
ipfs, err := NewIPFSHTTPConnector(cfg)
checkErr(t, err)
state := NewMapState()
state := mapstate.NewMapState()
tracker := NewMapPinTracker(cfg)
return cfg, api, ipfs, state, tracker, mock
}
func createCluster(t *testing.T, cfg *Config, api *RESTAPI, ipfs *IPFSHTTPConnector, state *MapState, tracker *MapPinTracker) *Cluster {
func createCluster(t *testing.T, cfg *Config, api *RESTAPI, ipfs *IPFSHTTPConnector, state *mapstate.MapState, tracker *MapPinTracker) *Cluster {
cl, err := NewCluster(cfg, api, ipfs, state, tracker)
checkErr(t, err)
<-cl.Ready()
return cl
}
func createOnePeerCluster(t *testing.T, nth int) (*Cluster, *ipfsMock) {
func createOnePeerCluster(t *testing.T, nth int) (*Cluster, *test.IpfsMock) {
cfg, api, ipfs, state, tracker, mock := createComponents(t, nth)
cl := createCluster(t, cfg, api, ipfs, state, tracker)
return cl, mock
}
func createClusters(t *testing.T) ([]*Cluster, []*ipfsMock) {
func createClusters(t *testing.T) ([]*Cluster, []*test.IpfsMock) {
os.RemoveAll("./e2eTestRaft")
cfgs := make([]*Config, nClusters, nClusters)
apis := make([]*RESTAPI, nClusters, nClusters)
ipfss := make([]*IPFSHTTPConnector, nClusters, nClusters)
states := make([]*MapState, nClusters, nClusters)
states := make([]*mapstate.MapState, nClusters, nClusters)
trackers := make([]*MapPinTracker, nClusters, nClusters)
ipfsMocks := make([]*ipfsMock, nClusters, nClusters)
ipfsMocks := make([]*test.IpfsMock, nClusters, nClusters)
clusters := make([]*Cluster, nClusters, nClusters)
clusterPeers := make([]ma.Multiaddr, nClusters, nClusters)
@ -164,7 +157,7 @@ func createClusters(t *testing.T) ([]*Cluster, []*ipfsMock) {
return clusters, ipfsMocks
}
func shutdownClusters(t *testing.T, clusters []*Cluster, m []*ipfsMock) {
func shutdownClusters(t *testing.T, clusters []*Cluster, m []*test.IpfsMock) {
for i, c := range clusters {
m[i].Close()
err := c.Shutdown()
@ -253,7 +246,7 @@ func TestClustersPeers(t *testing.T) {
func TestClustersPin(t *testing.T) {
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
exampleCid, _ := cid.Decode(testCid)
exampleCid, _ := cid.Decode(test.TestCid1)
prefix := exampleCid.Prefix()
for i := 0; i < nPins; i++ {
j := rand.Intn(nClusters) // choose a random cluster peer
@ -316,7 +309,7 @@ func TestClustersPin(t *testing.T) {
func TestClustersStatusAll(t *testing.T) {
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
h, _ := cid.Decode(testCid)
h, _ := cid.Decode(test.TestCid1)
clusters[0].Pin(h)
delay()
// Global status
@ -328,7 +321,7 @@ func TestClustersStatusAll(t *testing.T) {
if len(statuses) == 0 {
t.Fatal("bad status. Expected one item")
}
if statuses[0].Cid.String() != testCid {
if statuses[0].Cid.String() != test.TestCid1 {
t.Error("bad cid in status")
}
info := statuses[0].PeerMap
@ -360,8 +353,8 @@ func TestClustersStatusAll(t *testing.T) {
func TestClustersSyncAllLocal(t *testing.T) {
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
h, _ := cid.Decode(errorCid) // This cid always fails
h2, _ := cid.Decode(testCid2)
h, _ := cid.Decode(test.ErrorCid) // This cid always fails
h2, _ := cid.Decode(test.TestCid2)
clusters[0].Pin(h)
clusters[0].Pin(h2)
delay()
@ -388,8 +381,8 @@ func TestClustersSyncAllLocal(t *testing.T) {
func TestClustersSyncLocal(t *testing.T) {
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
h, _ := cid.Decode(errorCid) // This cid always fails
h2, _ := cid.Decode(testCid2)
h, _ := cid.Decode(test.ErrorCid) // This cid always fails
h2, _ := cid.Decode(test.TestCid2)
clusters[0].Pin(h)
clusters[0].Pin(h2)
delay()
@ -419,8 +412,8 @@ func TestClustersSyncLocal(t *testing.T) {
func TestClustersSyncAll(t *testing.T) {
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
h, _ := cid.Decode(errorCid) // This cid always fails
h2, _ := cid.Decode(testCid2)
h, _ := cid.Decode(test.ErrorCid) // This cid always fails
h2, _ := cid.Decode(test.TestCid2)
clusters[0].Pin(h)
clusters[0].Pin(h2)
delay()
@ -433,8 +426,8 @@ func TestClustersSyncAll(t *testing.T) {
if len(ginfos) != 1 {
t.Fatal("expected globalsync to have 1 elements")
}
if ginfos[0].Cid.String() != errorCid {
t.Error("expected globalsync to have problems with errorCid")
if ginfos[0].Cid.String() != test.ErrorCid {
t.Error("expected globalsync to have problems with test.ErrorCid")
}
for _, c := range clusters {
inf, ok := ginfos[0].PeerMap[c.host.ID()]
@ -450,8 +443,8 @@ func TestClustersSyncAll(t *testing.T) {
func TestClustersSync(t *testing.T) {
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
h, _ := cid.Decode(errorCid) // This cid always fails
h2, _ := cid.Decode(testCid2)
h, _ := cid.Decode(test.ErrorCid) // This cid always fails
h2, _ := cid.Decode(test.TestCid2)
clusters[0].Pin(h)
clusters[0].Pin(h2)
delay()
@ -471,8 +464,8 @@ func TestClustersSync(t *testing.T) {
t.Error("pinInfo error should not be empty")
}
if ginfo.Cid.String() != errorCid {
t.Error("GlobalPinInfo should be for errorCid")
if ginfo.Cid.String() != test.ErrorCid {
t.Error("GlobalPinInfo should be for test.ErrorCid")
}
for _, c := range clusters {
@ -493,7 +486,7 @@ func TestClustersSync(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if ginfo.Cid.String() != testCid2 {
if ginfo.Cid.String() != test.TestCid2 {
t.Error("GlobalPinInfo should be for testrCid2")
}
@ -511,8 +504,8 @@ func TestClustersSync(t *testing.T) {
func TestClustersRecoverLocal(t *testing.T) {
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
h, _ := cid.Decode(errorCid) // This cid always fails
h2, _ := cid.Decode(testCid2)
h, _ := cid.Decode(test.ErrorCid) // This cid always fails
h2, _ := cid.Decode(test.TestCid2)
clusters[0].Pin(h)
clusters[0].Pin(h2)
@ -543,8 +536,8 @@ func TestClustersRecoverLocal(t *testing.T) {
func TestClustersRecover(t *testing.T) {
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
h, _ := cid.Decode(errorCid) // This cid always fails
h2, _ := cid.Decode(testCid2)
h, _ := cid.Decode(test.ErrorCid) // This cid always fails
h2, _ := cid.Decode(test.TestCid2)
clusters[0].Pin(h)
clusters[0].Pin(h2)
@ -583,7 +576,7 @@ func TestClustersRecover(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if ginfo.Cid.String() != testCid2 {
if ginfo.Cid.String() != test.TestCid2 {
t.Error("GlobalPinInfo should be for testrCid2")
}

View File

@ -261,7 +261,7 @@ func (mpt *MapPinTracker) SyncAll() ([]api.PinInfo, error) {
err := mpt.rpcClient.Call("",
"Cluster",
"IPFSPinLs",
struct{}{},
"recursive",
&ipsMap)
if err != nil {
mpt.mux.Lock()

View File

@ -6,13 +6,15 @@ import (
"testing"
"time"
"github.com/ipfs/ipfs-cluster/test"
cid "github.com/ipfs/go-cid"
ma "github.com/multiformats/go-multiaddr"
)
func peerManagerClusters(t *testing.T) ([]*Cluster, []*ipfsMock) {
func peerManagerClusters(t *testing.T) ([]*Cluster, []*test.IpfsMock) {
cls := make([]*Cluster, nClusters, nClusters)
mocks := make([]*ipfsMock, nClusters, nClusters)
mocks := make([]*test.IpfsMock, nClusters, nClusters)
var wg sync.WaitGroup
for i := 0; i < nClusters; i++ {
wg.Add(1)
@ -53,7 +55,7 @@ func TestClustersPeerAdd(t *testing.T) {
}
}
h, _ := cid.Decode(testCid)
h, _ := cid.Decode(test.TestCid1)
err := clusters[1].Pin(h)
if err != nil {
t.Fatal(err)
@ -217,7 +219,7 @@ func TestClustersPeerJoin(t *testing.T) {
t.Fatal(err)
}
}
hash, _ := cid.Decode(testCid)
hash, _ := cid.Decode(test.TestCid1)
clusters[0].Pin(hash)
delay()
@ -250,7 +252,7 @@ func TestClustersPeerJoinAllAtOnce(t *testing.T) {
}
runF(t, clusters[1:], f)
hash, _ := cid.Decode(testCid)
hash, _ := cid.Decode(test.TestCid1)
clusters[0].Pin(hash)
delay()
@ -292,7 +294,7 @@ func TestClustersPeerJoinAllAtOnceWithRandomBootstrap(t *testing.T) {
}
runF(t, clusters[2:], f)
hash, _ := cid.Decode(testCid)
hash, _ := cid.Decode(test.TestCid1)
clusters[0].Pin(hash)
delay()

View File

@ -9,6 +9,7 @@ import (
"testing"
"github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/test"
)
var (
@ -26,7 +27,7 @@ func testRESTAPI(t *testing.T) *RESTAPI {
// No keep alive! Otherwise tests hang with
// connections re-used from previous tests
rest.server.SetKeepAlivesEnabled(false)
rest.SetClient(mockRPCClient(t))
rest.SetClient(test.NewMockRPCClient(t))
return rest
}
@ -81,7 +82,7 @@ func TestRestAPIIDEndpoint(t *testing.T) {
defer rest.Shutdown()
id := api.IDSerial{}
makeGet(t, "/id", &id)
if id.ID != testPeerID.Pretty() {
if id.ID != test.TestPeerID1.Pretty() {
t.Error("expected correct id")
}
}
@ -105,7 +106,7 @@ func TestRESTAPIPeerstEndpoint(t *testing.T) {
if len(list) != 1 {
t.Fatal("expected 1 element")
}
if list[0].ID != testPeerID.Pretty() {
if list[0].ID != test.TestPeerID1.Pretty() {
t.Error("expected a different peer id list: ", list)
}
}
@ -116,11 +117,11 @@ func TestRESTAPIPeerAddEndpoint(t *testing.T) {
id := api.IDSerial{}
// post with valid body
body := fmt.Sprintf("{\"peer_multiaddress\":\"/ip4/1.2.3.4/tcp/1234/ipfs/%s\"}", testPeerID.Pretty())
body := fmt.Sprintf("{\"peer_multiaddress\":\"/ip4/1.2.3.4/tcp/1234/ipfs/%s\"}", test.TestPeerID1.Pretty())
t.Log(body)
makePost(t, "/peers", []byte(body), &id)
if id.ID != testPeerID.Pretty() {
if id.ID != test.TestPeerID1.Pretty() {
t.Error("expected correct ID")
}
if id.Error != "" {
@ -144,7 +145,7 @@ func TestRESTAPIPeerRemoveEndpoint(t *testing.T) {
rest := testRESTAPI(t)
defer rest.Shutdown()
makeDelete(t, "/peers/"+testPeerID.Pretty(), &struct{}{})
makeDelete(t, "/peers/"+test.TestPeerID1.Pretty(), &struct{}{})
}
func TestRESTAPIPinEndpoint(t *testing.T) {
@ -152,11 +153,11 @@ func TestRESTAPIPinEndpoint(t *testing.T) {
defer rest.Shutdown()
// test regular post
makePost(t, "/pins/"+testCid, []byte{}, &struct{}{})
makePost(t, "/pins/"+test.TestCid1, []byte{}, &struct{}{})
errResp := errorResp{}
makePost(t, "/pins/"+errorCid, []byte{}, &errResp)
if errResp.Message != errBadCid.Error() {
makePost(t, "/pins/"+test.ErrorCid, []byte{}, &errResp)
if errResp.Message != test.ErrBadCid.Error() {
t.Error("expected different error: ", errResp.Message)
}
@ -171,11 +172,11 @@ func TestRESTAPIUnpinEndpoint(t *testing.T) {
defer rest.Shutdown()
// test regular delete
makeDelete(t, "/pins/"+testCid, &struct{}{})
makeDelete(t, "/pins/"+test.TestCid1, &struct{}{})
errResp := errorResp{}
makeDelete(t, "/pins/"+errorCid, &errResp)
if errResp.Message != errBadCid.Error() {
makeDelete(t, "/pins/"+test.ErrorCid, &errResp)
if errResp.Message != test.ErrBadCid.Error() {
t.Error("expected different error: ", errResp.Message)
}
@ -192,8 +193,8 @@ func TestRESTAPIPinListEndpoint(t *testing.T) {
var resp []string
makeGet(t, "/pinlist", &resp)
if len(resp) != 3 ||
resp[0] != testCid1 || resp[1] != testCid2 ||
resp[2] != testCid3 {
resp[0] != test.TestCid1 || resp[1] != test.TestCid2 ||
resp[2] != test.TestCid3 {
t.Error("unexpected pin list: ", resp)
}
}
@ -205,8 +206,8 @@ func TestRESTAPIStatusAllEndpoint(t *testing.T) {
var resp []api.GlobalPinInfoSerial
makeGet(t, "/pins", &resp)
if len(resp) != 3 ||
resp[0].Cid != testCid1 ||
resp[1].PeerMap[testPeerID.Pretty()].Status != "pinning" {
resp[0].Cid != test.TestCid1 ||
resp[1].PeerMap[test.TestPeerID1.Pretty()].Status != "pinning" {
t.Errorf("unexpected statusResp:\n %+v", resp)
}
}
@ -216,14 +217,14 @@ func TestRESTAPIStatusEndpoint(t *testing.T) {
defer rest.Shutdown()
var resp api.GlobalPinInfoSerial
makeGet(t, "/pins/"+testCid, &resp)
makeGet(t, "/pins/"+test.TestCid1, &resp)
if resp.Cid != testCid {
if resp.Cid != test.TestCid1 {
t.Error("expected the same cid")
}
info, ok := resp.PeerMap[testPeerID.Pretty()]
info, ok := resp.PeerMap[test.TestPeerID1.Pretty()]
if !ok {
t.Fatal("expected info for testPeerID")
t.Fatal("expected info for test.TestPeerID1")
}
if info.Status != "pinned" {
t.Error("expected different status")
@ -238,8 +239,8 @@ func TestRESTAPISyncAllEndpoint(t *testing.T) {
makePost(t, "/pins/sync", []byte{}, &resp)
if len(resp) != 3 ||
resp[0].Cid != testCid1 ||
resp[1].PeerMap[testPeerID.Pretty()].Status != "pinning" {
resp[0].Cid != test.TestCid1 ||
resp[1].PeerMap[test.TestPeerID1.Pretty()].Status != "pinning" {
t.Errorf("unexpected statusResp:\n %+v", resp)
}
}
@ -249,14 +250,14 @@ func TestRESTAPISyncEndpoint(t *testing.T) {
defer rest.Shutdown()
var resp api.GlobalPinInfoSerial
makePost(t, "/pins/"+testCid+"/sync", []byte{}, &resp)
makePost(t, "/pins/"+test.TestCid1+"/sync", []byte{}, &resp)
if resp.Cid != testCid {
if resp.Cid != test.TestCid1 {
t.Error("expected the same cid")
}
info, ok := resp.PeerMap[testPeerID.Pretty()]
info, ok := resp.PeerMap[test.TestPeerID1.Pretty()]
if !ok {
t.Fatal("expected info for testPeerID")
t.Fatal("expected info for test.TestPeerID1")
}
if info.Status != "pinned" {
t.Error("expected different status")
@ -268,14 +269,14 @@ func TestRESTAPIRecoverEndpoint(t *testing.T) {
defer rest.Shutdown()
var resp api.GlobalPinInfoSerial
makePost(t, "/pins/"+testCid+"/recover", []byte{}, &resp)
makePost(t, "/pins/"+test.TestCid1+"/recover", []byte{}, &resp)
if resp.Cid != testCid {
if resp.Cid != test.TestCid1 {
t.Error("expected the same cid")
}
info, ok := resp.PeerMap[testPeerID.Pretty()]
info, ok := resp.PeerMap[test.TestPeerID1.Pretty()]
if !ok {
t.Fatal("expected info for testPeerID")
t.Fatal("expected info for test.TestPeerID1")
}
if info.Status != "pinned" {
t.Error("expected different status")

View File

@ -213,8 +213,8 @@ func (rpcapi *RPCAPI) IPFSPinLsCid(in api.CidArgSerial, out *api.IPFSPinStatus)
}
// IPFSPinLs runs IPFSConnector.PinLs().
func (rpcapi *RPCAPI) IPFSPinLs(in struct{}, out *map[string]api.IPFSPinStatus) error {
m, err := rpcapi.c.ipfs.PinLs()
func (rpcapi *RPCAPI) IPFSPinLs(in string, out *map[string]api.IPFSPinStatus) error {
m, err := rpcapi.c.ipfs.PinLs(in)
*out = m
return err
}
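Since the RPC input is now a plain string, remote callers forward the filter the same way MapPinTracker.SyncAll does above. A small sketch of such a call; only the method name, argument and result type come from this diff, the helper itself is illustrative:

package example

import (
	rpc "github.com/hsanjuan/go-libp2p-gorpc"

	"github.com/ipfs/ipfs-cluster/api"
)

// pinLsOverRPC mirrors the call MapPinTracker.SyncAll now makes: the filter
// string replaces the previous empty struct{} input.
func pinLsOverRPC(client *rpc.Client) (map[string]api.IPFSPinStatus, error) {
	var ipsMap map[string]api.IPFSPinStatus
	err := client.Call("", "Cluster", "IPFSPinLs", "recursive", &ipsMap)
	return ipsMap, err
}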

View File

@ -1,4 +1,4 @@
package ipfscluster
package mapstate
import (
"sync"
@ -9,17 +9,14 @@ import (
// MapState is a very simple database to store the state of the system
// using a Go map. It is thread safe. It implements the State interface.
type MapState struct {
pinMux sync.RWMutex
PinMap map[string]struct{}
peerMux sync.RWMutex
PeerMap map[string]string
pinMux sync.RWMutex
PinMap map[string]struct{}
}
// NewMapState initializes the internal map and returns a new MapState object.
func NewMapState() *MapState {
return &MapState{
PinMap: make(map[string]struct{}),
PeerMap: make(map[string]string),
PinMap: make(map[string]struct{}),
}
}
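MapState now lives in its own state/mapstate package and only tracks pins (the PeerMap it carried is gone). A hedged sketch of the relocated package in isolation; NewMapState, AddPin and ListPins appear in the tests above, while the trackOnePin helper and the printing are illustrative:

package example

import (
	"fmt"

	cid "github.com/ipfs/go-cid"

	"github.com/ipfs/ipfs-cluster/state/mapstate"
	"github.com/ipfs/ipfs-cluster/test"
)

// trackOnePin builds the state with the new constructor path
// (mapstate.NewMapState instead of ipfscluster.NewMapState), adds one of the
// shared test CIDs and lists it back.
func trackOnePin() error {
	st := mapstate.NewMapState()
	c, err := cid.Decode(test.TestCid1)
	if err != nil {
		return err
	}
	st.AddPin(c)
	for _, p := range st.ListPins() {
		fmt.Println("pinned:", p.String())
	}
	return nil
}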

test/cids.go Normal file (13 lines added)
View File

@ -0,0 +1,13 @@
package test
import peer "github.com/libp2p/go-libp2p-peer"
var (
TestCid1 = "QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmq"
TestCid2 = "QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmma"
TestCid3 = "QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmb"
ErrorCid = "QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmc"
TestPeerID1, _ = peer.IDB58Decode("QmXZrtE5jQwXNqCJMfHUTQkvhQ4ZAnqMnmzFMJfLewuabc")
TestPeerID2, _ = peer.IDB58Decode("QmUZ13osndQ5uL4tPWHXe3iBgBgq9gfewcBMSCAuMBsDJ6")
TestPeerID3, _ = peer.IDB58Decode("QmPGDFvBkgWhvzEK9qaTWrWurSwqXNmhnK3hgELPdZZNPa")
)

View File

@ -1,4 +1,4 @@
package ipfscluster
package test
import (
"encoding/json"
@ -9,17 +9,17 @@ import (
"strconv"
"strings"
"github.com/ipfs/ipfs-cluster/state/mapstate"
cid "github.com/ipfs/go-cid"
)
// This is an ipfs daemon mock which should sustain the functionality used by
// ipfscluster.
type ipfsMock struct {
// IpfsMock is an ipfs daemon mock which should sustain the functionality used by ipfscluster.
type IpfsMock struct {
server *httptest.Server
addr string
port int
pinMap *MapState
Addr string
Port int
pinMap *mapstate.MapState
}
type mockPinResp struct {
@ -39,9 +39,15 @@ type ipfsErr struct {
Message string
}
func newIpfsMock() *ipfsMock {
st := NewMapState()
m := &ipfsMock{
type idResp struct {
ID string
Addresses []string
}
// NewIpfsMock returns a new mock.
func NewIpfsMock() *IpfsMock {
st := mapstate.NewMapState()
m := &IpfsMock{
pinMap: st,
}
ts := httptest.NewServer(http.HandlerFunc(m.handler))
@ -51,21 +57,21 @@ func newIpfsMock() *ipfsMock {
h := strings.Split(url.Host, ":")
i, _ := strconv.Atoi(h[1])
m.port = i
m.addr = h[0]
m.Port = i
m.Addr = h[0]
return m
}
// FIXME: what if IPFS API changes?
func (m *ipfsMock) handler(w http.ResponseWriter, r *http.Request) {
func (m *IpfsMock) handler(w http.ResponseWriter, r *http.Request) {
p := r.URL.Path
endp := strings.TrimPrefix(p, "/api/v0/")
var cidStr string
switch endp {
case "id":
resp := ipfsIDResp{
ID: testPeerID.Pretty(),
resp := idResp{
ID: TestPeerID1.Pretty(),
Addresses: []string{
"/ip4/0.0.0.0/tcp/1234",
},
@ -79,7 +85,7 @@ func (m *ipfsMock) handler(w http.ResponseWriter, r *http.Request) {
goto ERROR
}
cidStr = arg[0]
if cidStr == errorCid {
if cidStr == ErrorCid {
goto ERROR
}
c, err := cid.Decode(cidStr)
@ -153,6 +159,6 @@ ERROR:
w.WriteHeader(http.StatusInternalServerError)
}
func (m *ipfsMock) Close() {
func (m *IpfsMock) Close() {
m.server.Close()
}
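Because Addr and Port are exported now, packages outside the root can point a connector at the mock daemon, as testIPFSConnectorConfig does above. A minimal sketch; the mockNodeAddr helper is illustrative, while NewIpfsMock, Addr, Port and Close come from this file:

package example

import (
	"fmt"

	ma "github.com/multiformats/go-multiaddr"

	"github.com/ipfs/ipfs-cluster/test"
)

// mockNodeAddr starts the shared IPFS daemon mock and returns the multiaddr
// that a connector's IPFSNodeAddr can be set to, plus a stop function.
func mockNodeAddr() (ma.Multiaddr, func(), error) {
	mock := test.NewIpfsMock()
	addr, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", mock.Addr, mock.Port))
	if err != nil {
		mock.Close()
		return nil, nil, err
	}
	return addr, mock.Close, nil
}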

View File

@ -1,4 +1,4 @@
package ipfscluster
package test
import (
"errors"
@ -12,11 +12,13 @@ import (
peer "github.com/libp2p/go-libp2p-peer"
)
var errBadCid = errors.New("this is an expected error when using errorCid")
var ErrBadCid = errors.New("this is an expected error when using ErrorCid")
type mockService struct{}
func mockRPCClient(t *testing.T) *rpc.Client {
// NewMockRPCClient creates a mock ipfs-cluster RPC server and returns
// a client to it.
func NewMockRPCClient(t *testing.T) *rpc.Client {
s := rpc.NewServer(nil, "mock")
c := rpc.NewClientWithServer(nil, "mock", s)
err := s.RegisterName("Cluster", &mockService{})
@ -27,21 +29,21 @@ func mockRPCClient(t *testing.T) *rpc.Client {
}
func (mock *mockService) Pin(in api.CidArgSerial, out *struct{}) error {
if in.Cid == errorCid {
return errBadCid
if in.Cid == ErrorCid {
return ErrBadCid
}
return nil
}
func (mock *mockService) Unpin(in api.CidArgSerial, out *struct{}) error {
if in.Cid == errorCid {
return errBadCid
if in.Cid == ErrorCid {
return ErrBadCid
}
return nil
}
func (mock *mockService) PinList(in struct{}, out *[]string) error {
*out = []string{testCid, testCid2, testCid3}
*out = []string{TestCid1, TestCid2, TestCid3}
return nil
}
@ -50,11 +52,11 @@ func (mock *mockService) ID(in struct{}, out *api.IDSerial) error {
// DefaultConfigCrypto,
// DefaultConfigKeyLength)
*out = api.ID{
ID: testPeerID,
ID: TestPeerID1,
//PublicKey: pubkey,
Version: "0.0.mock",
IPFS: api.IPFSID{
ID: testPeerID,
ID: TestPeerID1,
},
}.ToSerial()
return nil
@ -84,17 +86,26 @@ func (mock *mockService) PeerRemove(in peer.ID, out *struct{}) error {
return nil
}
// FIXME: dup from util.go
func globalPinInfoSliceToSerial(gpi []api.GlobalPinInfo) []api.GlobalPinInfoSerial {
gpis := make([]api.GlobalPinInfoSerial, len(gpi), len(gpi))
for i, v := range gpi {
gpis[i] = v.ToSerial()
}
return gpis
}
func (mock *mockService) StatusAll(in struct{}, out *[]api.GlobalPinInfoSerial) error {
c1, _ := cid.Decode(testCid1)
c2, _ := cid.Decode(testCid2)
c3, _ := cid.Decode(testCid3)
c1, _ := cid.Decode(TestCid1)
c2, _ := cid.Decode(TestCid2)
c3, _ := cid.Decode(TestCid3)
*out = globalPinInfoSliceToSerial([]api.GlobalPinInfo{
{
Cid: c1,
PeerMap: map[peer.ID]api.PinInfo{
testPeerID: {
TestPeerID1: {
Cid: c1,
Peer: testPeerID,
Peer: TestPeerID1,
Status: api.TrackerStatusPinned,
TS: time.Now(),
},
@ -103,9 +114,9 @@ func (mock *mockService) StatusAll(in struct{}, out *[]api.GlobalPinInfoSerial)
{
Cid: c2,
PeerMap: map[peer.ID]api.PinInfo{
testPeerID: {
TestPeerID1: {
Cid: c2,
Peer: testPeerID,
Peer: TestPeerID1,
Status: api.TrackerStatusPinning,
TS: time.Now(),
},
@ -114,9 +125,9 @@ func (mock *mockService) StatusAll(in struct{}, out *[]api.GlobalPinInfoSerial)
{
Cid: c3,
PeerMap: map[peer.ID]api.PinInfo{
testPeerID: {
TestPeerID1: {
Cid: c3,
Peer: testPeerID,
Peer: TestPeerID1,
Status: api.TrackerStatusPinError,
TS: time.Now(),
},
@ -127,16 +138,16 @@ func (mock *mockService) StatusAll(in struct{}, out *[]api.GlobalPinInfoSerial)
}
func (mock *mockService) Status(in api.CidArgSerial, out *api.GlobalPinInfoSerial) error {
if in.Cid == errorCid {
return errBadCid
if in.Cid == ErrorCid {
return ErrBadCid
}
c1, _ := cid.Decode(testCid1)
c1, _ := cid.Decode(TestCid1)
*out = api.GlobalPinInfo{
Cid: c1,
PeerMap: map[peer.ID]api.PinInfo{
testPeerID: {
TestPeerID1: {
Cid: c1,
Peer: testPeerID,
Peer: TestPeerID1,
Status: api.TrackerStatusPinned,
TS: time.Now(),
},

test/test.go Normal file (3 lines added)
View File

@ -0,0 +1,3 @@
// Package test offers testing utilities for ipfs-cluster, such as
// mocks
package test