2018-09-26 11:42:20 +00:00
|
|
|
package ipfscluster
|
|
|
|
|
|
|
|
// This file has tests for Add* using multiple cluster peers.
|
|
|
|
|
|
|
|
import (
|
2018-06-27 04:03:15 +00:00
|
|
|
"context"
|
2018-09-26 11:42:20 +00:00
|
|
|
"mime/multipart"
|
2019-08-05 05:01:07 +00:00
|
|
|
"sync"
|
2018-09-26 11:42:20 +00:00
|
|
|
"testing"
|
2019-07-30 12:00:55 +00:00
|
|
|
"time"
|
2018-09-26 11:42:20 +00:00
|
|
|
|
2019-08-05 05:01:07 +00:00
|
|
|
files "github.com/ipfs/go-ipfs-files"
|
2022-06-15 09:19:17 +00:00
|
|
|
"github.com/ipfs-cluster/ipfs-cluster/adder"
|
|
|
|
"github.com/ipfs-cluster/ipfs-cluster/api"
|
|
|
|
"github.com/ipfs-cluster/ipfs-cluster/test"
|
2019-08-05 05:01:07 +00:00
|
|
|
peer "github.com/libp2p/go-libp2p-core/peer"
|
2018-09-26 11:42:20 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
func TestAdd(t *testing.T) {
|
2018-06-27 04:03:15 +00:00
|
|
|
ctx := context.Background()
|
2018-09-26 11:42:20 +00:00
|
|
|
clusters, mock := createClusters(t)
|
|
|
|
defer shutdownClusters(t, clusters, mock)
|
|
|
|
sth := test.NewShardingTestHelper()
|
|
|
|
defer sth.Clean(t)
|
|
|
|
|
2019-07-09 00:37:47 +00:00
|
|
|
waitForLeaderAndMetrics(t, clusters)
|
|
|
|
|
2022-02-01 21:29:54 +00:00
|
|
|
t.Run("default", func(t *testing.T) {
|
2018-09-26 11:42:20 +00:00
|
|
|
params := api.DefaultAddParams()
|
|
|
|
params.Shard = false
|
|
|
|
params.Name = "testlocal"
|
|
|
|
mfr, closer := sth.GetTreeMultiReader(t)
|
|
|
|
defer closer.Close()
|
|
|
|
r := multipart.NewReader(mfr, mfr.Boundary())
|
Adders: stream blocks to destinations
This commit fixes #810 and adds block streaming to the final destinations when
adding. This should add major performance gains when adding data to clusters.
Before, everytime cluster issued a block, it was broadcasted individually to
all destinations (new libp2p stream), where it was block/put to IPFS (a single
block/put http roundtrip per block).
Now, blocks are streamed all the way from the adder module to the ipfs daemon,
by making every block as it arrives a single part in a multipart block/put
request.
Before, block-broadcast needed to wait for all destinations to finish in order
to process the next block. Now, buffers allow some destinations to be faster
than others while sending and receiving blocks.
Before, if a block put request failed to be broadcasted everywhere, an error
would happen at that moment.
Now, we keep streaming until the end and only then report any errors. The
operation succeeds as long as at least one stream finished successfully.
Errors block/putting to IPFS will not abort streams. Instead, subsequent
blocks are retried with a new request, although the method will return an
error when the stream finishes if there were errors at any point.
2022-03-24 01:17:10 +00:00
|
|
|
ci, err := clusters[0].AddFile(context.Background(), r, params)
|
2018-09-26 11:42:20 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if ci.String() != test.ShardingDirBalancedRootCID {
|
|
|
|
t.Fatal("unexpected root CID for local add")
|
|
|
|
}
|
|
|
|
|
2019-07-30 12:00:55 +00:00
|
|
|
// We need to sleep a lot because it takes time to
|
|
|
|
// catch up on a first/single pin on crdts
|
|
|
|
time.Sleep(10 * time.Second)
|
2018-09-26 11:42:20 +00:00
|
|
|
|
|
|
|
f := func(t *testing.T, c *Cluster) {
|
2018-06-27 04:03:15 +00:00
|
|
|
pin := c.StatusLocal(ctx, ci)
|
2018-09-26 11:42:20 +00:00
|
|
|
if pin.Error != "" {
|
|
|
|
t.Error(pin.Error)
|
|
|
|
}
|
|
|
|
if pin.Status != api.TrackerStatusPinned {
|
2018-08-15 10:30:00 +00:00
|
|
|
t.Error("item should be pinned and is", pin.Status)
|
2018-09-26 11:42:20 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
runF(t, clusters, f)
|
|
|
|
})
|
2022-02-01 21:29:54 +00:00
|
|
|
|
|
|
|
t.Run("local_one_allocation", func(t *testing.T) {
|
|
|
|
params := api.DefaultAddParams()
|
|
|
|
params.Shard = false
|
|
|
|
params.Name = "testlocal"
|
|
|
|
params.ReplicationFactorMin = 1
|
|
|
|
params.ReplicationFactorMax = 1
|
|
|
|
params.Local = true
|
|
|
|
mfr, closer := sth.GetTreeMultiReader(t)
|
|
|
|
defer closer.Close()
|
|
|
|
r := multipart.NewReader(mfr, mfr.Boundary())
|
Adders: stream blocks to destinations
This commit fixes #810 and adds block streaming to the final destinations when
adding. This should add major performance gains when adding data to clusters.
Before, everytime cluster issued a block, it was broadcasted individually to
all destinations (new libp2p stream), where it was block/put to IPFS (a single
block/put http roundtrip per block).
Now, blocks are streamed all the way from the adder module to the ipfs daemon,
by making every block as it arrives a single part in a multipart block/put
request.
Before, block-broadcast needed to wait for all destinations to finish in order
to process the next block. Now, buffers allow some destinations to be faster
than others while sending and receiving blocks.
Before, if a block put request failed to be broadcasted everywhere, an error
would happen at that moment.
Now, we keep streaming until the end and only then report any errors. The
operation succeeds as long as at least one stream finished successfully.
Errors block/putting to IPFS will not abort streams. Instead, subsequent
blocks are retried with a new request, although the method will return an
error when the stream finishes if there were errors at any point.
2022-03-24 01:17:10 +00:00
|
|
|
ci, err := clusters[2].AddFile(context.Background(), r, params)
|
2022-02-01 21:29:54 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if ci.String() != test.ShardingDirBalancedRootCID {
|
|
|
|
t.Fatal("unexpected root CID for local add")
|
|
|
|
}
|
|
|
|
|
|
|
|
// We need to sleep a lot because it takes time to
|
|
|
|
// catch up on a first/single pin on crdts
|
|
|
|
time.Sleep(10 * time.Second)
|
|
|
|
|
|
|
|
f := func(t *testing.T, c *Cluster) {
|
|
|
|
pin := c.StatusLocal(ctx, ci)
|
|
|
|
if pin.Error != "" {
|
|
|
|
t.Error(pin.Error)
|
|
|
|
}
|
|
|
|
switch c.id {
|
|
|
|
case clusters[2].id:
|
|
|
|
if pin.Status != api.TrackerStatusPinned {
|
|
|
|
t.Error("item should be pinned and is", pin.Status)
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
if pin.Status != api.TrackerStatusRemote {
|
|
|
|
t.Errorf("item should only be allocated to cluster2")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
runF(t, clusters, f)
|
|
|
|
})
|
2018-09-26 11:42:20 +00:00
|
|
|
}
|
|
|
|
|
2019-08-11 18:08:54 +00:00
|
|
|
func TestAddWithUserAllocations(t *testing.T) {
|
|
|
|
ctx := context.Background()
|
|
|
|
clusters, mock := createClusters(t)
|
|
|
|
defer shutdownClusters(t, clusters, mock)
|
|
|
|
sth := test.NewShardingTestHelper()
|
|
|
|
defer sth.Clean(t)
|
|
|
|
|
|
|
|
waitForLeaderAndMetrics(t, clusters)
|
|
|
|
|
|
|
|
t.Run("local", func(t *testing.T) {
|
|
|
|
params := api.DefaultAddParams()
|
|
|
|
params.ReplicationFactorMin = 2
|
|
|
|
params.ReplicationFactorMax = 2
|
|
|
|
params.UserAllocations = []peer.ID{clusters[0].id, clusters[1].id}
|
|
|
|
params.Shard = false
|
|
|
|
params.Name = "testlocal"
|
|
|
|
mfr, closer := sth.GetTreeMultiReader(t)
|
|
|
|
defer closer.Close()
|
|
|
|
r := multipart.NewReader(mfr, mfr.Boundary())
|
Adders: stream blocks to destinations
This commit fixes #810 and adds block streaming to the final destinations when
adding. This should add major performance gains when adding data to clusters.
Before, everytime cluster issued a block, it was broadcasted individually to
all destinations (new libp2p stream), where it was block/put to IPFS (a single
block/put http roundtrip per block).
Now, blocks are streamed all the way from the adder module to the ipfs daemon,
by making every block as it arrives a single part in a multipart block/put
request.
Before, block-broadcast needed to wait for all destinations to finish in order
to process the next block. Now, buffers allow some destinations to be faster
than others while sending and receiving blocks.
Before, if a block put request failed to be broadcasted everywhere, an error
would happen at that moment.
Now, we keep streaming until the end and only then report any errors. The
operation succeeds as long as at least one stream finished successfully.
Errors block/putting to IPFS will not abort streams. Instead, subsequent
blocks are retried with a new request, although the method will return an
error when the stream finishes if there were errors at any point.
2022-03-24 01:17:10 +00:00
|
|
|
ci, err := clusters[0].AddFile(context.Background(), r, params)
|
2019-08-11 18:08:54 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
pinDelay()
|
|
|
|
|
|
|
|
f := func(t *testing.T, c *Cluster) {
|
|
|
|
if c == clusters[0] || c == clusters[1] {
|
|
|
|
pin := c.StatusLocal(ctx, ci)
|
|
|
|
if pin.Error != "" {
|
|
|
|
t.Error(pin.Error)
|
|
|
|
}
|
|
|
|
if pin.Status != api.TrackerStatusPinned {
|
|
|
|
t.Error("item should be pinned and is", pin.Status)
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
pin := c.StatusLocal(ctx, ci)
|
|
|
|
if pin.Status != api.TrackerStatusRemote {
|
|
|
|
t.Error("expected tracker status remote")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
runF(t, clusters, f)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2018-09-26 11:42:20 +00:00
|
|
|
func TestAddPeerDown(t *testing.T) {
|
2018-06-27 04:03:15 +00:00
|
|
|
ctx := context.Background()
|
2018-09-26 11:42:20 +00:00
|
|
|
clusters, mock := createClusters(t)
|
|
|
|
defer shutdownClusters(t, clusters, mock)
|
|
|
|
sth := test.NewShardingTestHelper()
|
|
|
|
defer sth.Clean(t)
|
2018-06-27 04:03:15 +00:00
|
|
|
err := clusters[0].Shutdown(ctx)
|
2018-09-26 11:42:20 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
waitForLeaderAndMetrics(t, clusters)
|
|
|
|
|
|
|
|
t.Run("local", func(t *testing.T) {
|
|
|
|
params := api.DefaultAddParams()
|
|
|
|
params.Shard = false
|
|
|
|
params.Name = "testlocal"
|
|
|
|
mfr, closer := sth.GetTreeMultiReader(t)
|
|
|
|
defer closer.Close()
|
|
|
|
r := multipart.NewReader(mfr, mfr.Boundary())
|
Adders: stream blocks to destinations
This commit fixes #810 and adds block streaming to the final destinations when
adding. This should add major performance gains when adding data to clusters.
Before, everytime cluster issued a block, it was broadcasted individually to
all destinations (new libp2p stream), where it was block/put to IPFS (a single
block/put http roundtrip per block).
Now, blocks are streamed all the way from the adder module to the ipfs daemon,
by making every block as it arrives a single part in a multipart block/put
request.
Before, block-broadcast needed to wait for all destinations to finish in order
to process the next block. Now, buffers allow some destinations to be faster
than others while sending and receiving blocks.
Before, if a block put request failed to be broadcasted everywhere, an error
would happen at that moment.
Now, we keep streaming until the end and only then report any errors. The
operation succeeds as long as at least one stream finished successfully.
Errors block/putting to IPFS will not abort streams. Instead, subsequent
blocks are retried with a new request, although the method will return an
error when the stream finishes if there were errors at any point.
2022-03-24 01:17:10 +00:00
|
|
|
ci, err := clusters[1].AddFile(context.Background(), r, params)
|
2018-09-26 11:42:20 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if ci.String() != test.ShardingDirBalancedRootCID {
|
|
|
|
t.Fatal("unexpected root CID for local add")
|
|
|
|
}
|
|
|
|
|
2019-07-30 12:00:55 +00:00
|
|
|
// We need to sleep a lot because it takes time to
|
|
|
|
// catch up on a first/single pin on crdts
|
|
|
|
time.Sleep(10 * time.Second)
|
2018-09-26 11:42:20 +00:00
|
|
|
|
|
|
|
f := func(t *testing.T, c *Cluster) {
|
|
|
|
if c.id == clusters[0].id {
|
|
|
|
return
|
|
|
|
}
|
2018-06-27 04:03:15 +00:00
|
|
|
pin := c.StatusLocal(ctx, ci)
|
2018-09-26 11:42:20 +00:00
|
|
|
if pin.Error != "" {
|
|
|
|
t.Error(pin.Error)
|
|
|
|
}
|
|
|
|
if pin.Status != api.TrackerStatusPinned {
|
|
|
|
t.Error("item should be pinned")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
runF(t, clusters, f)
|
|
|
|
})
|
|
|
|
}
|
2019-08-05 05:01:07 +00:00
|
|
|
|
|
|
|
// TestAddOnePeerFails starts a large add in a goroutine, kills one peer
// (the last one) mid-add, and expects the add to still complete without
// error: losing a single destination must not abort the operation.
func TestAddOnePeerFails(t *testing.T) {
	clusters, mock := createClusters(t)
	defer shutdownClusters(t, clusters, mock)
	sth := test.NewShardingTestHelper()
	defer sth.Clean(t)

	waitForLeaderAndMetrics(t, clusters)

	t.Run("local", func(t *testing.T) {
		params := api.DefaultAddParams()
		params.Shard = false
		params.Name = "testlocal"
		// Large random payload so the add is still in flight when we
		// shut a peer down below.
		lg, closer := sth.GetRandFileReader(t, 100000) // 100 MB
		defer closer.Close()

		mr := files.NewMultiFileReader(lg, true)
		r := multipart.NewReader(mr, mr.Boundary())

		// Run the add concurrently so this goroutine can take a peer
		// down while blocks are streaming.
		var wg sync.WaitGroup
		wg.Add(1)
		go func() {
			defer wg.Done()
			_, err := clusters[0].AddFile(context.Background(), r, params)
			if err != nil {
				t.Error(err)
			}
		}()

		// Disconnect 1 cluster (the last). Things should keep working.
		// Important that we close the hosts, otherwise the RPC
		// Servers keep working along with BlockPuts.
		time.Sleep(100 * time.Millisecond)
		c := clusters[nClusters-1]
		c.Shutdown(context.Background())
		c.dht.Close()
		c.host.Close()
		// Wait for the add goroutine to finish before the subtest ends.
		wg.Wait()
	})
}
|
|
|
|
|
|
|
|
// TestAddAllPeersFail shuts down every allocated destination while an
// add is in flight and expects the add to fail with adder.ErrBlockAdder:
// when no destination stream can finish, the operation must error.
func TestAddAllPeersFail(t *testing.T) {
	ctx := context.Background()
	clusters, mock := createClusters(t)
	defer shutdownClusters(t, clusters, mock)
	sth := test.NewShardingTestHelper()
	defer sth.Clean(t)

	waitForLeaderAndMetrics(t, clusters)

	t.Run("local", func(t *testing.T) {
		// Prevent added content to be allocated to cluster 0
		// as it is already going to have something.
		_, err := clusters[0].Pin(ctx, test.Cid1, api.PinOptions{
			ReplicationFactorMin: 1,
			ReplicationFactorMax: 1,
			UserAllocations:      []peer.ID{clusters[0].host.ID()},
		})
		if err != nil {
			t.Fatal(err)
		}

		// Let metrics/allocations reflect the pin above.
		ttlDelay()

		params := api.DefaultAddParams()
		params.Shard = false
		params.Name = "testlocal"
		// Allocate to every peer except 0 (which already has a pin)
		params.PinOptions.ReplicationFactorMax = nClusters - 1
		params.PinOptions.ReplicationFactorMin = nClusters - 1

		// Large random payload so the add is still streaming when the
		// destination peers are shut down below.
		lg, closer := sth.GetRandFileReader(t, 100000) // 100 MB
		defer closer.Close()
		mr := files.NewMultiFileReader(lg, true)
		r := multipart.NewReader(mr, mr.Boundary())

		// var cid cid.Cid
		var wg sync.WaitGroup
		wg.Add(1)
		go func() {
			defer wg.Done()
			// All destinations will die mid-add, so this must fail
			// with the adder's block-put error.
			_, err := clusters[0].AddFile(context.Background(), r, params)
			if err != adder.ErrBlockAdder {
				t.Error("expected ErrBlockAdder. Got: ", err)
			}
		}()

		// Give the add a moment to start streaming.
		time.Sleep(100 * time.Millisecond)

		// Shutdown all clusters except 0 to see the right error.
		// Important that we shut down the hosts, otherwise
		// the RPC Servers keep working along with BlockPuts.
		// Note that this kills raft.
		runF(t, clusters[1:], func(t *testing.T, c *Cluster) {
			c.Shutdown(ctx)
			c.dht.Close()
			c.host.Close()
		})
		// Wait for the add goroutine to observe the failure.
		wg.Wait()
	})
}
|