2017-01-30 12:12:25 +00:00
|
|
|
package ipfscluster
|
|
|
|
|
|
|
|
import (
|
2018-07-17 10:51:31 +00:00
|
|
|
"context"
|
2018-01-16 19:57:54 +00:00
|
|
|
"fmt"
|
2017-01-30 12:12:25 +00:00
|
|
|
"sync"
|
|
|
|
"testing"
|
2017-02-02 22:52:06 +00:00
|
|
|
"time"
|
2017-01-30 12:12:25 +00:00
|
|
|
|
2017-03-08 17:28:43 +00:00
|
|
|
"github.com/ipfs/ipfs-cluster/api"
|
2019-05-08 16:24:59 +00:00
|
|
|
"github.com/ipfs/ipfs-cluster/config"
|
2017-02-09 15:29:17 +00:00
|
|
|
"github.com/ipfs/ipfs-cluster/test"
|
|
|
|
|
2017-01-30 12:12:25 +00:00
|
|
|
cid "github.com/ipfs/go-cid"
|
2019-06-14 10:41:11 +00:00
|
|
|
host "github.com/libp2p/go-libp2p-core/host"
|
|
|
|
peer "github.com/libp2p/go-libp2p-core/peer"
|
2017-01-30 12:12:25 +00:00
|
|
|
ma "github.com/multiformats/go-multiaddr"
|
|
|
|
)
|
|
|
|
|
2019-02-20 14:24:25 +00:00
|
|
|
func peerManagerClusters(t *testing.T) ([]*Cluster, []*test.IpfsMock, host.Host) {
|
2017-01-30 12:12:25 +00:00
|
|
|
cls := make([]*Cluster, nClusters, nClusters)
|
2017-02-09 15:29:17 +00:00
|
|
|
mocks := make([]*test.IpfsMock, nClusters, nClusters)
|
2017-01-30 12:12:25 +00:00
|
|
|
var wg sync.WaitGroup
|
|
|
|
for i := 0; i < nClusters; i++ {
|
|
|
|
wg.Add(1)
|
|
|
|
go func(i int) {
|
|
|
|
defer wg.Done()
|
2017-07-04 20:39:10 +00:00
|
|
|
cl, m := createOnePeerCluster(t, i, testingClusterSecret)
|
2017-01-30 12:12:25 +00:00
|
|
|
cls[i] = cl
|
|
|
|
mocks[i] = m
|
|
|
|
}(i)
|
|
|
|
}
|
|
|
|
wg.Wait()
|
2018-07-17 10:51:31 +00:00
|
|
|
|
2019-05-08 16:24:59 +00:00
|
|
|
// Creat an identity
|
|
|
|
ident, err := config.NewIdentity()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2019-02-20 14:24:25 +00:00
|
|
|
// Create a config
|
|
|
|
cfg := &Config{}
|
|
|
|
cfg.Default()
|
|
|
|
listen, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0")
|
|
|
|
cfg.ListenAddr = listen
|
|
|
|
cfg.Secret = testingClusterSecret
|
|
|
|
|
|
|
|
// Create a bootstrapping libp2p host
|
2019-05-08 16:24:59 +00:00
|
|
|
h, _, dht, err := NewClusterHost(context.Background(), ident, cfg)
|
2019-02-20 14:24:25 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Connect all peers to that host. This will allow that they
|
|
|
|
// can discover each others via DHT.
|
|
|
|
for i := 0; i < nClusters; i++ {
|
2018-07-17 10:51:31 +00:00
|
|
|
err := cls[i].host.Connect(
|
|
|
|
context.Background(),
|
2019-06-14 10:41:11 +00:00
|
|
|
peer.AddrInfo{
|
2019-02-20 14:24:25 +00:00
|
|
|
ID: h.ID(),
|
|
|
|
Addrs: h.Addrs(),
|
2018-07-17 10:51:31 +00:00
|
|
|
},
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
2019-02-20 14:24:25 +00:00
|
|
|
dht.Bootstrap(context.Background())
|
2018-07-17 10:51:31 +00:00
|
|
|
|
2019-02-20 14:24:25 +00:00
|
|
|
return cls, mocks, h
|
2017-01-30 12:12:25 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func clusterAddr(c *Cluster) ma.Multiaddr {
|
2018-10-25 12:55:01 +00:00
|
|
|
for _, a := range c.host.Addrs() {
|
|
|
|
if _, err := a.ValueForProtocol(ma.P_IP4); err == nil {
|
|
|
|
p := peer.IDB58Encode(c.id)
|
2019-08-15 11:19:26 +00:00
|
|
|
cAddr, _ := ma.NewMultiaddr(fmt.Sprintf("%s/p2p/%s", a, p))
|
2018-10-25 12:55:01 +00:00
|
|
|
return cAddr
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
2017-01-30 12:12:25 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func TestClustersPeerAdd(t *testing.T) {
|
2018-06-27 04:03:15 +00:00
|
|
|
ctx := context.Background()
|
2019-02-20 14:24:25 +00:00
|
|
|
clusters, mocks, boot := peerManagerClusters(t)
|
2017-01-30 12:12:25 +00:00
|
|
|
defer shutdownClusters(t, clusters, mocks)
|
2019-02-20 14:24:25 +00:00
|
|
|
defer boot.Close()
|
2017-01-30 12:12:25 +00:00
|
|
|
|
|
|
|
if len(clusters) < 2 {
|
2017-02-02 22:52:06 +00:00
|
|
|
t.Skip("need at least 2 nodes for this test")
|
2017-01-30 12:12:25 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
for i := 1; i < len(clusters); i++ {
|
2018-06-27 04:03:15 +00:00
|
|
|
id, err := clusters[0].PeerAdd(ctx, clusters[i].id)
|
2017-01-30 12:12:25 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2019-05-09 14:33:59 +00:00
|
|
|
if !containsPeer(id.ClusterPeers, clusters[0].id) {
|
2017-01-30 12:12:25 +00:00
|
|
|
// ClusterPeers is originally empty and contains nodes as we add them
|
2017-11-08 19:04:04 +00:00
|
|
|
t.Log(i, id.ClusterPeers)
|
2017-01-30 12:12:25 +00:00
|
|
|
t.Fatal("cluster peers should be up to date with the cluster")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-02-27 20:19:10 +00:00
|
|
|
h := test.Cid1
|
Improve pin/unpin method signatures (#843)
* Improve pin/unpin method signatures:
These changes the following Cluster Go API methods:
* -> Cluster.Pin(ctx, cid, options) (pin, error)
* -> Cluster.Unpin(ctx, cid) (pin, error)
* -> Cluster.PinPath(ctx, path, opts) (pin,error)
Pin and Unpin now return the pinned object.
The signature of the methods now matches that of the API Client, is clearer as
to what options the user can set and is aligned with PinPath, UnpinPath, which
returned pin methods.
The REST API now returns the Pinned/Unpinned object rather than 204-Accepted.
This was necessary for a cleaner pin/update approach, which I'm working on in
another branch.
Most of the changes here are updating tests to the new signatures
* Adapt load-balancing client to new Pin/Unpin signatures
* cluster.go: Fix typo
Co-Authored-By: Kishan Sagathiya <kishansagathiya@gmail.com>
* cluster.go: Fix typo
Co-Authored-By: Kishan Sagathiya <kishansagathiya@gmail.com>
2019-07-22 13:39:11 +00:00
|
|
|
_, err := clusters[1].Pin(ctx, h, api.PinOptions{})
|
2017-02-01 17:16:09 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2018-03-16 16:37:39 +00:00
|
|
|
pinDelay()
|
2017-01-30 12:12:25 +00:00
|
|
|
|
|
|
|
f := func(t *testing.T, c *Cluster) {
|
2018-06-27 04:03:15 +00:00
|
|
|
ids := c.Peers(ctx)
|
2017-01-30 12:12:25 +00:00
|
|
|
|
|
|
|
// check they are tracked by the peer manager
|
|
|
|
if len(ids) != nClusters {
|
2017-02-02 22:52:06 +00:00
|
|
|
//t.Log(ids)
|
2017-01-30 12:12:25 +00:00
|
|
|
t.Error("added clusters are not part of clusters")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check that they are part of the consensus
|
2019-02-20 14:24:25 +00:00
|
|
|
pins, err := c.Pins(ctx)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2017-01-30 12:12:25 +00:00
|
|
|
if len(pins) != 1 {
|
2017-02-01 17:16:09 +00:00
|
|
|
t.Log(pins)
|
2017-01-30 12:12:25 +00:00
|
|
|
t.Error("expected 1 pin everywhere")
|
|
|
|
}
|
|
|
|
|
2018-06-27 04:03:15 +00:00
|
|
|
if len(c.ID(ctx).ClusterPeers) != nClusters {
|
|
|
|
t.Log(c.ID(ctx).ClusterPeers)
|
2017-02-02 22:52:06 +00:00
|
|
|
t.Error("By now cluster peers should reflect all peers")
|
2017-01-30 12:12:25 +00:00
|
|
|
}
|
2018-07-17 10:51:31 +00:00
|
|
|
}
|
|
|
|
runF(t, clusters, f)
|
|
|
|
|
|
|
|
for _, c := range clusters {
|
2018-06-27 04:03:15 +00:00
|
|
|
c.Shutdown(ctx)
|
2018-07-17 10:51:31 +00:00
|
|
|
}
|
2017-01-30 12:12:25 +00:00
|
|
|
|
2018-07-17 10:51:31 +00:00
|
|
|
f2 := func(t *testing.T, c *Cluster) {
|
Feat: emancipate Consensus from the Cluster component
This commit promotes the Consensus component (and Raft) to become a fully
independent thing like other components, passed to NewCluster during
initialization. Cluster (main component) no longer creates the consensus
layer internally. This has triggered a number of breaking changes
that I will explain below.
Motivation: Future work will require the possibility of running Cluster
with a consensus layer that is not Raft. The "consensus" layer is in charge
of maintaining two things:
* The current cluster peerset, as required by the implementation
* The current cluster pinset (shared state)
While the pinset maintenance has always been in the consensus layer, the
peerset maintenance was handled by the main component (starting by the "peers"
key in the configuration) AND the Raft component (internally)
and this generated lots of confusion: if the user edited the peers in the
configuration they would be greeted with an error.
The bootstrap process (adding a peer to an existing cluster) and configuration
key also complicated many things, since the main component did it, but only
when the consensus was initialized and in single peer mode.
In all this we also mixed the peerstore (list of peer addresses in the libp2p
host) with the peerset, when they need not to be linked.
By initializing the consensus layer before calling NewCluster, all the
difficulties in maintaining the current implementation in the same way
have come to light. Thus, the following changes have been introduced:
* Remove "peers" and "bootstrap" keys from the configuration: we no longer
edit or save the configuration files. This was a very bad practice, requiring
write permissions by the process to the file containing the private key and
additionally made things like Puppet deployments of cluster difficult as
configuration would mutate from its initial version. Needless to say all the
maintenance associated to making sure peers and bootstrap had correct values
when peers are bootstrapped or removed. A loud and detailed error message has
been added when staring cluster with an old config, along with instructions on
how to move forward.
* Introduce a PeerstoreFile ("peerstore") which stores peer addresses: in
ipfs, the peerstore is not persisted because it can be re-built from the
network bootstrappers and the DHT. Cluster should probably also allow
discoverability of peers addresses (when not bootstrapping, as in that case
we have it), but in the meantime, we will read and persist the peerstore
addresses for cluster peers in this file, different from the configuration.
Note that dns multiaddresses are now fully supported and no IPs are saved
when we have DNS multiaddresses for a peer.
* The former "peer_manager" code is now a pstoremgr module, providing utilities
to parse, add, list and generally maintain the libp2p host peerstore, including
operations on the PeerstoreFile. This "pstoremgr" can now also be extended to
perform address autodiscovery and other things indepedently from Cluster.
* Create and initialize Raft outside of the main Cluster component: since we
can now launch Raft independently from Cluster, we have more degrees of
freedom. A new "staging" option when creating the object allows a raft peer to
be launched in Staging mode, waiting to be added to a running consensus, and
thus, not electing itself as leader or doing anything like we were doing
before. This additionally allows us to track when the peer has become a
Voter, which only happens when it's caught up with the state, something that
was wonky previously.
* The raft configuration now includes an InitPeerset key, which allows to
provide a peerset for new peers and which is ignored when staging==true. The
whole Raft initialization code is way cleaner and stronger now.
* Cluster peer bootsrapping is now an ipfs-cluster-service feature. The
--bootstrap flag works as before (additionally allowing comma-separated-list
of entries). What bootstrap does, is to initialize Raft with staging == true,
and then call Join in the main cluster component. Only when the Raft peer
transitions to Voter, consensus becomes ready, and cluster becomes Ready.
This is cleaner, works better and is less complex than before (supporting
both flags and config values). We also backup and clean the state whenever
we are boostrapping, automatically
* ipfs-cluster-service no longer runs the daemon. Starting cluster needs
now "ipfs-cluster-service daemon". The daemon specific flags (bootstrap,
alloc) are now flags for the daemon subcommand. Here we mimic ipfs ("ipfs"
does not start the daemon but print help) and pave the path for merging both
service and ctl in the future.
While this brings some breaking changes, it significantly reduces the
complexity of the configuration, the code and most importantly, the
documentation. It should be easier now to explain the user what is the
right way to launch a cluster peer, and more difficult to make mistakes.
As a side effect, the PR also:
* Fixes #381 - peers with dynamic addresses
* Fixes #371 - peers should be Raft configuration option
* Fixes #378 - waitForUpdates may return before state fully synced
* Fixes #235 - config option shadowing (no cfg saves, no need to shadow)
License: MIT
Signed-off-by: Hector Sanjuan <code@hector.link>
2018-04-28 22:22:23 +00:00
|
|
|
// check that all peers are part of the peerstore
|
|
|
|
// (except ourselves)
|
|
|
|
addrs := c.peerManager.LoadPeerstore()
|
|
|
|
peerMap := make(map[peer.ID]struct{})
|
|
|
|
for _, a := range addrs {
|
2019-06-14 10:41:11 +00:00
|
|
|
pinfo, err := peer.AddrInfoFromP2pAddr(a)
|
Feat: emancipate Consensus from the Cluster component
This commit promotes the Consensus component (and Raft) to become a fully
independent thing like other components, passed to NewCluster during
initialization. Cluster (main component) no longer creates the consensus
layer internally. This has triggered a number of breaking changes
that I will explain below.
Motivation: Future work will require the possibility of running Cluster
with a consensus layer that is not Raft. The "consensus" layer is in charge
of maintaining two things:
* The current cluster peerset, as required by the implementation
* The current cluster pinset (shared state)
While the pinset maintenance has always been in the consensus layer, the
peerset maintenance was handled by the main component (starting by the "peers"
key in the configuration) AND the Raft component (internally)
and this generated lots of confusion: if the user edited the peers in the
configuration they would be greeted with an error.
The bootstrap process (adding a peer to an existing cluster) and configuration
key also complicated many things, since the main component did it, but only
when the consensus was initialized and in single peer mode.
In all this we also mixed the peerstore (list of peer addresses in the libp2p
host) with the peerset, when they need not to be linked.
By initializing the consensus layer before calling NewCluster, all the
difficulties in maintaining the current implementation in the same way
have come to light. Thus, the following changes have been introduced:
* Remove "peers" and "bootstrap" keys from the configuration: we no longer
edit or save the configuration files. This was a very bad practice, requiring
write permissions by the process to the file containing the private key and
additionally made things like Puppet deployments of cluster difficult as
configuration would mutate from its initial version. Needless to say all the
maintenance associated to making sure peers and bootstrap had correct values
when peers are bootstrapped or removed. A loud and detailed error message has
been added when staring cluster with an old config, along with instructions on
how to move forward.
* Introduce a PeerstoreFile ("peerstore") which stores peer addresses: in
ipfs, the peerstore is not persisted because it can be re-built from the
network bootstrappers and the DHT. Cluster should probably also allow
discoverability of peers addresses (when not bootstrapping, as in that case
we have it), but in the meantime, we will read and persist the peerstore
addresses for cluster peers in this file, different from the configuration.
Note that dns multiaddresses are now fully supported and no IPs are saved
when we have DNS multiaddresses for a peer.
* The former "peer_manager" code is now a pstoremgr module, providing utilities
to parse, add, list and generally maintain the libp2p host peerstore, including
operations on the PeerstoreFile. This "pstoremgr" can now also be extended to
perform address autodiscovery and other things indepedently from Cluster.
* Create and initialize Raft outside of the main Cluster component: since we
can now launch Raft independently from Cluster, we have more degrees of
freedom. A new "staging" option when creating the object allows a raft peer to
be launched in Staging mode, waiting to be added to a running consensus, and
thus, not electing itself as leader or doing anything like we were doing
before. This additionally allows us to track when the peer has become a
Voter, which only happens when it's caught up with the state, something that
was wonky previously.
* The raft configuration now includes an InitPeerset key, which allows to
provide a peerset for new peers and which is ignored when staging==true. The
whole Raft initialization code is way cleaner and stronger now.
* Cluster peer bootsrapping is now an ipfs-cluster-service feature. The
--bootstrap flag works as before (additionally allowing comma-separated-list
of entries). What bootstrap does, is to initialize Raft with staging == true,
and then call Join in the main cluster component. Only when the Raft peer
transitions to Voter, consensus becomes ready, and cluster becomes Ready.
This is cleaner, works better and is less complex than before (supporting
both flags and config values). We also backup and clean the state whenever
we are boostrapping, automatically
* ipfs-cluster-service no longer runs the daemon. Starting cluster needs
now "ipfs-cluster-service daemon". The daemon specific flags (bootstrap,
alloc) are now flags for the daemon subcommand. Here we mimic ipfs ("ipfs"
does not start the daemon but print help) and pave the path for merging both
service and ctl in the future.
While this brings some breaking changes, it significantly reduces the
complexity of the configuration, the code and most importantly, the
documentation. It should be easier now to explain the user what is the
right way to launch a cluster peer, and more difficult to make mistakes.
As a side effect, the PR also:
* Fixes #381 - peers with dynamic addresses
* Fixes #371 - peers should be Raft configuration option
* Fixes #378 - waitForUpdates may return before state fully synced
* Fixes #235 - config option shadowing (no cfg saves, no need to shadow)
License: MIT
Signed-off-by: Hector Sanjuan <code@hector.link>
2018-04-28 22:22:23 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
Fix #787: Connectivity fixes
Currently, unless doing Join() (--bootstrap), we do not connect to any peers on startup.
We however loaded up the peerstore file and Raft will automatically connect
older peers to figure out who is the leader etc. DHT bootstrap, after Raft
was working, did the rest.
For CRDTs we need to connect to people on a normal boot as otherwise, unless
bootstrapping, this does not happen, even if the peerstore contains known peers.
This introduces a number of changes:
* Move peerstore file management back inside the Cluster component, which was
already in charge of saving the peerstore file.
* We keep saving all "known addresses" but we load them with a non permanent
TTL, so that there will be clean up of peers we're not connected to for long.
* "Bootstrap" (connect) to a small number of peers during Cluster component creation.
* Bootstrap the DHT asap after this, so that other cluster components can
initialize with a working peer discovery mechanism.
* CRDT Trust() method will now:
* Protect the trusted Peer ID in the conn manager
* Give top priority in the PeerManager to that Peer (see below)
* Mark addresses as permanent in the Peerstore
The PeerManager now attaches priorities to peers when importing them and is
able to order them according to that priority. The result is that peers with
high priority are saved first in the peerstore file. When we load the peerstore
file, the first entries in it are given the highest priority.
This means that during startup we will connect to "trusted peers" first
(because they have been tagged with priority in the previous run and saved at
the top of the list). Once connected to a small number of peers, we let the
DHT bootstrap process in the background do the rest and discover the network.
All this makes the peerstore file a "bootstrap" list for CRDTs and we will attempt
to connect to peers on that list until some of those connections succeed.
2019-05-23 16:41:33 +00:00
|
|
|
peerMap[pinfo.ID] = struct{}{}
|
2017-11-10 16:32:07 +00:00
|
|
|
}
|
|
|
|
|
2018-07-17 10:51:31 +00:00
|
|
|
if len(peerMap) == 0 {
|
|
|
|
t.Errorf("%s: peerstore to store at least 1 peer", c.id)
|
2017-11-10 16:32:07 +00:00
|
|
|
}
|
2018-07-17 10:51:31 +00:00
|
|
|
|
2017-01-30 12:12:25 +00:00
|
|
|
}
|
2018-07-17 10:51:31 +00:00
|
|
|
runF(t, clusters, f2)
|
2017-01-30 12:12:25 +00:00
|
|
|
}
|
|
|
|
|
2018-07-17 10:51:31 +00:00
|
|
|
func TestClustersJoinBadPeer(t *testing.T) {
|
2018-06-27 04:03:15 +00:00
|
|
|
ctx := context.Background()
|
2019-02-20 14:24:25 +00:00
|
|
|
clusters, mocks, boot := peerManagerClusters(t)
|
2017-01-30 12:12:25 +00:00
|
|
|
defer shutdownClusters(t, clusters, mocks)
|
2019-02-20 14:24:25 +00:00
|
|
|
defer boot.Close()
|
2017-01-30 12:12:25 +00:00
|
|
|
|
|
|
|
if len(clusters) < 2 {
|
2017-02-02 22:52:06 +00:00
|
|
|
t.Skip("need at least 2 nodes for this test")
|
2017-01-30 12:12:25 +00:00
|
|
|
}
|
|
|
|
|
2018-07-17 10:51:31 +00:00
|
|
|
addr := clusterAddr(clusters[1])
|
2018-01-16 19:57:54 +00:00
|
|
|
|
2017-01-30 12:12:25 +00:00
|
|
|
// We add a cluster that has been shutdown
|
|
|
|
// (closed transports)
|
2018-06-27 04:03:15 +00:00
|
|
|
clusters[1].Shutdown(ctx)
|
2018-01-16 19:57:54 +00:00
|
|
|
|
|
|
|
// Let the OS actually close the ports.
|
|
|
|
// Sometimes we hang otherwise.
|
|
|
|
delay()
|
|
|
|
|
2018-06-27 04:03:15 +00:00
|
|
|
err := clusters[0].Join(ctx, addr)
|
2017-01-30 12:12:25 +00:00
|
|
|
if err == nil {
|
|
|
|
t.Error("expected an error")
|
|
|
|
}
|
2018-06-27 04:03:15 +00:00
|
|
|
ids := clusters[0].Peers(ctx)
|
2017-01-30 12:12:25 +00:00
|
|
|
if len(ids) != 1 {
|
|
|
|
t.Error("cluster should have only one member")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestClustersPeerAddInUnhealthyCluster(t *testing.T) {
|
2018-06-27 04:03:15 +00:00
|
|
|
ctx := context.Background()
|
2019-02-20 14:24:25 +00:00
|
|
|
clusters, mocks, boot := peerManagerClusters(t)
|
2017-01-30 12:12:25 +00:00
|
|
|
defer shutdownClusters(t, clusters, mocks)
|
2019-02-20 14:24:25 +00:00
|
|
|
defer boot.Close()
|
2017-01-30 12:12:25 +00:00
|
|
|
|
|
|
|
if len(clusters) < 3 {
|
2017-02-02 22:52:06 +00:00
|
|
|
t.Skip("need at least 3 nodes for this test")
|
2017-01-30 12:12:25 +00:00
|
|
|
}
|
|
|
|
|
2018-06-27 04:03:15 +00:00
|
|
|
_, err := clusters[0].PeerAdd(ctx, clusters[1].id)
|
2019-05-09 14:33:59 +00:00
|
|
|
ttlDelay()
|
2018-06-27 04:03:15 +00:00
|
|
|
ids := clusters[1].Peers(ctx)
|
2017-01-30 12:12:25 +00:00
|
|
|
if len(ids) != 2 {
|
|
|
|
t.Error("expected 2 peers")
|
|
|
|
}
|
|
|
|
|
2018-07-17 10:51:31 +00:00
|
|
|
// Now we shutdown the one member of the running cluster
|
2017-01-30 12:12:25 +00:00
|
|
|
// and try to add someone else.
|
2018-06-27 04:03:15 +00:00
|
|
|
err = clusters[1].Shutdown(ctx)
|
2017-10-23 11:46:37 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Error("Shutdown should be clean: ", err)
|
|
|
|
}
|
2019-02-20 14:24:25 +00:00
|
|
|
switch consensus {
|
|
|
|
case "raft":
|
|
|
|
delay() // This makes sure the leader realizes that it's not
|
|
|
|
// leader anymore. Otherwise it commits fine.
|
2017-01-30 12:12:25 +00:00
|
|
|
|
2019-02-20 14:24:25 +00:00
|
|
|
_, err = clusters[0].PeerAdd(ctx, clusters[2].id)
|
2017-01-30 12:12:25 +00:00
|
|
|
|
2019-02-20 14:24:25 +00:00
|
|
|
if err == nil {
|
|
|
|
t.Error("expected an error")
|
|
|
|
}
|
|
|
|
|
|
|
|
ids = clusters[0].Peers(ctx)
|
|
|
|
if len(ids) != 2 {
|
|
|
|
t.Error("cluster should still have 2 peers")
|
|
|
|
}
|
|
|
|
case "crdt":
|
|
|
|
// crdt does not really care whether we add or remove
|
|
|
|
|
|
|
|
delay() // let metrics expire
|
|
|
|
_, err = clusters[0].PeerAdd(ctx, clusters[2].id)
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
t.Error(err)
|
|
|
|
}
|
|
|
|
|
2019-05-09 19:24:56 +00:00
|
|
|
ttlDelay()
|
2019-02-20 14:24:25 +00:00
|
|
|
ids = clusters[0].Peers(ctx)
|
|
|
|
if len(ids) != 2 {
|
|
|
|
t.Error("cluster should have 2 peers after removing and adding 1")
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
t.Fatal("bad consensus")
|
2017-01-30 12:12:25 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestClustersPeerRemove(t *testing.T) {
|
2018-06-27 04:03:15 +00:00
|
|
|
ctx := context.Background()
|
2017-02-02 22:52:06 +00:00
|
|
|
clusters, mocks := createClusters(t)
|
|
|
|
defer shutdownClusters(t, clusters, mocks)
|
|
|
|
|
|
|
|
if len(clusters) < 2 {
|
|
|
|
t.Skip("test needs at least 2 clusters")
|
|
|
|
}
|
2017-01-30 12:12:25 +00:00
|
|
|
|
2019-02-20 14:24:25 +00:00
|
|
|
switch consensus {
|
|
|
|
case "crdt":
|
|
|
|
// Peer Rm is a no op.
|
|
|
|
return
|
|
|
|
case "raft":
|
|
|
|
p := clusters[1].ID(ctx).ID
|
|
|
|
err := clusters[0].PeerRemove(ctx, p)
|
|
|
|
if err != nil {
|
|
|
|
t.Error(err)
|
|
|
|
}
|
2017-01-30 12:12:25 +00:00
|
|
|
|
2019-02-20 14:24:25 +00:00
|
|
|
delay()
|
2017-02-08 17:04:08 +00:00
|
|
|
|
2019-02-20 14:24:25 +00:00
|
|
|
f := func(t *testing.T, c *Cluster) {
|
|
|
|
if c.ID(ctx).ID == p { //This is the removed cluster
|
|
|
|
_, ok := <-c.Done()
|
|
|
|
if ok {
|
|
|
|
t.Error("removed peer should have exited")
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
ids := c.Peers(ctx)
|
|
|
|
if len(ids) != nClusters-1 {
|
|
|
|
t.Error("should have removed 1 peer")
|
|
|
|
}
|
2017-01-30 12:12:25 +00:00
|
|
|
}
|
2017-02-02 22:52:06 +00:00
|
|
|
}
|
|
|
|
|
2019-02-20 14:24:25 +00:00
|
|
|
runF(t, clusters, f)
|
|
|
|
default:
|
|
|
|
t.Fatal("bad consensus")
|
|
|
|
}
|
2017-02-02 22:52:06 +00:00
|
|
|
}
|
|
|
|
|
2017-10-31 10:20:14 +00:00
|
|
|
// TestClustersPeerRemoveSelf makes every peer remove itself from the cluster
// in turn, checking after each step that the peerset has shrunk accordingly.
// Only meaningful for raft; self-removal is a no-op under crdt.
func TestClustersPeerRemoveSelf(t *testing.T) {
	ctx := context.Background()
	// this test hangs sometimes if there are problems
	clusters, mocks := createClusters(t)
	defer shutdownClusters(t, clusters, mocks)

	switch consensus {
	case "crdt":
		// remove is a no op in CRDTs
		return

	case "raft":
		for i := 0; i < len(clusters); i++ {
			// Wait for a stable leader before each removal; removals
			// can trigger elections.
			waitForLeaderAndMetrics(t, clusters)
			peers := clusters[i].Peers(ctx)
			t.Logf("Current cluster size: %d", len(peers))
			// i peers have already removed themselves at this point.
			if len(peers) != (len(clusters) - i) {
				t.Fatal("Previous peers not removed correctly")
			}
			err := clusters[i].PeerRemove(ctx, clusters[i].ID(ctx).ID)
			// Last peer member won't be able to remove itself
			// In this case, we shut it down.
			if err != nil {
				if i != len(clusters)-1 { //not last
					t.Error(err)
				} else {
					err := clusters[i].Shutdown(ctx)
					if err != nil {
						t.Fatal(err)
					}
				}
			}
			// potential hanging place
			// Done() closes once the peer has fully stopped; a value
			// received with more==true would mean it is still running.
			_, more := <-clusters[i].Done()
			if more {
				t.Error("should be done")
			}
		}
	default:
		t.Fatal("bad consensus")
	}
}
|
|
|
|
|
2017-11-14 22:29:56 +00:00
|
|
|
// TestClustersPeerRemoveLeader repeatedly removes the current raft leader
// (forcing an election each time) until every peer is gone, verifying the
// peerset shrinks by one per iteration. No-op under crdt.
func TestClustersPeerRemoveLeader(t *testing.T) {
	ctx := context.Background()
	// this test is like the one above, except it always
	// removes the current leader.
	// this test hangs sometimes if there are problems
	clusters, mocks := createClusters(t)
	defer shutdownClusters(t, clusters, mocks)

	switch consensus {
	case "crdt":
		return
	case "raft":
		// findLeader queries every still-running peer for the current
		// leader (waiting for one to be elected first) and returns the
		// *Cluster whose id matches; fails the test if none matches.
		findLeader := func(t *testing.T) *Cluster {
			var l peer.ID
			for _, c := range clusters {
				if !c.shutdownB {
					waitForLeaderAndMetrics(t, clusters)
					l, _ = c.consensus.Leader(ctx)
				}
			}
			for _, c := range clusters {
				if c.id == l {
					return c
				}
			}
			t.Fatal("no leader found")
			return nil
		}

		for i := 0; i < len(clusters); i++ {
			leader := findLeader(t)
			peers := leader.Peers(ctx)
			t.Logf("Current cluster size: %d", len(peers))
			// i leaders have already been removed at this point.
			if len(peers) != (len(clusters) - i) {
				t.Fatal("Previous peers not removed correctly")
			}
			err := leader.PeerRemove(ctx, leader.id)
			// Last peer member won't be able to remove itself
			// In this case, we shut it down.
			if err != nil {
				if i != len(clusters)-1 { //not last
					t.Error(err)
				} else {
					err := leader.Shutdown(ctx)
					if err != nil {
						t.Fatal(err)
					}
				}
			}
			// Done() closes once the removed leader has fully stopped.
			_, more := <-leader.Done()
			if more {
				t.Error("should be done")
			}
			// Give raft a moment to settle before electing the next leader.
			time.Sleep(time.Second / 2)
		}
	default:
		t.Fatal("bad consensus")
	}
}
|
|
|
|
|
2017-10-31 10:20:14 +00:00
|
|
|
func TestClustersPeerRemoveReallocsPins(t *testing.T) {
|
2018-06-27 04:03:15 +00:00
|
|
|
ctx := context.Background()
|
2017-07-03 15:45:22 +00:00
|
|
|
clusters, mocks := createClusters(t)
|
|
|
|
defer shutdownClusters(t, clusters, mocks)
|
|
|
|
|
2019-02-20 14:24:25 +00:00
|
|
|
if consensus == "crdt" {
|
|
|
|
t.Log("FIXME when re-alloc changes come through")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-07-03 15:45:22 +00:00
|
|
|
if len(clusters) < 3 {
|
|
|
|
t.Skip("test needs at least 3 clusters")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Adjust the replication factor for re-allocation
|
|
|
|
for _, c := range clusters {
|
2018-01-12 17:04:46 +00:00
|
|
|
c.config.ReplicationFactorMin = nClusters - 1
|
|
|
|
c.config.ReplicationFactorMax = nClusters - 1
|
2017-07-03 15:45:22 +00:00
|
|
|
}
|
|
|
|
|
2017-10-31 10:20:14 +00:00
|
|
|
// We choose to remove the leader, to make things even more interesting
|
2018-06-27 04:03:15 +00:00
|
|
|
leaderID, err := clusters[0].consensus.Leader(ctx)
|
2017-10-31 10:20:14 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
var leader *Cluster
|
2017-11-10 20:26:37 +00:00
|
|
|
var leaderi int
|
|
|
|
for i, cl := range clusters {
|
2018-06-27 04:03:15 +00:00
|
|
|
if id := cl.ID(ctx).ID; id == leaderID {
|
2017-10-31 10:20:14 +00:00
|
|
|
leader = cl
|
2017-11-10 20:26:37 +00:00
|
|
|
leaderi = i
|
2017-10-31 10:20:14 +00:00
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if leader == nil {
|
|
|
|
t.Fatal("did not find a leader?")
|
|
|
|
}
|
2017-07-03 15:45:22 +00:00
|
|
|
|
2017-11-10 20:26:37 +00:00
|
|
|
leaderMock := mocks[leaderi]
|
|
|
|
|
|
|
|
// Remove leader from set
|
|
|
|
clusters = append(clusters[:leaderi], clusters[leaderi+1:]...)
|
|
|
|
mocks = append(mocks[:leaderi], mocks[leaderi+1:]...)
|
2018-06-27 04:03:15 +00:00
|
|
|
defer leader.Shutdown(ctx)
|
2017-11-10 20:26:37 +00:00
|
|
|
defer leaderMock.Close()
|
|
|
|
|
2019-02-27 20:19:10 +00:00
|
|
|
prefix := test.Cid1.Prefix()
|
2017-07-03 15:45:22 +00:00
|
|
|
|
|
|
|
// Pin nCluster random pins. This ensures each peer will
|
|
|
|
// pin the same number of Cids.
|
|
|
|
for i := 0; i < nClusters; i++ {
|
|
|
|
h, err := prefix.Sum(randomBytes())
|
|
|
|
checkErr(t, err)
|
Improve pin/unpin method signatures (#843)
* Improve pin/unpin method signatures:
These changes the following Cluster Go API methods:
* -> Cluster.Pin(ctx, cid, options) (pin, error)
* -> Cluster.Unpin(ctx, cid) (pin, error)
* -> Cluster.PinPath(ctx, path, opts) (pin,error)
Pin and Unpin now return the pinned object.
The signature of the methods now matches that of the API Client, is clearer as
to what options the user can set and is aligned with PinPath, UnpinPath, which
returned pin methods.
The REST API now returns the Pinned/Unpinned object rather than 204-Accepted.
This was necessary for a cleaner pin/update approach, which I'm working on in
another branch.
Most of the changes here are updating tests to the new signatures
* Adapt load-balancing client to new Pin/Unpin signatures
* cluster.go: Fix typo
Co-Authored-By: Kishan Sagathiya <kishansagathiya@gmail.com>
* cluster.go: Fix typo
Co-Authored-By: Kishan Sagathiya <kishansagathiya@gmail.com>
2019-07-22 13:39:11 +00:00
|
|
|
_, err = leader.Pin(ctx, h, api.PinOptions{})
|
2017-07-03 15:45:22 +00:00
|
|
|
checkErr(t, err)
|
2018-03-16 16:37:39 +00:00
|
|
|
ttlDelay()
|
2017-07-03 15:45:22 +00:00
|
|
|
}
|
|
|
|
|
2018-03-16 16:37:39 +00:00
|
|
|
pinDelay()
|
2017-07-03 15:45:22 +00:00
|
|
|
|
2018-05-02 11:49:19 +00:00
|
|
|
// At this point, all peers must have nClusters -1 pins
|
|
|
|
// associated to them.
|
|
|
|
// Find out which pins are associated to the leader.
|
2018-09-22 01:00:10 +00:00
|
|
|
interestingCids := []cid.Cid{}
|
2017-07-03 15:45:22 +00:00
|
|
|
|
2019-02-20 14:24:25 +00:00
|
|
|
pins, err := leader.Pins(ctx)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2017-07-03 15:45:22 +00:00
|
|
|
if len(pins) != nClusters {
|
|
|
|
t.Fatal("expected number of tracked pins to be nClusters")
|
|
|
|
}
|
|
|
|
for _, p := range pins {
|
2017-10-31 10:20:14 +00:00
|
|
|
if containsPeer(p.Allocations, leaderID) {
|
|
|
|
//t.Logf("%s pins %s", leaderID, p.Cid)
|
2017-07-03 15:45:22 +00:00
|
|
|
interestingCids = append(interestingCids, p.Cid)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(interestingCids) != nClusters-1 {
|
2017-08-29 00:53:16 +00:00
|
|
|
//t.Fatal("The number of allocated Cids is not expected")
|
2017-09-01 12:09:37 +00:00
|
|
|
t.Fatalf("Expected %d allocated CIDs but got %d", nClusters-1,
|
2017-08-29 00:53:16 +00:00
|
|
|
len(interestingCids))
|
2017-07-03 15:45:22 +00:00
|
|
|
}
|
|
|
|
|
2017-10-31 10:20:14 +00:00
|
|
|
// Now the leader removes itself
|
2018-06-27 04:03:15 +00:00
|
|
|
err = leader.PeerRemove(ctx, leaderID)
|
2017-07-03 15:45:22 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal("error removing peer:", err)
|
|
|
|
}
|
|
|
|
|
2018-04-05 14:09:41 +00:00
|
|
|
delay()
|
2018-03-29 20:31:11 +00:00
|
|
|
waitForLeaderAndMetrics(t, clusters)
|
2018-04-05 14:09:41 +00:00
|
|
|
delay() // this seems to fail when not waiting enough...
|
2017-07-03 15:45:22 +00:00
|
|
|
|
|
|
|
for _, icid := range interestingCids {
|
|
|
|
// Now check that the allocations are new.
|
2018-06-27 04:03:15 +00:00
|
|
|
newPin, err := clusters[1].PinGet(ctx, icid)
|
2017-07-03 15:45:22 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal("error getting the new allocations for", icid)
|
|
|
|
}
|
2017-10-31 10:20:14 +00:00
|
|
|
if containsPeer(newPin.Allocations, leaderID) {
|
2017-07-03 15:45:22 +00:00
|
|
|
t.Fatal("pin should not be allocated to the removed peer")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-02-02 22:52:06 +00:00
|
|
|
func TestClustersPeerJoin(t *testing.T) {
|
2018-06-27 04:03:15 +00:00
|
|
|
ctx := context.Background()
|
2019-02-20 14:24:25 +00:00
|
|
|
clusters, mocks, boot := peerManagerClusters(t)
|
2017-02-02 22:52:06 +00:00
|
|
|
defer shutdownClusters(t, clusters, mocks)
|
2019-02-20 14:24:25 +00:00
|
|
|
defer boot.Close()
|
2017-02-02 22:52:06 +00:00
|
|
|
|
|
|
|
if len(clusters) < 3 {
|
|
|
|
t.Skip("test needs at least 3 clusters")
|
|
|
|
}
|
|
|
|
|
|
|
|
for i := 1; i < len(clusters); i++ {
|
2018-06-27 04:03:15 +00:00
|
|
|
err := clusters[i].Join(ctx, clusterAddr(clusters[0]))
|
2017-02-02 22:52:06 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
2017-01-30 12:12:25 +00:00
|
|
|
}
|
|
|
|
}
|
2019-05-09 19:24:56 +00:00
|
|
|
|
Improve pin/unpin method signatures (#843)
* Improve pin/unpin method signatures:
These changes the following Cluster Go API methods:
* -> Cluster.Pin(ctx, cid, options) (pin, error)
* -> Cluster.Unpin(ctx, cid) (pin, error)
* -> Cluster.PinPath(ctx, path, opts) (pin,error)
Pin and Unpin now return the pinned object.
The signature of the methods now matches that of the API Client, is clearer as
to what options the user can set and is aligned with PinPath, UnpinPath, which
returned pin methods.
The REST API now returns the Pinned/Unpinned object rather than 204-Accepted.
This was necessary for a cleaner pin/update approach, which I'm working on in
another branch.
Most of the changes here are updating tests to the new signatures
* Adapt load-balancing client to new Pin/Unpin signatures
* cluster.go: Fix typo
Co-Authored-By: Kishan Sagathiya <kishansagathiya@gmail.com>
* cluster.go: Fix typo
Co-Authored-By: Kishan Sagathiya <kishansagathiya@gmail.com>
2019-07-22 13:39:11 +00:00
|
|
|
h := test.Cid1
|
|
|
|
clusters[0].Pin(ctx, h, api.PinOptions{})
|
2018-03-16 16:37:39 +00:00
|
|
|
pinDelay()
|
2017-01-30 12:12:25 +00:00
|
|
|
|
2018-06-27 04:03:15 +00:00
|
|
|
for _, p := range clusters {
|
|
|
|
t.Log(p.id.String())
|
|
|
|
}
|
|
|
|
|
2017-02-02 22:52:06 +00:00
|
|
|
f := func(t *testing.T, c *Cluster) {
|
2018-06-27 04:03:15 +00:00
|
|
|
peers := c.Peers(ctx)
|
|
|
|
str := c.id.String() + "\n"
|
|
|
|
for _, p := range peers {
|
|
|
|
str += " - " + p.ID.String() + "\n"
|
|
|
|
}
|
|
|
|
t.Log(str)
|
2017-02-02 22:52:06 +00:00
|
|
|
if len(peers) != nClusters {
|
|
|
|
t.Error("all peers should be connected")
|
|
|
|
}
|
2019-02-20 14:24:25 +00:00
|
|
|
pins, err := c.Pins(ctx)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
Improve pin/unpin method signatures (#843)
* Improve pin/unpin method signatures:
These changes the following Cluster Go API methods:
* -> Cluster.Pin(ctx, cid, options) (pin, error)
* -> Cluster.Unpin(ctx, cid) (pin, error)
* -> Cluster.PinPath(ctx, path, opts) (pin,error)
Pin and Unpin now return the pinned object.
The signature of the methods now matches that of the API Client, is clearer as
to what options the user can set and is aligned with PinPath, UnpinPath, which
returned pin methods.
The REST API now returns the Pinned/Unpinned object rather than 204-Accepted.
This was necessary for a cleaner pin/update approach, which I'm working on in
another branch.
Most of the changes here are updating tests to the new signatures
* Adapt load-balancing client to new Pin/Unpin signatures
* cluster.go: Fix typo
Co-Authored-By: Kishan Sagathiya <kishansagathiya@gmail.com>
* cluster.go: Fix typo
Co-Authored-By: Kishan Sagathiya <kishansagathiya@gmail.com>
2019-07-22 13:39:11 +00:00
|
|
|
if len(pins) != 1 || !pins[0].Cid.Equals(h) {
|
2017-02-02 22:52:06 +00:00
|
|
|
t.Error("all peers should have pinned the cid")
|
|
|
|
}
|
|
|
|
}
|
2017-01-30 12:12:25 +00:00
|
|
|
runF(t, clusters, f)
|
|
|
|
}
|
2017-02-02 22:52:06 +00:00
|
|
|
|
|
|
|
func TestClustersPeerJoinAllAtOnce(t *testing.T) {
|
2018-06-27 04:03:15 +00:00
|
|
|
ctx := context.Background()
|
2019-02-20 14:24:25 +00:00
|
|
|
clusters, mocks, boot := peerManagerClusters(t)
|
2017-02-02 22:52:06 +00:00
|
|
|
defer shutdownClusters(t, clusters, mocks)
|
2019-02-20 14:24:25 +00:00
|
|
|
defer boot.Close()
|
2017-02-02 22:52:06 +00:00
|
|
|
|
|
|
|
if len(clusters) < 2 {
|
|
|
|
t.Skip("test needs at least 2 clusters")
|
|
|
|
}
|
|
|
|
|
|
|
|
f := func(t *testing.T, c *Cluster) {
|
2018-06-27 04:03:15 +00:00
|
|
|
err := c.Join(ctx, clusterAddr(clusters[0]))
|
2017-02-02 22:52:06 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
runF(t, clusters[1:], f)
|
|
|
|
|
Improve pin/unpin method signatures (#843)
* Improve pin/unpin method signatures:
These changes the following Cluster Go API methods:
* -> Cluster.Pin(ctx, cid, options) (pin, error)
* -> Cluster.Unpin(ctx, cid) (pin, error)
* -> Cluster.PinPath(ctx, path, opts) (pin,error)
Pin and Unpin now return the pinned object.
The signature of the methods now matches that of the API Client, is clearer as
to what options the user can set and is aligned with PinPath, UnpinPath, which
returned pin methods.
The REST API now returns the Pinned/Unpinned object rather than 204-Accepted.
This was necessary for a cleaner pin/update approach, which I'm working on in
another branch.
Most of the changes here are updating tests to the new signatures
* Adapt load-balancing client to new Pin/Unpin signatures
* cluster.go: Fix typo
Co-Authored-By: Kishan Sagathiya <kishansagathiya@gmail.com>
* cluster.go: Fix typo
Co-Authored-By: Kishan Sagathiya <kishansagathiya@gmail.com>
2019-07-22 13:39:11 +00:00
|
|
|
h := test.Cid1
|
|
|
|
clusters[0].Pin(ctx, h, api.PinOptions{})
|
2018-03-16 16:37:39 +00:00
|
|
|
pinDelay()
|
2017-02-02 22:52:06 +00:00
|
|
|
|
|
|
|
f2 := func(t *testing.T, c *Cluster) {
|
2018-06-27 04:03:15 +00:00
|
|
|
peers := c.Peers(ctx)
|
2017-02-02 22:52:06 +00:00
|
|
|
if len(peers) != nClusters {
|
|
|
|
t.Error("all peers should be connected")
|
|
|
|
}
|
2019-02-20 14:24:25 +00:00
|
|
|
pins, err := c.Pins(ctx)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
Improve pin/unpin method signatures (#843)
* Improve pin/unpin method signatures:
These changes the following Cluster Go API methods:
* -> Cluster.Pin(ctx, cid, options) (pin, error)
* -> Cluster.Unpin(ctx, cid) (pin, error)
* -> Cluster.PinPath(ctx, path, opts) (pin,error)
Pin and Unpin now return the pinned object.
The signature of the methods now matches that of the API Client, is clearer as
to what options the user can set and is aligned with PinPath, UnpinPath, which
returned pin methods.
The REST API now returns the Pinned/Unpinned object rather than 204-Accepted.
This was necessary for a cleaner pin/update approach, which I'm working on in
another branch.
Most of the changes here are updating tests to the new signatures
* Adapt load-balancing client to new Pin/Unpin signatures
* cluster.go: Fix typo
Co-Authored-By: Kishan Sagathiya <kishansagathiya@gmail.com>
* cluster.go: Fix typo
Co-Authored-By: Kishan Sagathiya <kishansagathiya@gmail.com>
2019-07-22 13:39:11 +00:00
|
|
|
if len(pins) != 1 || !pins[0].Cid.Equals(h) {
|
2017-02-02 22:52:06 +00:00
|
|
|
t.Error("all peers should have pinned the cid")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
runF(t, clusters, f2)
|
|
|
|
}
|
|
|
|
|
2018-01-16 19:57:54 +00:00
|
|
|
// This test fails a lot when re-use port is not available (MacOS, Windows)
|
|
|
|
// func TestClustersPeerJoinAllAtOnceWithRandomBootstrap(t *testing.T) {
|
2019-02-20 14:24:25 +00:00
|
|
|
// 	clusters, mocks, boot := peerManagerClusters(t)
|
2018-01-16 19:57:54 +00:00
|
|
|
// defer shutdownClusters(t, clusters, mocks)
|
2019-02-20 14:24:25 +00:00
|
|
|
// defer boot.Close()
|
2018-01-16 19:57:54 +00:00
|
|
|
// if len(clusters) < 3 {
|
|
|
|
// t.Skip("test needs at least 3 clusters")
|
|
|
|
// }
|
|
|
|
|
|
|
|
// delay()
|
|
|
|
|
|
|
|
// // We have a 2 node cluster and the rest of nodes join
|
|
|
|
// // one of the two seeds randomly
|
|
|
|
|
|
|
|
// err := clusters[1].Join(clusterAddr(clusters[0]))
|
|
|
|
// if err != nil {
|
|
|
|
// t.Fatal(err)
|
|
|
|
// }
|
|
|
|
|
|
|
|
// f := func(t *testing.T, c *Cluster) {
|
|
|
|
// j := rand.Intn(2)
|
|
|
|
// err := c.Join(clusterAddr(clusters[j]))
|
|
|
|
// if err != nil {
|
|
|
|
// t.Fatal(err)
|
|
|
|
// }
|
|
|
|
// }
|
|
|
|
// runF(t, clusters[2:], f)
|
|
|
|
|
2019-02-27 20:19:10 +00:00
|
|
|
// hash := test.Cid1
|
2018-01-16 19:57:54 +00:00
|
|
|
// clusters[0].Pin(api.PinCid(hash))
|
|
|
|
// delay()
|
|
|
|
|
|
|
|
// f2 := func(t *testing.T, c *Cluster) {
|
|
|
|
// peers := c.Peers()
|
|
|
|
// if len(peers) != nClusters {
|
|
|
|
// peersIds := []peer.ID{}
|
|
|
|
// for _, p := range peers {
|
|
|
|
// peersIds = append(peersIds, p.ID)
|
|
|
|
// }
|
|
|
|
// t.Errorf("%s sees %d peers: %s", c.id, len(peers), peersIds)
|
|
|
|
// }
|
|
|
|
// pins := c.Pins()
|
|
|
|
// if len(pins) != 1 || !pins[0].Cid.Equals(hash) {
|
|
|
|
// t.Error("all peers should have pinned the cid")
|
|
|
|
// }
|
|
|
|
// }
|
|
|
|
// runF(t, clusters, f2)
|
|
|
|
// }
|
2017-10-23 11:46:37 +00:00
|
|
|
|
|
|
|
// Tests that a peer catches up on the state correctly after rejoining
|
|
|
|
func TestClustersPeerRejoin(t *testing.T) {
|
2018-06-27 04:03:15 +00:00
|
|
|
ctx := context.Background()
|
2019-02-20 14:24:25 +00:00
|
|
|
clusters, mocks, boot := peerManagerClusters(t)
|
2017-10-23 11:46:37 +00:00
|
|
|
defer shutdownClusters(t, clusters, mocks)
|
2019-02-20 14:24:25 +00:00
|
|
|
defer boot.Close()
|
2017-10-23 11:46:37 +00:00
|
|
|
|
|
|
|
// pin something in c0
|
2019-02-27 20:19:10 +00:00
|
|
|
pin1 := test.Cid1
|
Improve pin/unpin method signatures (#843)
* Improve pin/unpin method signatures:
These changes the following Cluster Go API methods:
* -> Cluster.Pin(ctx, cid, options) (pin, error)
* -> Cluster.Unpin(ctx, cid) (pin, error)
* -> Cluster.PinPath(ctx, path, opts) (pin,error)
Pin and Unpin now return the pinned object.
The signature of the methods now matches that of the API Client, is clearer as
to what options the user can set and is aligned with PinPath, UnpinPath, which
returned pin methods.
The REST API now returns the Pinned/Unpinned object rather than 204-Accepted.
This was necessary for a cleaner pin/update approach, which I'm working on in
another branch.
Most of the changes here are updating tests to the new signatures
* Adapt load-balancing client to new Pin/Unpin signatures
* cluster.go: Fix typo
Co-Authored-By: Kishan Sagathiya <kishansagathiya@gmail.com>
* cluster.go: Fix typo
Co-Authored-By: Kishan Sagathiya <kishansagathiya@gmail.com>
2019-07-22 13:39:11 +00:00
|
|
|
_, err := clusters[0].Pin(ctx, pin1, api.PinOptions{})
|
2017-10-23 11:46:37 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// add all clusters
|
|
|
|
for i := 1; i < len(clusters); i++ {
|
2018-06-27 04:03:15 +00:00
|
|
|
err := clusters[i].Join(ctx, clusterAddr(clusters[0]))
|
2017-10-23 11:46:37 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
delay()
|
|
|
|
|
|
|
|
// all added peers should have the content
|
|
|
|
for i := 1; i < len(clusters); i++ {
|
2018-06-27 04:03:15 +00:00
|
|
|
pinfo := clusters[i].tracker.Status(ctx, pin1)
|
2017-10-23 11:46:37 +00:00
|
|
|
if pinfo.Status != api.TrackerStatusPinned {
|
|
|
|
t.Error("Added peers should pin the content")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-11-08 19:04:04 +00:00
|
|
|
clusters[0].config.LeaveOnShutdown = true
|
2018-06-27 04:03:15 +00:00
|
|
|
err = clusters[0].Shutdown(ctx)
|
2017-11-08 19:04:04 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2017-10-23 11:46:37 +00:00
|
|
|
mocks[0].Close()
|
|
|
|
|
2017-11-08 19:04:04 +00:00
|
|
|
delay()
|
|
|
|
|
|
|
|
// Forget peer so we can re-add one in same address/port
|
|
|
|
f := func(t *testing.T, c *Cluster) {
|
2019-02-20 14:24:25 +00:00
|
|
|
c.peerManager.RmPeer(clusters[0].id) // errors ignore for crdts
|
2017-11-08 19:04:04 +00:00
|
|
|
}
|
|
|
|
runF(t, clusters[1:], f)
|
2017-10-23 11:46:37 +00:00
|
|
|
|
|
|
|
// Pin something on the rest
|
2019-02-27 20:19:10 +00:00
|
|
|
pin2 := test.Cid2
|
Improve pin/unpin method signatures (#843)
* Improve pin/unpin method signatures:
These changes the following Cluster Go API methods:
* -> Cluster.Pin(ctx, cid, options) (pin, error)
* -> Cluster.Unpin(ctx, cid) (pin, error)
* -> Cluster.PinPath(ctx, path, opts) (pin,error)
Pin and Unpin now return the pinned object.
The signature of the methods now matches that of the API Client, is clearer as
to what options the user can set and is aligned with PinPath, UnpinPath, which
returned pin methods.
The REST API now returns the Pinned/Unpinned object rather than 204-Accepted.
This was necessary for a cleaner pin/update approach, which I'm working on in
another branch.
Most of the changes here are updating tests to the new signatures
* Adapt load-balancing client to new Pin/Unpin signatures
* cluster.go: Fix typo
Co-Authored-By: Kishan Sagathiya <kishansagathiya@gmail.com>
* cluster.go: Fix typo
Co-Authored-By: Kishan Sagathiya <kishansagathiya@gmail.com>
2019-07-22 13:39:11 +00:00
|
|
|
_, err = clusters[1].Pin(ctx, pin2, api.PinOptions{})
|
2017-10-23 11:46:37 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
2018-03-16 16:37:39 +00:00
|
|
|
pinDelay()
|
2017-10-23 11:46:37 +00:00
|
|
|
|
|
|
|
// Rejoin c0
|
|
|
|
c0, m0 := createOnePeerCluster(t, 0, testingClusterSecret)
|
|
|
|
clusters[0] = c0
|
|
|
|
mocks[0] = m0
|
2018-08-15 10:30:00 +00:00
|
|
|
|
|
|
|
delay()
|
|
|
|
|
2018-06-27 04:03:15 +00:00
|
|
|
err = c0.Join(ctx, clusterAddr(clusters[1]))
|
2017-10-23 11:46:37 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
delay()
|
|
|
|
|
2018-06-27 04:03:15 +00:00
|
|
|
pinfo := clusters[0].tracker.Status(ctx, pin2)
|
2017-10-23 11:46:37 +00:00
|
|
|
if pinfo.Status != api.TrackerStatusPinned {
|
|
|
|
t.Error("re-joined cluster should have caught up")
|
|
|
|
}
|
|
|
|
|
2018-06-27 04:03:15 +00:00
|
|
|
pinfo = clusters[0].tracker.Status(ctx, pin1)
|
2017-10-23 11:46:37 +00:00
|
|
|
if pinfo.Status != api.TrackerStatusPinned {
|
|
|
|
t.Error("re-joined cluster should have original pin")
|
|
|
|
}
|
|
|
|
}
|