ipfs-cluster/pstoremgr/pstoremgr_test.go
Hector Sanjuan 196aa23f34 Fix #787: Connectivity fixes
Currently, unless doing Join() (--bootstrap), we do not connect to any peers on startup.

We did, however, load the peerstore file, and Raft would automatically connect
to peers from previous runs to figure out who the leader is, etc. DHT bootstrap,
once Raft was working, did the rest.

For CRDTs we need to connect to peers during a normal boot too: otherwise, unless
bootstrapping, no connections happen at all, even if the peerstore contains known peers.

This introduces a number of changes:

* Move peerstore file management back inside the Cluster component, which was
already in charge of saving the peerstore file.
* We keep saving all "known addresses", but we load them with a non-permanent
TTL, so that addresses of peers we do not stay connected to are eventually cleaned up.
* "Bootstrap" (connect) to a small number of peers during Cluster component creation.
* Bootstrap the DHT as soon as possible after this, so that other cluster
components can initialize with a working peer-discovery mechanism.
* CRDT Trust() method will now (sketched right after this list):
  * Protect the trusted Peer ID in the conn manager
  * Give top priority in the PeerManager to that Peer (see below)
  * Mark its addresses as permanent in the Peerstore
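
A sketch of those three steps, assuming a libp2p host `h`, a pstoremgr Manager
`mgr`, and that lower priority values sort first (as TestPriority below
suggests). This is illustrative, not the actual crdt code, and the "trusted"
tag name is made up:

    // Keep connections to the trusted peer from being pruned by the
    // connection manager.
    h.ConnManager().Protect(pid, "trusted")

    // Lower values sort first, so 0 puts the peer at the top of the
    // saved peerstore file.
    mgr.SetPriority(pid, 0)

    // Remember the peer's addresses forever, instead of the
    // non-permanent TTL used for regular imports.
    h.Peerstore().SetAddrs(pid, h.Peerstore().Addrs(pid), peerstore.PermanentAddrTTL)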

The PeerManager now attaches priorities to peers when importing them and is
able to order them according to that priority. The result is that peers with
high priority are saved first in the peerstore file. When we load the peerstore
file, the first entries in it are given the highest priority.
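
For example, condensed from TestPriority below (addr1/addr2 and pid1/pid2 are
placeholders):

    // Peers imported together keep their import order.
    pm.ImportPeers([]ma.Multiaddr{addr1, addr2}, false, time.Minute)

    // Demote pid1: higher values sort later.
    pm.SetPriority(pid1, 100)

    // pid2 is now written to the peerstore file before pid1, so on the
    // next start it is loaded, and connected to, first.
    pm.SavePeerstoreForPeers([]peer.ID{pid1, pid2})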

This means that during startup we will connect to "trusted peers" first
(because they have been tagged with priority in the previous run and saved at
the top of the list). Once connected to a small number of peers, we let the
DHT bootstrap process in the background do the rest and discover the network.

All this makes the peerstore file a "bootstrap" list for CRDTs and we will attempt
to connect to peers on that list until some of those connections succeed.
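
A minimal sketch of that loop, assuming pinfos was loaded from the peerstore
file (highest priority first) and `want` is the small number of connections we
are happy with:

    connected := 0
    for _, pi := range pinfos {
        if connected >= want {
            break // enough peers; background DHT bootstrap finds the rest
        }
        if err := h.Connect(ctx, pi); err == nil {
            connected++
        }
    }
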
2019-05-27 14:27:23 +02:00


package pstoremgr

import (
    "context"
    "os"
    "testing"
    "time"

    "github.com/ipfs/ipfs-cluster/test"

    libp2p "github.com/libp2p/go-libp2p"
    peer "github.com/libp2p/go-libp2p-peer"
    ma "github.com/multiformats/go-multiaddr"
)
// makeMgr returns a Manager backed by a fresh libp2p host, storing its
// peerstore file as "peerstore" in the current directory.
func makeMgr(t *testing.T) *Manager {
    h, err := libp2p.New(context.Background())
    if err != nil {
        t.Fatal(err)
    }
    return New(context.Background(), h, "peerstore")
}
// clean removes the peerstore file created by a Manager.
func clean(pm *Manager) {
    if path := pm.peerstorePath; path != "" {
        os.RemoveAll(path)
    }
}
// testAddr appends the peer ID to a location multiaddress, e.g.
// "/ip4/127.0.0.1/tcp/1234/ipfs/Qm...".
func testAddr(loc string, pid peer.ID) ma.Multiaddr {
    m, _ := ma.NewMultiaddr(loc + "/ipfs/" + peer.IDB58Encode(pid))
    return m
}
// TestManager checks importing, listing (the local host is excluded)
// and removing a peer.
func TestManager(t *testing.T) {
    pm := makeMgr(t)
    defer clean(pm)

    loc := "/ip4/127.0.0.1/tcp/1234"
    testAddr := testAddr(loc, test.PeerID1)

    _, err := pm.ImportPeer(testAddr, false, time.Minute)
    if err != nil {
        t.Fatal(err)
    }

    peers := []peer.ID{test.PeerID1, pm.host.ID()}
    pinfos := pm.PeerInfos(peers)
    if len(pinfos) != 1 {
        t.Fatal("expected 1 peerinfo")
    }

    if pinfos[0].ID != test.PeerID1 {
        t.Error("expected same peer as added")
    }

    if len(pinfos[0].Addrs) != 1 {
        t.Fatal("expected an address")
    }

    if pinfos[0].Addrs[0].String() != loc {
        t.Error("expected same address as added")
    }

    pm.RmPeer(peers[0])
    pinfos = pm.PeerInfos(peers)
    if len(pinfos) != 0 {
        t.Fatal("expected 0 pinfos")
    }
}
// TestManagerDNS checks that importing a DNS address for a peer leaves
// it as the peer's only known address.
func TestManagerDNS(t *testing.T) {
    pm := makeMgr(t)
    defer clean(pm)

    loc1 := "/ip4/127.0.0.1/tcp/1234"
    testAddr1 := testAddr(loc1, test.PeerID1)
    loc2 := "/dns4/localhost/tcp/1235"
    testAddr2 := testAddr(loc2, test.PeerID1)

    err := pm.ImportPeers([]ma.Multiaddr{testAddr1, testAddr2}, false, time.Minute)
    if err != nil {
        t.Fatal(err)
    }

    pinfos := pm.PeerInfos([]peer.ID{test.PeerID1})
    if len(pinfos) != 1 {
        t.Fatal("expected 1 pinfo")
    }

    if len(pinfos[0].Addrs) != 1 {
        t.Error("expected a single address")
    }

    if pinfos[0].Addrs[0].String() != "/dns4/localhost/tcp/1235" {
        t.Error("expected the dns address")
    }
}
// TestPeerstore checks that saving the peerstore file and importing it
// from a fresh Manager preserves all known addresses.
func TestPeerstore(t *testing.T) {
    pm := makeMgr(t)
    defer clean(pm)

    loc1 := "/ip4/127.0.0.1/tcp/1234"
    testAddr1 := testAddr(loc1, test.PeerID1)
    loc2 := "/ip4/127.0.0.1/tcp/1235"
    testAddr2 := testAddr(loc2, test.PeerID1)

    err := pm.ImportPeers([]ma.Multiaddr{testAddr1, testAddr2}, false, time.Minute)
    if err != nil {
        t.Fatal(err)
    }

    pm.SavePeerstoreForPeers([]peer.ID{test.PeerID1})

    pm2 := makeMgr(t)
    defer clean(pm2)

    err = pm2.ImportPeersFromPeerstore(false, time.Minute)
    if err != nil {
        t.Fatal(err)
    }

    pinfos := pm2.PeerInfos([]peer.ID{test.PeerID1})
    if len(pinfos) != 1 {
        t.Fatal("expected 1 peer in the peerstore")
    }

    if len(pinfos[0].Addrs) != 2 {
        t.Error("expected 2 addresses")
    }
}
// TestPriority checks that peers are ordered by import order by default,
// that SetPriority can demote a peer, and that the resulting order
// survives saving and re-importing the peerstore file.
func TestPriority(t *testing.T) {
    pm := makeMgr(t)
    defer clean(pm)

    loc1 := "/ip4/127.0.0.1/tcp/1234"
    testAddr1 := testAddr(loc1, test.PeerID1)
    loc2 := "/ip4/127.0.0.2/tcp/1235"
    testAddr2 := testAddr(loc2, test.PeerID2)
    loc3 := "/ip4/127.0.0.3/tcp/1234"
    testAddr3 := testAddr(loc3, test.PeerID3)
    loc4 := "/ip4/127.0.0.4/tcp/1235"
    testAddr4 := testAddr(loc4, test.PeerID4)

    err := pm.ImportPeers([]ma.Multiaddr{testAddr1, testAddr2, testAddr3, testAddr4}, false, time.Minute)
    if err != nil {
        t.Fatal(err)
    }

    pinfos := pm.PeerInfos([]peer.ID{test.PeerID4, test.PeerID2, test.PeerID3, test.PeerID1})
    if len(pinfos) != 4 {
        t.Fatal("expected 4 pinfos")
    }

    if pinfos[0].ID != test.PeerID1 ||
        pinfos[1].ID != test.PeerID2 ||
        pinfos[2].ID != test.PeerID3 ||
        pinfos[3].ID != test.PeerID4 {
        t.Error("wrong order of peerinfos")
    }

    pm.SetPriority(test.PeerID1, 100)

    pinfos = pm.PeerInfos([]peer.ID{test.PeerID4, test.PeerID2, test.PeerID3, test.PeerID1})
    if len(pinfos) != 4 {
        t.Fatal("expected 4 pinfos")
    }

    if pinfos[3].ID != test.PeerID1 {
        t.Fatal("PeerID1 should be last in the list")
    }

    pm.SavePeerstoreForPeers([]peer.ID{test.PeerID4, test.PeerID2, test.PeerID3, test.PeerID1})

    pm2 := makeMgr(t)
    defer clean(pm2)

    err = pm2.ImportPeersFromPeerstore(false, time.Minute)
    if err != nil {
        t.Fatal(err)
    }

    pinfos = pm2.PeerInfos([]peer.ID{test.PeerID4, test.PeerID2, test.PeerID3, test.PeerID1})
    if len(pinfos) != 4 {
        t.Fatal("expected 4 pinfos")
    }

    if pinfos[0].ID != test.PeerID2 ||
        pinfos[1].ID != test.PeerID3 ||
        pinfos[2].ID != test.PeerID4 ||
        pinfos[3].ID != test.PeerID1 {
        t.Error("wrong order of peerinfos")
    }
}