3b3f786d68
This commit adds support for OpenCensus tracing and metrics collection. This required support for context.Context propagation throughout the cluster codebase, and in particular through the ipfscluster component interfaces. Tracing propagates across RPC and HTTP boundaries. The current default tracing backend is Jaeger. Metrics collection currently exports the metrics exposed by the OpenCensus HTTP plugin, as well as pprof metrics, to a Prometheus endpoint for scraping; the current default metrics backend is Prometheus. Metrics are exposed by default because their overhead is low, and can be turned off if desired. Tracing, by contrast, is off by default because it carries a much higher performance overhead, though the extent of the hit can be reduced with smaller sampling rates.

License: MIT
Signed-off-by: Adrian Lanzafame <adrianlanzafame92@gmail.com>
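As a rough illustration of the wiring this enables, here is a minimal sketch using the stock opencensus-go Jaeger and Prometheus exporters; the endpoints, namespace, service name and sampling rate are illustrative assumptions, not cluster's actual defaults:

// Sketch only: registering OpenCensus tracing and metrics exporters.
// All endpoints and rates below are placeholders, not ipfs-cluster's
// real configuration.
package main

import (
	"log"
	"net/http"

	"contrib.go.opencensus.io/exporter/jaeger"
	"contrib.go.opencensus.io/exporter/prometheus"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/trace"
)

func main() {
	// Tracing: send spans to a local Jaeger agent. Tracing is costly,
	// so a small sampling probability bounds its overhead.
	je, err := jaeger.NewExporter(jaeger.Options{
		AgentEndpoint: "localhost:6831",
		Process:       jaeger.Process{ServiceName: "ipfs-cluster"},
	})
	if err != nil {
		log.Fatal(err)
	}
	trace.RegisterExporter(je)
	trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(0.1)})

	// Metrics: register a Prometheus exporter and serve a scrape endpoint.
	pe, err := prometheus.NewExporter(prometheus.Options{Namespace: "ipfscluster"})
	if err != nil {
		log.Fatal(err)
	}
	view.RegisterExporter(pe)
	http.Handle("/metrics", pe)
	log.Fatal(http.ListenAndServe(":8888", nil))
}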
package raft

import (
	"context"
	"fmt"
	"os"
	"testing"
	"time"

	"github.com/ipfs/ipfs-cluster/api"
	"github.com/ipfs/ipfs-cluster/state/mapstate"
	"github.com/ipfs/ipfs-cluster/test"

	cid "github.com/ipfs/go-cid"
	libp2p "github.com/libp2p/go-libp2p"
	host "github.com/libp2p/go-libp2p-host"
	peerstore "github.com/libp2p/go-libp2p-peerstore"
)

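// cleanRaft removes the Raft data folder created for test instance idn.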
func cleanRaft(idn int) {
	os.RemoveAll(fmt.Sprintf("raftFolderFromTests-%d", idn))
}

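// testPin builds a Pin for the given CID with replication factors
// set to -1 (replicate everywhere).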
func testPin(c cid.Cid) api.Pin {
	p := api.PinCid(c)
	p.ReplicationFactorMin = -1
	p.ReplicationFactorMax = -1
	return p
}

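// makeTestingHost creates a libp2p host listening on a random local TCP port.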
func makeTestingHost(t *testing.T) host.Host {
	h, err := libp2p.New(
		context.Background(),
		libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"),
	)
	if err != nil {
		t.Fatal(err)
	}
	return h
}

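// testingConsensus returns a ready Consensus instance backed by a fresh
// Raft data folder, a new libp2p host and a mock RPC client.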
func testingConsensus(t *testing.T, idn int) *Consensus {
	ctx := context.Background()
	cleanRaft(idn)
	h := makeTestingHost(t)
	st := mapstate.NewMapState()

	cfg := &Config{}
	cfg.Default()
	cfg.DataFolder = fmt.Sprintf("raftFolderFromTests-%d", idn)
	cfg.hostShutdown = true

	cc, err := NewConsensus(h, cfg, st, false)
	if err != nil {
		t.Fatal("cannot create Consensus:", err)
	}
	cc.SetClient(test.NewMockRPCClientWithHost(t, h))
	<-cc.Ready(ctx)
	return cc
}

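// TestShutdownConsensus checks that Consensus shuts down cleanly,
// tolerates repeated shutdowns, and can be re-initialized afterwards.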
func TestShutdownConsensus(t *testing.T) {
	ctx := context.Background()
	// Bring it up twice to make sure shutdown cleans up properly
	// but also to make sure raft comes up ok when re-initialized
	cc := testingConsensus(t, 1)
	defer cleanRaft(1)
	err := cc.Shutdown(ctx)
	if err != nil {
		t.Fatal("Consensus cannot shutdown:", err)
	}
	err = cc.Shutdown(ctx) // should be fine to shutdown twice
	if err != nil {
		t.Fatal("Consensus should be able to shutdown several times")
	}
	cleanRaft(1)

	cc = testingConsensus(t, 1)
	err = cc.Shutdown(ctx)
	if err != nil {
		t.Fatal("Consensus cannot shutdown:", err)
	}
	cleanRaft(1)
}

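// TestConsensusPin checks that a pin operation reaches the log and
// shows up in the shared state.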
func TestConsensusPin(t *testing.T) {
	ctx := context.Background()
	cc := testingConsensus(t, 1)
	defer cleanRaft(1) // Remember defer runs in LIFO order
	defer cc.Shutdown(ctx)

	c, _ := cid.Decode(test.TestCid1)
	err := cc.LogPin(ctx, testPin(c))
	if err != nil {
		t.Error("the operation did not make it to the log:", err)
	}

	time.Sleep(250 * time.Millisecond)
	st, err := cc.State(ctx)
	if err != nil {
		t.Fatal("error getting state:", err)
	}

	pins := st.List(ctx)
	if len(pins) != 1 || pins[0].Cid.String() != test.TestCid1 {
		t.Error("the added pin should be in the state")
	}
}

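// TestConsensusUnpin checks that an unpin operation reaches the log.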
func TestConsensusUnpin(t *testing.T) {
	ctx := context.Background()
	cc := testingConsensus(t, 1)
	defer cleanRaft(1)
	defer cc.Shutdown(ctx)

	c, _ := cid.Decode(test.TestCid2)
	err := cc.LogUnpin(ctx, api.PinCid(c))
	if err != nil {
		t.Error("the operation did not make it to the log:", err)
	}
}

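// TestConsensusUpdate pins a shard-type pin and then updates its
// Reference, verifying that the state reflects the update.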
func TestConsensusUpdate(t *testing.T) {
	ctx := context.Background()
	cc := testingConsensus(t, 1)
	defer cleanRaft(1)
	defer cc.Shutdown(ctx)

	// Pin first
	c1, _ := cid.Decode(test.TestCid1)
	pin := testPin(c1)
	pin.Type = api.ShardType
	err := cc.LogPin(ctx, pin)
	if err != nil {
		t.Fatal("the initial operation did not make it to the log:", err)
	}
	time.Sleep(250 * time.Millisecond)

	// Update pin
	c2, _ := cid.Decode(test.TestCid2)
	pin.Reference = c2
	err = cc.LogPin(ctx, pin)
	if err != nil {
		t.Error("the update op did not make it to the log:", err)
	}

	time.Sleep(250 * time.Millisecond)
	st, err := cc.State(ctx)
	if err != nil {
		t.Fatal("error getting state:", err)
	}

	pins := st.List(ctx)
	if len(pins) != 1 || pins[0].Cid.String() != test.TestCid1 {
		t.Error("the added pin should be in the state")
	}
	if !pins[0].Reference.Equals(c2) {
		t.Error("pin updated incorrectly")
	}
}

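// TestConsensusAddPeer verifies that a second peer can join the Raft cluster.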
func TestConsensusAddPeer(t *testing.T) {
	ctx := context.Background()
	cc := testingConsensus(t, 1)
	cc2 := testingConsensus(t, 2)
	t.Log(cc.host.ID().Pretty())
	t.Log(cc2.host.ID().Pretty())
	defer cleanRaft(1)
	defer cleanRaft(2)
	defer cc.Shutdown(ctx)
	defer cc2.Shutdown(ctx)

	cc.host.Peerstore().AddAddrs(cc2.host.ID(), cc2.host.Addrs(), peerstore.PermanentAddrTTL)
	err := cc.AddPeer(ctx, cc2.host.ID())
	if err != nil {
		t.Error("the operation did not make it to the log:", err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	err = cc2.raft.WaitForPeer(ctx, cc.host.ID().Pretty(), false)
	if err != nil {
		t.Fatal(err)
	}

	peers, err := cc2.raft.Peers(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if len(peers) != 2 {
		t.Error("peer was not added")
	}
}

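// TestConsensusRmPeer verifies that peers can be removed from the
// Raft cluster, including the no-op removal of an unknown peer.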
func TestConsensusRmPeer(t *testing.T) {
	ctx := context.Background()
	cc := testingConsensus(t, 1)
	cc2 := testingConsensus(t, 2)
	defer cleanRaft(1)
	defer cleanRaft(2)
	defer cc.Shutdown(ctx)
	defer cc2.Shutdown(ctx)

	cc.host.Peerstore().AddAddrs(cc2.host.ID(), cc2.host.Addrs(), peerstore.PermanentAddrTTL)

	err := cc.AddPeer(ctx, cc2.host.ID())
	if err != nil {
		t.Error("could not add peer:", err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()
	err = cc.raft.WaitForPeer(ctx, cc2.host.ID().Pretty(), false)
	if err != nil {
		t.Fatal(err)
	}
	cc.raft.WaitForLeader(ctx)

	c, _ := cid.Decode(test.TestCid1)
	err = cc.LogPin(ctx, testPin(c))
	if err != nil {
		t.Error("could not pin after adding peer:", err)
	}

	time.Sleep(2 * time.Second)

	// Remove a nonexistent peer
	err = cc.RmPeer(ctx, test.TestPeerID1)
	if err != nil {
		t.Error("the operation did not make it to the log:", err)
	}

	// Remove a real peer. At least the leader should succeed.
	err = cc2.RmPeer(ctx, cc.host.ID())
	err2 := cc.RmPeer(ctx, cc2.host.ID())
	if err != nil && err2 != nil {
		t.Error("could not remove peer:", err, err2)
	}

	err = cc.raft.WaitForPeer(ctx, cc2.host.ID().Pretty(), true)
	if err != nil {
		t.Fatal(err)
	}
}

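// TestConsensusLeader checks that a single-peer cluster reports
// itself as the leader.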
func TestConsensusLeader(t *testing.T) {
	ctx := context.Background()
	cc := testingConsensus(t, 1)
	pID := cc.host.ID()
	defer cleanRaft(1)
	defer cc.Shutdown(ctx)
	l, err := cc.Leader(ctx)
	if err != nil {
		t.Fatal("No leader:", err)
	}

	if l != pID {
		t.Errorf("expected %s but the leader appears as %s", pID, l)
	}
}

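// TestRaftLatestSnapshot takes a snapshot after each of three pins and
// checks that LastStateRaw returns the newest state with all three pins.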
func TestRaftLatestSnapshot(t *testing.T) {
	ctx := context.Background()
	cc := testingConsensus(t, 1)
	defer cleanRaft(1)
	defer cc.Shutdown(ctx)

	// Make pin 1
	c1, _ := cid.Decode(test.TestCid1)
	err := cc.LogPin(ctx, testPin(c1))
	if err != nil {
		t.Error("the first pin did not make it to the log:", err)
	}

	time.Sleep(250 * time.Millisecond)
	err = cc.raft.Snapshot()
	if err != nil {
		t.Error("the first snapshot was not taken successfully")
	}

	// Make pin 2
	c2, _ := cid.Decode(test.TestCid2)
	err = cc.LogPin(ctx, testPin(c2))
	if err != nil {
		t.Error("the second pin did not make it to the log:", err)
	}

	time.Sleep(250 * time.Millisecond)
	err = cc.raft.Snapshot()
	if err != nil {
		t.Error("the second snapshot was not taken successfully")
	}

	// Make pin 3
	c3, _ := cid.Decode(test.TestCid3)
	err = cc.LogPin(ctx, testPin(c3))
	if err != nil {
		t.Error("the third pin did not make it to the log:", err)
	}

	time.Sleep(250 * time.Millisecond)
	err = cc.raft.Snapshot()
	if err != nil {
		t.Error("the third snapshot was not taken successfully")
	}

	// Call LastStateRaw and ensure we get the correct state
	snapState := mapstate.NewMapState()
	r, snapExists, err := LastStateRaw(cc.config)
	if !snapExists {
		t.Fatal("No snapshot found by LastStateRaw")
	}
	if err != nil {
		t.Fatal("Error while reading the last snapshot:", err)
	}
	err = snapState.Migrate(ctx, r)
	if err != nil {
		t.Fatal("Snapshot bytes returned could not restore to state:", err)
	}
	pins := snapState.List(ctx)
	if len(pins) != 3 {
		t.Fatal("Latest snapshot not read")
	}
}