Feat #277: Add test about wanted < 0

License: MIT
Signed-off-by: Hector Sanjuan <code@hector.link>
Hector Sanjuan 2018-01-16 11:19:39 +01:00
parent b12138f7ff
commit b013850f94
9 changed files with 89 additions and 28 deletions

View File

@@ -943,13 +943,8 @@ func (c *Cluster) Pins() []api.Pin {
 // the item is successfully pinned. For that, use Status(). PinGet
 // returns an error if the given Cid is not part of the global state.
 func (c *Cluster) PinGet(h *cid.Cid) (api.Pin, error) {
-	cState, err := c.consensus.State()
-	if err != nil {
-		logger.Error(err)
-		return api.Pin{}, err
-	}
-	pin := cState.Get(h)
-	if pin.Cid == nil {
+	pin := c.getCurrentPin(h)
+	if pin.ReplicationFactorMin == 0 && pin.ReplicationFactorMax == 0 {
 		return pin, errors.New("cid is not part of the global state")
 	}
 	return pin, nil
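
Why the zero check works: the rewritten PinGet assumes getCurrentPin returns the zero value of api.Pin when the Cid is absent from the shared state, and that no valid pin ever carries 0 for both replication factors. A minimal sketch of that helper, assuming it simply wraps the old consensus lookup (the real definition lives elsewhere in cluster.go):

func (c *Cluster) getCurrentPin(h *cid.Cid) api.Pin {
	// Sketch: fetch the consensus state and return the zero-value
	// api.Pin when the lookup fails or the Cid is unknown.
	cState, err := c.consensus.State()
	if err != nil {
		logger.Error(err)
		return api.Pin{}
	}
	return cState.Get(h)
}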

View File

@@ -198,7 +198,11 @@ func TestClusterPin(t *testing.T) {
 	// test an error case
 	cl.consensus.Shutdown()
-	err = cl.Pin(api.PinCid(c))
+	err = cl.Pin(api.Pin{
+		Cid:                  c,
+		ReplicationFactorMax: 1,
+		ReplicationFactorMin: 1,
+	})
 	if err == nil {
 		t.Error("expected an error but things worked")
 	}
@@ -239,7 +243,7 @@ func TestClusterPinGet(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	if !pin.Cid.Equals(c) || pin.ReplicationFactorMax != -1 || pin.ReplicationFactorMax != -1 {
+	if !pin.Cid.Equals(c) || pin.ReplicationFactorMin != -1 || pin.ReplicationFactorMax != -1 {
 		t.Error("the Pin does not look as expected")
 	}

View File

@@ -159,16 +159,16 @@ func textFormatPrintVersion(obj *api.Version) {
 }

 func textFormatPrintPin(obj *api.PinSerial) {
-	fmt.Printf("%s | %s | Repl. Factor: %d.to.%d | Allocations: ",
-		obj.Cid, obj.Name,
-		obj.ReplicationFactorMin, obj.ReplicationFactorMax)
+	fmt.Printf("%s | %s | ", obj.Cid, obj.Name)
+
 	if obj.ReplicationFactorMin < 0 {
-		fmt.Printf("[everywhere]\n")
+		fmt.Printf("Repl. Factor: -1 | Allocations: [everywhere]\n")
 	} else {
 		var sortAlloc sort.StringSlice = obj.Allocations
 		sortAlloc.Sort()
-		fmt.Printf("%s\n", sortAlloc)
+		fmt.Printf("Repl. Factor: %d->%d | Allocations: %s\n",
+			obj.ReplicationFactorMin, obj.ReplicationFactorMax,
+			sortAlloc)
 	}
 }
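
For reference, a self-contained sketch of the two output shapes the new code produces; pinInfo and printPin are hypothetical stand-ins for api.PinSerial and textFormatPrintPin:

package main

import (
	"fmt"
	"sort"
)

// pinInfo mirrors only the fields textFormatPrintPin reads.
type pinInfo struct {
	Cid, Name                                  string
	ReplicationFactorMin, ReplicationFactorMax int
	Allocations                                []string
}

func printPin(obj pinInfo) {
	fmt.Printf("%s | %s | ", obj.Cid, obj.Name)
	if obj.ReplicationFactorMin < 0 {
		fmt.Printf("Repl. Factor: -1 | Allocations: [everywhere]\n")
	} else {
		var sortAlloc sort.StringSlice = obj.Allocations
		sortAlloc.Sort()
		fmt.Printf("Repl. Factor: %d->%d | Allocations: %s\n",
			obj.ReplicationFactorMin, obj.ReplicationFactorMax, sortAlloc)
	}
}

func main() {
	// Prints: Qm1 | a | Repl. Factor: -1 | Allocations: [everywhere]
	printPin(pinInfo{Cid: "Qm1", Name: "a", ReplicationFactorMin: -1, ReplicationFactorMax: -1})
	// Prints: Qm2 | b | Repl. Factor: 1->2 | Allocations: [peerA peerB]
	printPin(pinInfo{
		Cid: "Qm2", Name: "b",
		ReplicationFactorMin: 1, ReplicationFactorMax: 2,
		Allocations: []string{"peerB", "peerA"},
	})
}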

View File

@@ -868,12 +868,64 @@ func TestClustersReplicationFactorMax(t *testing.T) {
 		}
 		if p.ReplicationFactorMax != nClusters-1 {
-			t.Error("rplMax should be nClusters")
+			t.Error("rplMax should be nClusters-1")
 		}
 	}
 	runF(t, clusters, f)
 }

+// This test checks that repinning something that is overpinned
+// removes some allocations
+func TestClustersReplicationFactorMaxLower(t *testing.T) {
+	if nClusters < 5 {
+		t.Skip("Need at least 5 peers")
+	}
+	clusters, mock := createClusters(t)
+	defer shutdownClusters(t, clusters, mock)
+	for _, c := range clusters {
+		c.config.ReplicationFactorMin = 1
+		c.config.ReplicationFactorMax = nClusters
+	}
+
+	h, _ := cid.Decode(test.TestCid1)
+	err := clusters[0].Pin(api.PinCid(h))
+	if err != nil {
+		t.Fatal(err)
+	}
+	delay()
+
+	p1, err := clusters[0].PinGet(h)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(p1.Allocations) != nClusters {
+		t.Fatal("allocations should be nClusters")
+	}
+
+	err = clusters[0].Pin(api.Pin{
+		Cid:                  h,
+		ReplicationFactorMax: 2,
+		ReplicationFactorMin: 1,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	delay()
+
+	p2, err := clusters[0].PinGet(h)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(p2.Allocations) != 2 {
+		t.Fatal("allocations should have been reduced to 2")
+	}
+}
+
 // This test checks that when not all nodes are available,
 // we pin in as many as we can aiming for ReplicationFactorMax
 func TestClustersReplicationFactorInBetween(t *testing.T) {
@@ -917,7 +969,7 @@ func TestClustersReplicationFactorInBetween(t *testing.T) {
 		}
 		if len(p.Allocations) != nClusters-2 {
-			t.Error("should have pinned nClusters - 1 allocations")
+			t.Error("should have pinned nClusters-2 allocations")
 		}
 		if p.ReplicationFactorMin != 1 {
@@ -1017,7 +1069,7 @@ func TestClustersReplicationMinMaxNoRealloc(t *testing.T) {
 	}
 }

-// This test checks tat repinning something that has becomed
+// This test checks that repinning something that has become
 // underpinned does re-allocations when it's not sufficiently
 // pinned anymore
 func TestClustersReplicationMinMaxRealloc(t *testing.T) {
@@ -1080,10 +1132,6 @@ func TestClustersReplicationMinMaxRealloc(t *testing.T) {
 	secondAllocations := p.Allocations

-	if len(secondAllocations) != minInt(nClusters-2, 4) {
-		t.Error("pin should be allocated again to a few peers")
-	}
-
 	strings1 := api.PeersToStrings(firstAllocations)
 	strings2 := api.PeersToStrings(secondAllocations)
 	sort.Strings(strings1)
@@ -1095,6 +1143,12 @@ func TestClustersReplicationMinMaxRealloc(t *testing.T) {
 		t.Error("allocations should have changed")
 	}

+	lenSA := len(secondAllocations)
+	expected := minInt(nClusters-2, 4)
+	if lenSA != expected {
+		t.Errorf("Insufficient reallocation, could have allocated to %d peers but instead only allocated to %d peers", expected, lenSA)
+	}
+
 	if len(secondAllocations) < 3 {
 		t.Error("allocations should be more than rplMin")
 	}
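
The new check leans on the test suite's minInt helper, whose definition is not part of this diff. A sketch consistent with how it is called (assumption; the real helper lives elsewhere in the test files):

// minInt returns the smaller of two ints.
func minInt(a, b int) int {
	if a < b {
		return a
	}
	return b
}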

View File

@@ -181,9 +181,12 @@ func (mpt *MapPinTracker) unsafeSetError(c *cid.Cid, err error) {
 }

 func (mpt *MapPinTracker) isRemote(c api.Pin) bool {
-	if c.ReplicationFactorMin < 0 {
+	if c.ReplicationFactorMax < 0 {
 		return false
 	}
+	if c.ReplicationFactorMax == 0 {
+		logger.Errorf("Pin with replication factor 0! %+v", c)
+	}

 	for _, p := range c.Allocations {
 		if p == mpt.peerID {
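
The hunk cuts off inside the loop; for context, a sketch of the complete method after the change, where the loop tail and final return are assumptions based on the method's name and how the tracker uses it:

func (mpt *MapPinTracker) isRemote(c api.Pin) bool {
	if c.ReplicationFactorMax < 0 {
		return false // pinned everywhere: never remote for this peer
	}
	if c.ReplicationFactorMax == 0 {
		logger.Errorf("Pin with replication factor 0! %+v", c)
	}

	for _, p := range c.Allocations {
		if p == mpt.peerID {
			return false // allocated to this peer: local
		}
	}
	return true // replicated, but not allocated here: remote
}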

View File

@@ -1,3 +1,5 @@
+#!/bin/bash
+
 # Sharness test framework for ipfs-cluster
 #
 # We are using sharness (https://github.com/mlafeldt/sharness)
@@ -107,7 +109,7 @@ test_confirm_importState() {
 }

 cluster_kill(){
-    kill -1 "$CLUSTER_D_PID"
+    kill -1 "$CLUSTER_D_PID" &>/dev/null
     while pgrep ipfs-cluster-service >/dev/null; do
         sleep 0.2
     done
@@ -126,8 +128,8 @@ cluster_start(){

 # Cleanup functions
 test_clean_ipfs(){
-    docker kill ipfs
-    docker rm ipfs
+    docker kill ipfs >/dev/null
+    docker rm ipfs >/dev/null
     sleep 1
 }

View File

@@ -9,7 +9,8 @@ test_cluster_init

 test_expect_success IPFS,CLUSTER "state export fails without snapshots" '
-    cluster_kill && sleep 5 &&
+    cluster_kill
+    sleep 5
     test_expect_code 1 ipfs-cluster-service --debug --config "test-config" state export
 '

View File

@@ -18,6 +18,7 @@ test_expect_success IPFS,CLUSTER "state import fails on incorrect format" '
 '

 test_expect_success IPFS,CLUSTER,IMPORTSTATE "state import succeeds on correct format" '
+    sleep 5
     cid=`docker exec ipfs sh -c "echo test_53 | ipfs add -q"` &&
     ipfs-cluster-service -f --debug --config "test-config" state import importState &&
     cluster_start &&

View File

@@ -3,6 +3,7 @@
     "cid": "QmbrCtydGyPeHiLURSPMqrvE5mCgMCwFYq3UD4XLCeAYw6",
     "name": "",
     "allocations": [],
-    "replication_factor": -1
+    "replication_factor_min": -1,
+    "replication_factor_max": -1
   }
 ]
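
The renamed keys mirror the Go-side fields used throughout this commit. A trimmed, hypothetical sketch of the serialized pin type this fixture decodes into, limited to the fields the fixture shows:

// Hypothetical view of the serialized pin type; only the fields
// visible in the fixture are included here.
type PinSerial struct {
	Cid                  string   `json:"cid"`
	Name                 string   `json:"name"`
	Allocations          []string `json:"allocations"`
	ReplicationFactorMin int      `json:"replication_factor_min"`
	ReplicationFactorMax int      `json:"replication_factor_max"`
}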