config: Add log and testcase for disable_repinning
* Test case: creates a set of clusters, assigns them a pin with a replication factor of n-1, and then shuts down one of the pinning peers at random. Since repinning is disabled, the pin is not re-allocated, so the test verifies that exactly n-2 clusters still pin the cid.
* Add a warning log so the user knows that, because the disable_repinning option is set, the cluster will not attempt to re-assign the pin.

License: MIT
Signed-off-by: Sina Mahmoodi <itz.s1na@gmail.com>
parent 0954c6d6fa
commit 03cc809708
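For context, a minimal sketch of how the disable_repinning option could map onto the DisableRepinning field used in the diff below. The field names and the disable_repinning key come from this commit; the surrounding struct and the other JSON tags are illustrative assumptions, not the actual cluster Config definition.

// Illustrative sketch only, not the real cluster Config: it shows how a
// "disable_repinning" entry in the cluster configuration could surface as
// the DisableRepinning field checked in repinFromPeer below. JSON tags
// other than disable_repinning are assumed for the example.
type Config struct {
	// Minimum and maximum number of peers that should hold each pin.
	ReplicationFactorMin int `json:"replication_factor_min"`
	ReplicationFactorMax int `json:"replication_factor_max"`
	// When true, the cluster does not re-allocate pins away from a peer
	// that goes down; allocations simply stay under-replicated.
	DisableRepinning bool `json:"disable_repinning"`
}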
@@ -398,6 +398,7 @@ func (c *Cluster) watchPeers() {
 // find all Cids pinned to a given peer and triggers re-pins on them.
 func (c *Cluster) repinFromPeer(p peer.ID) {
 	if c.config.DisableRepinning {
+		logger.Warningf("repinning is disabled. Will not re-allocate cids from %s", p.Pretty())
 		return
 	}
@@ -1580,3 +1580,65 @@ func TestClustersGraphUnhealthy(t *testing.T) {
 	}
 	validateClusterGraph(t, graph, clusterIDs)
 }
+
+// Check that the pin is not re-assigned when a node
+// that has disabled repinning goes down.
+func TestClustersDisabledRepinning(t *testing.T) {
+	clusters, mock := createClusters(t)
+	defer shutdownClusters(t, clusters, mock)
+	for _, c := range clusters {
+		c.config.ReplicationFactorMin = nClusters - 1
+		c.config.ReplicationFactorMax = nClusters - 1
+		c.config.DisableRepinning = true
+	}
+
+	ttlDelay()
+
+	j := rand.Intn(nClusters)
+	h, _ := cid.Decode(test.TestCid1)
+	err := clusters[j].Pin(api.PinCid(h))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Let the pin arrive
+	pinDelay()
+
+	var killedClusterIndex int
+	// find someone that pinned it and kill that cluster
+	for i, c := range clusters {
+		pinfo := c.tracker.Status(h)
+		if pinfo.Status == api.TrackerStatusPinned {
+			killedClusterIndex = i
+			t.Logf("Shutting down %s", c.ID().ID)
+			c.Shutdown()
+			break
+		}
+	}
+
+	// let metrics expire and give time for the cluster to
+	// see if they have lost the leader
+	waitForLeaderAndMetrics(t, clusters)
+
+	// Make sure we haven't killed our randomly
+	// selected cluster
+	for j == killedClusterIndex {
+		j = rand.Intn(nClusters)
+	}
+
+	numPinned := 0
+	for i, c := range clusters {
+		if i == killedClusterIndex {
+			continue
+		}
+		pinfo := c.tracker.Status(h)
+		if pinfo.Status == api.TrackerStatusPinned {
+			//t.Log(pinfo.Peer.Pretty())
+			numPinned++
+		}
+	}
+
+	if numPinned != nClusters-2 {
+		t.Errorf("expected %d replicas for pin, got %d", nClusters-2, numPinned)
+	}
+}