Avoid using rand.Seed as it is deprecated

Hector Sanjuan 2023-08-10 18:33:30 +02:00
parent d58c29151d
commit ba2308765f
4 changed files with 22 additions and 35 deletions
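
rand.Seed is deprecated as of Go 1.20: the global math/rand source now seeds itself automatically, so the init()-time reseeding removed below is unnecessary, and the tests that still want their own seeded generator construct one explicitly with rand.New(rand.NewSource(...)). A minimal standalone sketch of the before/after pattern (illustrative code, not the cluster sources; only the mrand name comes from the diff below):

    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    // Old pattern, deprecated since Go 1.20: reseed the shared global source.
    //
    //    func init() {
    //        rand.Seed(time.Now().UnixNano())
    //    }

    // New pattern: own an explicitly seeded generator. Note that, unlike the
    // package-level functions, a *rand.Rand is not safe for concurrent use.
    var mrand = rand.New(rand.NewSource(time.Now().UnixNano()))

    func main() {
        fmt.Println(mrand.Intn(10)) // random int in [0, 10)
        fmt.Println(rand.Intn(10))  // global source: auto-seeded since Go 1.20
    }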

View File

@@ -19,13 +19,11 @@ import (
"encoding/json"
"errors"
"fmt"
"math/rand"
"net"
"net/http"
"net/url"
"strings"
"sync"
"time"
jwt "github.com/golang-jwt/jwt/v4"
types "github.com/ipfs-cluster/ipfs-cluster/api"
@@ -50,10 +48,6 @@ import (
"go.opencensus.io/trace"
)
-func init() {
-	rand.Seed(time.Now().UnixNano())
-}
// StreamChannelSize is used to define buffer sizes for channels.
const StreamChannelSize = 1024
@@ -854,7 +848,6 @@ func (api *API) SetKeepAlivesEnabled(b bool) {
api.server.SetKeepAlivesEnabled(b)
}
func (api *API) HealthHandler(w http.ResponseWriter, r *http.Request) {
api.SendResponse(w, http.StatusNoContent, nil, nil)
}

View File

@@ -11,11 +11,9 @@ import (
"encoding/json"
"errors"
"fmt"
"math/rand"
"net/http"
"strings"
"sync"
"time"
"github.com/ipfs-cluster/ipfs-cluster/adder/adderutils"
types "github.com/ipfs-cluster/ipfs-cluster/api"
@@ -29,10 +27,6 @@ import (
mux "github.com/gorilla/mux"
)
-func init() {
-	rand.Seed(time.Now().UnixNano())
-}
var (
logger = logging.Logger("restapi")
apiLogger = logging.Logger("restapilog")

View File

@@ -67,6 +67,8 @@ var (
// clusterPort = 10000
// apiPort = 10100
// ipfsProxyPort = 10200
+mrand = rand.New(rand.NewSource(time.Now().UnixNano()))
)
type logFacilities []string
@@ -91,7 +93,6 @@ func (lg *logFacilities) Set(value string) error {
// as flag.Parse() does not work well there
// (see https://golang.org/src/testing/testing.go#L211)
func TestMain(m *testing.M) {
-rand.Seed(time.Now().UnixNano())
ReadyTimeout = 11 * time.Second
// GossipSub needs to heartbeat to discover newly connected hosts
@@ -625,7 +626,7 @@ func TestClustersPeers(t *testing.T) {
delay()
-j := rand.Intn(nClusters) // choose a random cluster peer
+j := mrand.Intn(nClusters) // choose a random cluster peer
out := make(chan api.ID, len(clusters))
clusters[j].Peers(ctx, out)
@@ -673,7 +674,7 @@ func TestClustersPin(t *testing.T) {
ttlDelay()
for i := 0; i < nPins; i++ {
-j := rand.Intn(nClusters) // choose a random cluster peer
+j := mrand.Intn(nClusters) // choose a random cluster peer
h, err := prefix.Sum(randomBytes()) // create random cid
if err != nil {
t.Fatal(err)
@@ -729,7 +730,7 @@ func TestClustersPin(t *testing.T) {
for i := 0; i < len(pinList); i++ {
// test re-unpin fails
-j := rand.Intn(nClusters) // choose a random cluster peer
+j := mrand.Intn(nClusters) // choose a random cluster peer
_, err := clusters[j].Unpin(ctx, pinList[i].Cid)
if err != nil {
t.Errorf("error unpinning %s: %s", pinList[i].Cid, err)
@@ -744,7 +745,7 @@ func TestClustersPin(t *testing.T) {
}
for i := 0; i < len(pinList); i++ {
-j := rand.Intn(nClusters) // choose a random cluster peer
+j := mrand.Intn(nClusters) // choose a random cluster peer
_, err := clusters[j].Unpin(ctx, pinList[i].Cid)
if err == nil {
t.Errorf("expected error re-unpinning %s", pinList[i].Cid)
@@ -1113,7 +1114,7 @@ func TestClustersRecover(t *testing.T) {
pinDelay()
pinDelay()
-j := rand.Intn(nClusters)
+j := mrand.Intn(nClusters)
ginfo, err := clusters[j].Recover(ctx, h)
if err != nil {
// we always attempt to return a valid response
@@ -1152,7 +1153,7 @@ func TestClustersRecover(t *testing.T) {
}
// Test with a good Cid
-j = rand.Intn(nClusters)
+j = mrand.Intn(nClusters)
ginfo, err = clusters[j].Recover(ctx, h2)
if err != nil {
t.Fatal(err)
@@ -1191,7 +1192,7 @@ func TestClustersRecoverAll(t *testing.T) {
out := make(chan api.GlobalPinInfo)
go func() {
-err := clusters[rand.Intn(nClusters)].RecoverAll(ctx, out)
+err := clusters[mrand.Intn(nClusters)].RecoverAll(ctx, out)
if err != nil {
t.Error(err)
}
@@ -1246,7 +1247,7 @@ func TestClustersReplicationOverall(t *testing.T) {
for i := 0; i < nClusters; i++ {
// Pick a random cluster and hash
-j := rand.Intn(nClusters) // choose a random cluster peer
+j := mrand.Intn(nClusters) // choose a random cluster peer
h, err := prefix.Sum(randomBytes()) // create random cid
if err != nil {
t.Fatal(err)
@@ -1699,7 +1700,7 @@ func TestClustersReplicationRealloc(t *testing.T) {
ttlDelay()
-j := rand.Intn(nClusters)
+j := mrand.Intn(nClusters)
h := test.Cid1
_, err := clusters[j].Pin(ctx, h, api.PinOptions{})
if err != nil {
@@ -1761,7 +1762,7 @@ func TestClustersReplicationRealloc(t *testing.T) {
// Make sure we haven't killed our randomly
// selected cluster
for j == killedClusterIndex {
-j = rand.Intn(nClusters)
+j = mrand.Intn(nClusters)
}
// now pin should succeed
@@ -1807,7 +1808,7 @@ func TestClustersReplicationNotEnoughPeers(t *testing.T) {
ttlDelay()
-j := rand.Intn(nClusters)
+j := mrand.Intn(nClusters)
_, err := clusters[j].Pin(ctx, test.Cid1, api.PinOptions{})
if err != nil {
t.Fatal(err)
@@ -1972,7 +1973,7 @@ func TestClustersGraphConnected(t *testing.T) {
ttlDelay()
-j := rand.Intn(nClusters) // choose a random cluster peer to query
+j := mrand.Intn(nClusters) // choose a random cluster peer to query
graph, err := clusters[j].ConnectGraph()
if err != nil {
t.Fatal(err)
@@ -1997,7 +1998,7 @@ func TestClustersGraphUnhealthy(t *testing.T) {
t.Skip("Need at least 5 peers")
}
-j := rand.Intn(nClusters) // choose a random cluster peer to query
+j := mrand.Intn(nClusters) // choose a random cluster peer to query
// chose the clusters to shutdown
discon1 := -1
discon2 := -1
@@ -2055,7 +2056,7 @@ func TestClustersDisabledRepinning(t *testing.T) {
ttlDelay()
-j := rand.Intn(nClusters)
+j := mrand.Intn(nClusters)
h := test.Cid1
_, err := clusters[j].Pin(ctx, h, api.PinOptions{})
if err != nil {
@@ -2084,7 +2085,7 @@ func TestClustersDisabledRepinning(t *testing.T) {
// Make sure we haven't killed our randomly
// selected cluster
for j == killedClusterIndex {
-j = rand.Intn(nClusters)
+j = mrand.Intn(nClusters)
}
numPinned := 0
@@ -2204,7 +2205,7 @@ func TestClusterPinsWithExpiration(t *testing.T) {
ttlDelay()
-cl := clusters[rand.Intn(nClusters)] // choose a random cluster peer to query
+cl := clusters[mrand.Intn(nClusters)] // choose a random cluster peer to query
c := test.Cid1
expireIn := 1 * time.Second

View File

@@ -3,7 +3,6 @@ package ipfscluster
import (
"context"
"fmt"
"math/rand"
"sync"
"testing"
"time"
@@ -425,7 +424,7 @@ func TestClustersPeerRemoveReallocsPins(t *testing.T) {
chosenID, err := clusters[0].consensus.Leader(ctx)
if err != nil {
// choose a random peer - crdt
-i := rand.Intn(nClusters)
+i := mrand.Intn(nClusters)
chosenID = clusters[i].host.ID()
}
@@ -618,7 +617,7 @@ func TestClustersPeerJoinAllAtOnce(t *testing.T) {
// }
// f := func(t *testing.T, c *Cluster) {
-// j := rand.Intn(2)
+// j := mrand.Intn(2)
// err := c.Join(clusterAddr(clusters[j]))
// if err != nil {
// t.Fatal(err)
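
A note on the design choice above: math/rand's package-level functions are safe for concurrent use, but a *rand.Rand such as the new mrand helper is not, which is fine as long as the tests draw from it sequentially. If concurrent callers were ever needed, a locked wrapper would be the usual fix. A hypothetical sketch, not part of this commit (lockedIntn and mrandMu are made-up names):

    package ipfscluster

    import (
        "math/rand"
        "sync"
        "time"
    )

    // Hypothetical variant (not in this commit): guard the shared generator
    // with a mutex so that parallel tests could draw from it safely.
    var (
        mrandMu sync.Mutex
        mrand   = rand.New(rand.NewSource(time.Now().UnixNano()))
    )

    func lockedIntn(n int) int {
        mrandMu.Lock()
        defer mrandMu.Unlock()
        return mrand.Intn(n)
    }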