Merge pull request #256 from ipfs/feat/try-jenkins

Adding Jenkinsfile -- Experiment
commit b8014927f6
Author: ZenGround0 (committed via GitHub)
Date: 2018-01-12 18:17:00 -05:00
10 changed files with 32 additions and 19 deletions

View File

@ -1,4 +1,7 @@
language: go
+os:
+- linux
+# - osx
go:
- '1.9'
services:

View File

@ -68,13 +68,13 @@ check:
golint -set_exit_status -min_confidence 0.3 ./...
test: deps
-go test -tags silent -v ./...
+go test -timeout 20m -tags silent -v ./...
test_sharness: $(sharness)
@sh sharness/run-sharness-tests.sh
test_problem: deps
-go test -tags debug -v -run $(problematic_test)
+go test -timeout 20m -tags debug -v -run $(problematic_test)
$(sharness):
@echo "Downloading sharness"
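The Makefile change adds `-timeout 20m` to both test targets, lifting the limit above `go test`'s default of 10 minutes, presumably so the slower integration runs are not killed mid-suite. As a general aside rather than something this diff introduces, long-running tests are often also gated on `-short` so that quick local runs can skip them; a minimal sketch with an illustrative test name:

```go
// Illustrative pattern only; TestSomethingSlow is not a test in this repo.
package ipfscluster

import "testing"

func TestSomethingSlow(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping slow test in -short mode")
	}
	// long-running assertions would go here
}
```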

ci/Jenkinsfile (new file, vendored, +1)
View File

@ -0,0 +1 @@
+golang([test: "go test -tags silent -v ./..."])
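The new one-line Jenkinsfile hands the build to a `golang` step, presumably from a shared pipeline library, pointing it at the same `-tags silent` test invocation the Makefile uses. How the `silent` build tag is consumed is not part of this diff; a minimal sketch of the usual build-tag pattern, assuming a hypothetical file that silences standard-library logging when the tag is set:

```go
// +build silent

// Hypothetical illustration of a -tags silent file; the repo's actual
// silent-mode setup is not shown in this diff.
package ipfscluster

import (
	"io/ioutil"
	"log"
)

func init() {
	// Discard log output so CI test runs stay quiet.
	log.SetOutput(ioutil.Discard)
}
```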

View File

@ -1097,6 +1097,8 @@ func makeHost(ctx context.Context, cfg *Config) (host.Host, error) {
return nil, err
}
+ps.AddAddr(cfg.ID, cfg.ListenAddr, peerstore.PermanentAddrTTL)
network, err := swarm.NewNetworkWithProtector(
ctx,
[]ma.Multiaddr{cfg.ListenAddr},
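The added `ps.AddAddr(cfg.ID, cfg.ListenAddr, peerstore.PermanentAddrTTL)` registers the host's own listen address in its peerstore before the swarm network is constructed, presumably so the local peer's multiaddress is known from the start. A minimal sketch of that pattern, assuming the go-libp2p-peerstore and go-multiaddr packages this code already relies on:

```go
// Sketch only; it mirrors the peerstore calls visible in this diff.
package example

import (
	peer "github.com/libp2p/go-libp2p-peer"
	peerstore "github.com/libp2p/go-libp2p-peerstore"
	ma "github.com/multiformats/go-multiaddr"
)

// registerSelf pins a peer's listen address with a permanent TTL so it
// never ages out of the peerstore.
func registerSelf(ps peerstore.Peerstore, pid peer.ID, listen ma.Multiaddr) {
	ps.AddAddr(pid, listen, peerstore.PermanentAddrTTL)
}
```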

View File

@ -22,16 +22,16 @@ var testingClusterCfg = []byte(`{
"state_sync_interval": "1m0s",
"ipfs_sync_interval": "2m10s",
"replication_factor": -1,
"monitor_ping_interval": "2s"
"monitor_ping_interval": "1s"
}
`)
var testingRaftCfg = []byte(`{
"data_folder": "raftFolderFromTests",
"wait_for_leader_timeout": "15s",
"wait_for_leader_timeout": "30s",
"commit_retries": 1,
"commit_retry_delay": "1s",
"network_timeout": "2s",
"network_timeout": "20s",
"heartbeat_timeout": "1s",
"election_timeout": "1s",
"commit_timeout": "50ms",
@ -69,7 +69,7 @@ var testingTrackerCfg = []byte(`
`)
var testingMonCfg = []byte(`{
"check_interval": "2s"
"check_interval": "1s"
}`)
var testingDiskInfCfg = []byte(`{
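These fixtures retune the test timings: ping and check intervals drop from 2s to 1s, while the Raft leader wait grows from 15s to 30s and the network timeout from 2s to 20s, apparently to tolerate slower CI workers. The interval strings use Go's duration syntax; presumably the config loader turns them into `time.Duration` values with `time.ParseDuration` or an equivalent wrapper, e.g.:

```go
// Illustration of the duration format in these fixtures; the actual
// config-decoding code is not part of this diff.
package main

import (
	"fmt"
	"time"
)

func main() {
	d, err := time.ParseDuration("30s") // e.g. "wait_for_leader_timeout"
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // prints: 30s
}
```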

View File

@ -41,6 +41,7 @@ func makeTestingHost(t *testing.T, port int) host.Host {
ps := peerstore.NewPeerstore()
ps.AddPubKey(pid, pub)
ps.AddPrivKey(pid, priv)
+ps.AddAddr(pid, maddr, peerstore.PermanentAddrTTL)
n, _ := swarm.NewNetwork(
context.Background(),
[]ma.Multiaddr{maddr},
@ -63,6 +64,7 @@ func testingConsensus(t *testing.T, port int) *Consensus {
}
cc.SetClient(test.NewMockRPCClientWithHost(t, h))
<-cc.Ready()
+time.Sleep(2 * time.Second)
return cc
}
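`testingConsensus` now sleeps for two seconds after `<-cc.Ready()` returns, giving Raft time to settle before assertions run. When readiness is signalled on a channel, a common alternative is to bound the wait rather than pad it; a sketch, with the channel type assumed rather than taken from this code:

```go
// Sketch only; waitReady and its struct{} channel are illustrative.
package example

import (
	"errors"
	"time"
)

// waitReady blocks until the ready channel fires or the timeout expires.
func waitReady(ready <-chan struct{}, timeout time.Duration) error {
	select {
	case <-ready:
		return nil
	case <-time.After(timeout):
		return errors.New("component not ready in time")
	}
}
```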
@ -135,7 +137,7 @@ func TestConsensusAddPeer(t *testing.T) {
addr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", p2pPortAlt))
-cc.host.Peerstore().AddAddr(cc2.host.ID(), addr, peerstore.TempAddrTTL)
+cc.host.Peerstore().AddAddr(cc2.host.ID(), addr, peerstore.PermanentAddrTTL)
err := cc.AddPeer(cc2.host.ID())
if err != nil {
t.Error("the operation did not make it to the log:", err)
@ -166,8 +168,9 @@ func TestConsensusRmPeer(t *testing.T) {
defer cc.Shutdown()
defer cc2.Shutdown()
-addr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", p2pPortAlt))
-cc.host.Peerstore().AddAddr(cc2.host.ID(), addr, peerstore.TempAddrTTL)
+//addr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", p2pPort))
+addr2, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", p2pPortAlt))
+cc.host.Peerstore().AddAddr(cc2.host.ID(), addr2, peerstore.PermanentAddrTTL)
err := cc.AddPeer(cc2.host.ID())
if err != nil {

View File

@ -7,7 +7,7 @@ for dir in $dirs;
do
if ls "$dir"/*.go &> /dev/null;
then
-go test -v -coverprofile=profile.out -covermode=count -tags silent -timeout 15m "$dir"
+go test -timeout 20m -v -coverprofile=profile.out -covermode=count -tags silent "$dir"
if [ $? -ne 0 ];
then
exit 1

View File

@ -1,6 +1,7 @@
package ipfscluster
import (
"flag"
"fmt"
"math/rand"
"os"
@ -31,15 +32,15 @@ import (
//TestClusters*
var (
// number of clusters to create
-nClusters = 6
+nClusters = *flag.Int("nclusters", 6, "number of clusters to use")
// number of pins to pin/unpin/check
-nPins = 500
+nPins = *flag.Int("npins", 500, "number of pins to pin/unpin/check")
// ports
-clusterPort = 20000
-apiPort = 20500
-ipfsProxyPort = 21000
+clusterPort = 10000
+apiPort = 10100
+ipfsProxyPort = 10200
)
func init() {
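This hunk turns the cluster and pin counts into command-line flags and moves the test port ranges from the 20000s down to the 10000s. The file's `TestMain`, if it has one, is not shown; for reference, test flags take their command-line values only after `flag.Parse()` runs, and dereferencing `flag.Int` at package-variable initialization (as in `*flag.Int(...)`) copies the default before parsing happens. A sketch of the usual wiring, with illustrative names:

```go
// Illustrative only; this repo's actual flag wiring may differ.
package example

import (
	"flag"
	"os"
	"testing"
)

// Keep the *int and dereference it after flag.Parse so the command-line
// value (not the default) is what the tests read.
var nClustersFlag = flag.Int("nclusters", 6, "number of clusters to use")

func TestMain(m *testing.M) {
	flag.Parse()
	// *nClustersFlag now reflects -nclusters from the command line.
	os.Exit(m.Run())
}
```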
@ -185,11 +186,11 @@ func createClusters(t *testing.T) ([]*Cluster, []*test.IpfsMock) {
func shutdownClusters(t *testing.T, clusters []*Cluster, m []*test.IpfsMock) {
for i, c := range clusters {
-m[i].Close()
err := c.Shutdown()
if err != nil {
t.Error(err)
}
+m[i].Close()
}
os.RemoveAll("./e2eTestRaft")
}
@ -767,7 +768,7 @@ func TestClustersReplication(t *testing.T) {
if numRemote != 1 {
t.Errorf("We wanted 1 peer track as remote but %d do", numRemote)
}
-time.Sleep(time.Second / 2) // this is for metric to be up to date
+time.Sleep(time.Second) // this is for metric to be up to date
}
f := func(t *testing.T, c *Cluster) {

View File

@ -247,6 +247,7 @@ func (mon *Monitor) LastMetrics(name string) []api.Metric {
}
last, err := peerMetrics.latest()
if err != nil || last.Discard() {
+logger.Warningf("no valid last metric for peer: %+v", last)
continue
}
metrics = append(metrics, last)
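The monitor now logs a warning when a peer's newest metric is missing or already discardable instead of skipping it silently. The surrounding loop is a "keep only each peer's latest valid metric" filter; a generic sketch with stand-in types:

```go
// Generic sketch; Metric and Discard are stand-ins for the api.Metric
// type the real monitor uses.
package example

type Metric struct {
	Peer    string
	Expired bool
}

func (m Metric) Discard() bool { return m.Expired }

// lastValid returns the newest non-discarded metric for each peer.
func lastValid(byPeer map[string][]Metric) []Metric {
	var out []Metric
	for _, ms := range byPeer {
		if len(ms) == 0 {
			continue // no metrics recorded for this peer
		}
		last := ms[len(ms)-1] // newest entry is appended last
		if last.Discard() {
			continue // expired or otherwise invalid
		}
		out = append(out, last)
	}
	return out
}
```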

View File

@ -29,6 +29,7 @@ func peerManagerClusters(t *testing.T) ([]*Cluster, []*test.IpfsMock) {
}(i)
}
wg.Wait()
+delay()
return cls, mocks
}
@ -277,6 +278,7 @@ func TestClustersPeerRemoveLeader(t *testing.T) {
if more {
t.Error("should be done")
}
+time.Sleep(time.Second)
}
}
@ -330,7 +332,7 @@ func TestClustersPeerRemoveReallocsPins(t *testing.T) {
checkErr(t, err)
err = leader.Pin(api.PinCid(h))
checkErr(t, err)
-time.Sleep(time.Second)
+time.Sleep(time.Second) // time to update the metrics
}
delay()
@ -362,7 +364,7 @@ func TestClustersPeerRemoveReallocsPins(t *testing.T) {
t.Fatal("error removing peer:", err)
}
-time.Sleep(time.Second)
+time.Sleep(2 * time.Second)
waitForLeader(t, clusters)
delay()
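The remaining hunks lengthen the sleeps around peer removal and leader changes and call `delay()` after cluster setup. `delay()` and `waitForLeader` are existing test helpers that this diff does not show; where such helpers are not available, the same effect is commonly achieved by polling a condition with a deadline, sketched here with illustrative names:

```go
// Sketch only; pollUntil is not a helper from this repo.
package example

import (
	"fmt"
	"time"
)

// pollUntil re-checks cond every interval until it returns true or the
// timeout elapses.
func pollUntil(cond func() bool, interval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if cond() {
			return nil
		}
		time.Sleep(interval)
	}
	return fmt.Errorf("condition not met within %s", timeout)
}
```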