testfixing
License: MIT
Signed-off-by: Hector Sanjuan <code@hector.link>

This commit is contained in:
parent db9070e0ca
commit 65dc17a78b

@@ -8,6 +8,8 @@ import (
"github.com/ipfs/ipfs-cluster/api"
)

var DefaultShardSize = uint64(100 * 1024 * 1024) // 100 MB

// Params contains all of the configurable parameters needed to specify the
// importing process of a file being added to an ipfs-cluster
type Params struct {

@@ -32,7 +34,7 @@ func DefaultParams() *Params {
ReplicationFactorMin: 0,
ReplicationFactorMax: 0,
Name: "",
ShardSize: 100 * 1024 * 1024, // 100 MB
ShardSize: DefaultShardSize, // 100 MB
},
}
}
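As a quick illustration (not part of the commit), the new DefaultShardSize works out to 104857600 bytes, i.e. 100 MiB; the "100 MB" comments above use MB loosely:

package main

import "fmt"

func main() {
	// DefaultShardSize as defined above: 100 * 1024 * 1024 bytes.
	defaultShardSize := uint64(100 * 1024 * 1024)
	fmt.Println(defaultShardSize) // 104857600
}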

@@ -305,25 +305,41 @@ func statusReached(target api.TrackerStatus, gblPinInfo api.GlobalPinInfo) (bool
}

// AddMultiFile adds new files to the ipfs cluster, importing and potentially
// sharding underlying dags across the ipfs repos of multiple cluster peers
// sharding underlying dags across the ipfs repos of multiple cluster peers.
func (c *Client) AddMultiFile(
multiFileR *files.MultiFileReader,
replicationFactorMin int,
replicationFactorMax int,
name string,
shard bool,
shardSize int,
layout string,
chunker string,
raw bool,
hidden bool,
replMin, replMax int) ([]api.AddedOutput, error) {
) error {

headers := make(map[string]string)
headers["Content-Type"] = "multipart/form-data; boundary=" + multiFileR.Boundary()
fmtStr1 := "/allocations?shard=%t&layout=%s&"
fmtStr2 := "chunker=%s&raw=%t&hidden=%t&repl_min=%d&repl_max=%d"
url := fmt.Sprintf(fmtStr1+fmtStr2, shard, layout, chunker, raw, hidden,
replMin, replMax)
fmtStr := "/allocations?repl_min=%d&repl_max=%d&name=%s&"
fmtStr += "shard=%t&shard-size=%d&"
fmtStr += "layout=%s&chunker=%s&raw=%t&hidden=%t"
url := fmt.Sprintf(
fmtStr,
replicationFactorMin,
replicationFactorMax,
name,
shard,
shardSize,
layout,
chunker,
raw,
hidden,
)
output := make([]api.AddedOutput, 0)
err := c.doStream("POST", url, multiFileR, headers, &output)
return output, err
// TODO: handle output
return err
}

// Eventually an Add(io.Reader) method for adding raw readers as a multifile should be here.
// TODO: Eventually an Add(io.Reader) method for adding raw readers as a multifile should be here.
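For orientation only (not part of the commit), this is what the request path built from the new format string above evaluates to for the parameter values used in the updated client test below:

package main

import "fmt"

func main() {
	// Same format string as the new AddMultiFile; the sample values mirror
	// the test call c.AddMultiFile(mfr, -1, -1, "test", false, 1024, "", "", false, false).
	fmtStr := "/allocations?repl_min=%d&repl_max=%d&name=%s&"
	fmtStr += "shard=%t&shard-size=%d&"
	fmtStr += "layout=%s&chunker=%s&raw=%t&hidden=%t"
	url := fmt.Sprintf(fmtStr, -1, -1, "test", false, 1024, "", "", false, false)
	fmt.Println(url)
	// /allocations?repl_min=-1&repl_max=-1&name=test&shard=false&shard-size=1024&layout=&chunker=&raw=false&hidden=false
}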

@@ -8,10 +8,11 @@ import (

rpc "github.com/hsanjuan/go-libp2p-gorpc"
cid "github.com/ipfs/go-cid"
types "github.com/ipfs/ipfs-cluster/api"
peer "github.com/libp2p/go-libp2p-peer"
ma "github.com/multiformats/go-multiaddr"

types "github.com/ipfs/ipfs-cluster/api"

"github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/api/rest"
"github.com/ipfs/ipfs-cluster/test"

@@ -435,18 +436,17 @@ func TestAddMultiFile(t *testing.T) {
defer api.Shutdown()

testF := func(t *testing.T, c *Client) {
mfr, err := test.GetTestingDirMultiReader()
sth := test.NewShardingTestHelper()
mfr := sth.GetTreeMultiReader(t)
err := c.AddMultiFile(mfr, -1, -1, "test", false, 1024, "", "", false, false)
if err != nil {
t.Fatal(err)
}
out, err := c.AddMultiFile(mfr, false, "", "", false, false, -1, -1)
if err != nil {
t.Fatal(err)
}
if len(out) != test.NumTestDirPrints ||
out[len(out)-1].Hash != test.TestDirBalancedRootCID {
t.Fatal("unexpected addedoutput from mock rpc on api")
}
// TODO: output handling
// if len(out) != test.NumTestDirPrints ||
// out[len(out)-1].Hash != test.TestDirBalancedRootCID {
// t.Fatal("unexpected addedoutput from mock rpc on api")
// }
}

testClients(t, api, testF)

@@ -1150,7 +1150,7 @@ func (c *Cluster) unpinShard(cdagCid, shardCid *cid.Cid) error {
// pipeline is used to DAGify the file. Depending on input parameters this
// DAG can be added locally to the calling cluster peer's ipfs repo, or
// sharded across the entire cluster.
func (c *Cluster) AddFile(reader *multipart.Reader, params *adder.Params) error {
func (c *Cluster) AddFile(reader *multipart.Reader, params *adder.Params) (*cid.Cid, error) {
var add adder.Adder
if params.Shard {
add = sharding.New(c.rpcClient)

@@ -13,7 +13,6 @@ import (
"github.com/ipfs/ipfs-cluster/consensus/raft"
"github.com/ipfs/ipfs-cluster/informer/numpin"
"github.com/ipfs/ipfs-cluster/pintracker/maptracker"
"github.com/ipfs/ipfs-cluster/sharder"
"github.com/ipfs/ipfs-cluster/state/mapstate"
"github.com/ipfs/ipfs-cluster/test"

@@ -23,8 +22,7 @@ import (
)

type mockComponent struct {
rpcClient *rpc.Client
returnError bool
rpcClient *rpc.Client
}

func (c *mockComponent) Shutdown() error {

@@ -42,43 +40,54 @@ type mockAPI struct {

type mockConnector struct {
mockComponent

pins sync.Map
blocks sync.Map
}

func (ipfs *mockConnector) ID() (api.IPFSID, error) {
if ipfs.returnError {
return api.IPFSID{}, errors.New("")
}
return api.IPFSID{
ID: test.TestPeerID1,
}, nil
}

func (ipfs *mockConnector) Pin(ctx context.Context, c *cid.Cid, b bool) error {
if ipfs.returnError {
return errors.New("")
}
func (ipfs *mockConnector) Pin(ctx context.Context, c *cid.Cid, maxDepth int) error {
ipfs.pins.Store(c.String(), maxDepth)
return nil
}

func (ipfs *mockConnector) Unpin(ctx context.Context, c *cid.Cid) error {
if ipfs.returnError {
return errors.New("")
}
ipfs.pins.Delete(c.String())
return nil
}

func (ipfs *mockConnector) PinLsCid(ctx context.Context, c *cid.Cid) (api.IPFSPinStatus, error) {
if ipfs.returnError {
return api.IPFSPinStatusError, errors.New("")
dI, ok := ipfs.pins.Load(c.String())
if !ok {
return api.IPFSPinStatusUnpinned, nil
}
depth := dI.(int)
if depth == 0 {
return api.IPFSPinStatusDirect, nil
}
return api.IPFSPinStatusRecursive, nil
}

func (ipfs *mockConnector) PinLs(ctx context.Context, filter string) (map[string]api.IPFSPinStatus, error) {
if ipfs.returnError {
return nil, errors.New("")
}
m := make(map[string]api.IPFSPinStatus)
var st api.IPFSPinStatus
ipfs.pins.Range(func(k, v interface{}) bool {
switch v.(int) {
case 0:
st = api.IPFSPinStatusDirect
default:
st = api.IPFSPinStatusRecursive
}

m[k.(string)] = st
return true
})

return m, nil
}
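As a side note (an assumption inferred from the mock above, not text from the commit), the integer stored per pin acts as a maximum depth: 0 means a direct pin, and anything else, including -1 for unlimited, reports as recursive. A tiny standalone sketch of that mapping:

package main

import "fmt"

// pinStatus mirrors the switch in the mockConnector above: depth 0 is a
// direct pin, any other value (e.g. -1 for unlimited) counts as recursive.
func pinStatus(maxDepth int) string {
	if maxDepth == 0 {
		return "direct"
	}
	return "recursive"
}

func main() {
	fmt.Println(pinStatus(0), pinStatus(-1), pinStatus(2)) // direct recursive recursive
}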

@@ -90,25 +99,22 @@ func (ipfs *mockConnector) ConnectSwarms() error { retu
func (ipfs *mockConnector) ConfigKey(keypath string) (interface{}, error) { return nil, nil }
func (ipfs *mockConnector) FreeSpace() (uint64, error) { return 100, nil }
func (ipfs *mockConnector) RepoSize() (uint64, error) { return 0, nil }
func (ipfs *mockConnector) BlockPut(nwm api.NodeWithMeta) error { return nil }

func (ipfs *mockConnector) BlockPut(nwm api.NodeWithMeta) error {
ipfs.blocks.Store(nwm.Cid, nwm.Data)
return nil
}

func (ipfs *mockConnector) BlockGet(c *cid.Cid) ([]byte, error) {
switch c.String() {
case test.TestShardCid:
return test.TestShardData, nil
case test.TestCdagCid:
return test.TestCdagData, nil
case test.TestShardCid2:
return test.TestShard2Data, nil
case test.TestCdagCid2:
return test.TestCdagData2, nil
default:
return nil, errors.New("block not found")
d, ok := ipfs.blocks.Load(c.String())
if !ok {
return nil, errors.New("block not found")
}
return d.([]byte), nil
}

func testingCluster(t *testing.T) (*Cluster, *mockAPI, *mockConnector, *mapstate.MapState, *maptracker.MapPinTracker) {
clusterCfg, _, _, consensusCfg, trackerCfg, bmonCfg, psmonCfg, _, sharderCfg := testingConfigs()
clusterCfg, _, _, consensusCfg, trackerCfg, bmonCfg, psmonCfg, _ := testingConfigs()

host, err := NewClusterHost(context.Background(), clusterCfg)
if err != nil {

@@ -130,7 +136,6 @@ func testingCluster(t *testing.T) (*Cluster, *mockAPI, *mockConnector, *mapstate
numpinCfg := &numpin.Config{}
numpinCfg.Default()
inf, _ := numpin.NewInformer(numpinCfg)
sharder, _ := sharder.NewSharder(sharderCfg)

ReadyTimeout = consensusCfg.WaitForLeaderTimeout + 1*time.Second

@@ -145,7 +150,7 @@ func testingCluster(t *testing.T) (*Cluster, *mockAPI, *mockConnector, *mapstate
mon,
alloc,
inf,
sharder)
)
if err != nil {
t.Fatal("cannot create cluster:", err)
}

@@ -8,7 +8,6 @@ import (
"github.com/ipfs/ipfs-cluster/monitor/basic"
"github.com/ipfs/ipfs-cluster/monitor/pubsubmon"
"github.com/ipfs/ipfs-cluster/pintracker/maptracker"
"github.com/ipfs/ipfs-cluster/sharder"
)

var testingClusterSecret, _ = DecodeClusterSecret("2588b80d5cb05374fa142aed6cbb047d1f4ef8ef15e37eba68c65b9d30df67ed")

@@ -82,12 +81,8 @@ var testingDiskInfCfg = []byte(`{
"metric_type": "freespace"
}`)

var testingSharderCfg = []byte(`{
"alloc_size": 5000000
}`)

func testingConfigs() (*Config, *rest.Config, *ipfshttp.Config, *raft.Config, *maptracker.Config, *basic.Config, *pubsubmon.Config, *disk.Config, *sharder.Config) {
clusterCfg, apiCfg, ipfsCfg, consensusCfg, trackerCfg, basicmonCfg, pubsubmonCfg, diskInfCfg, sharderCfg := testingEmptyConfigs()
func testingConfigs() (*Config, *rest.Config, *ipfshttp.Config, *raft.Config, *maptracker.Config, *basic.Config, *pubsubmon.Config, *disk.Config) {
clusterCfg, apiCfg, ipfsCfg, consensusCfg, trackerCfg, basicmonCfg, pubsubmonCfg, diskInfCfg := testingEmptyConfigs()
clusterCfg.LoadJSON(testingClusterCfg)
apiCfg.LoadJSON(testingAPICfg)
ipfsCfg.LoadJSON(testingIpfsCfg)

@@ -96,12 +91,11 @@ func testingConfigs() (*Config, *rest.Config, *ipfshttp.Config, *raft.Config, *m
basicmonCfg.LoadJSON(testingMonCfg)
pubsubmonCfg.LoadJSON(testingMonCfg)
diskInfCfg.LoadJSON(testingDiskInfCfg)
sharderCfg.LoadJSON(testingSharderCfg)

return clusterCfg, apiCfg, ipfsCfg, consensusCfg, trackerCfg, basicmonCfg, pubsubmonCfg, diskInfCfg, sharderCfg
return clusterCfg, apiCfg, ipfsCfg, consensusCfg, trackerCfg, basicmonCfg, pubsubmonCfg, diskInfCfg
}

func testingEmptyConfigs() (*Config, *rest.Config, *ipfshttp.Config, *raft.Config, *maptracker.Config, *basic.Config, *pubsubmon.Config, *disk.Config, *sharder.Config) {
func testingEmptyConfigs() (*Config, *rest.Config, *ipfshttp.Config, *raft.Config, *maptracker.Config, *basic.Config, *pubsubmon.Config, *disk.Config) {
clusterCfg := &Config{}
apiCfg := &rest.Config{}
ipfshttpCfg := &ipfshttp.Config{}

@@ -110,8 +104,7 @@ func testingEmptyConfigs() (*Config, *rest.Config, *ipfshttp.Config, *raft.Confi
basicmonCfg := &basic.Config{}
pubsubmonCfg := &pubsubmon.Config{}
diskInfCfg := &disk.Config{}
sharderCfg := &sharder.Config{}
return clusterCfg, apiCfg, ipfshttpCfg, consensusCfg, trackerCfg, basicmonCfg, pubsubmonCfg, diskInfCfg, sharderCfg
return clusterCfg, apiCfg, ipfshttpCfg, consensusCfg, trackerCfg, basicmonCfg, pubsubmonCfg, diskInfCfg
}

// func TestConfigDefault(t *testing.T) {

@@ -31,6 +31,13 @@ func consensusAddr(c *Consensus) ma.Multiaddr {
return cAddr
}

func testPin(c *cid.Cid) api.Pin {
p := api.PinCid(c)
p.ReplicationFactorMin = -1
p.ReplicationFactorMax = -1
return p
}

func makeTestingHost(t *testing.T) host.Host {
h, err := libp2p.New(
context.Background(),

@@ -90,7 +97,7 @@ func TestConsensusPin(t *testing.T) {
defer cc.Shutdown()

c, _ := cid.Decode(test.TestCid1)
err := cc.LogPin(api.Pin{Cid: c, ReplicationFactorMin: -1, ReplicationFactorMax: -1})
err := cc.LogPin(testPin(c))
if err != nil {
t.Error("the operation did not make it to the log:", err)
}

@@ -126,13 +133,8 @@ func TestConsensusUpdate(t *testing.T) {

// Pin first
c1, _ := cid.Decode(test.TestCid1)
pin := api.Pin{
Cid: c1,
Type: api.ShardType,
ReplicationFactorMin: -1,
ReplicationFactorMax: -1,
Parents: nil,
}
pin := testPin(c1)
pin.Type = api.ShardType
err := cc.LogPin(pin)
if err != nil {
t.Fatal("the initial operation did not make it to the log:", err)

@@ -221,7 +223,7 @@ func TestConsensusRmPeer(t *testing.T) {
cc.raft.WaitForLeader(ctx)

c, _ := cid.Decode(test.TestCid1)
err = cc.LogPin(api.Pin{Cid: c, ReplicationFactorMin: -1, ReplicationFactorMax: -1})
err = cc.LogPin(testPin(c))
if err != nil {
t.Error("could not pin after adding peer:", err)
}

@@ -269,7 +271,7 @@ func TestRaftLatestSnapshot(t *testing.T) {

// Make pin 1
c1, _ := cid.Decode(test.TestCid1)
err := cc.LogPin(api.Pin{Cid: c1, ReplicationFactorMin: -1, ReplicationFactorMax: -1})
err := cc.LogPin(testPin(c1))
if err != nil {
t.Error("the first pin did not make it to the log:", err)
}

@@ -282,7 +284,7 @@ func TestRaftLatestSnapshot(t *testing.T) {

// Make pin 2
c2, _ := cid.Decode(test.TestCid2)
err = cc.LogPin(api.Pin{Cid: c2, ReplicationFactorMin: -1, ReplicationFactorMax: -1})
err = cc.LogPin(testPin(c2))
if err != nil {
t.Error("the second pin did not make it to the log:", err)
}

@@ -295,7 +297,7 @@ func TestRaftLatestSnapshot(t *testing.T) {

// Make pin 3
c3, _ := cid.Decode(test.TestCid3)
err = cc.LogPin(api.Pin{Cid: c3, ReplicationFactorMin: -1, ReplicationFactorMax: -1})
err = cc.LogPin(testPin(c3))
if err != nil {
t.Error("the third pin did not make it to the log:", err)
}

@@ -40,7 +40,7 @@ func TestApplyToUnpin(t *testing.T) {

st := mapstate.NewMapState()
c, _ := cid.Decode(test.TestCid1)
st.Add(api.Pin{Cid: c, ReplicationFactorMin: -1, ReplicationFactorMax: -1})
st.Add(testPin(c))
op.ApplyTo(st)
pins := st.List()
if len(pins) != 0 {

@@ -12,6 +12,7 @@ import (

"github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/api/rest/client"
uuid "github.com/satori/go.uuid"

cid "github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log"

@@ -32,6 +33,7 @@ var (
defaultUsername = ""
defaultPassword = ""
defaultWaitCheckFreq = time.Second
defaultShardSize = 100 * 1024 * 1024
)

var logger = logging.Logger("cluster-ctl")

@@ -182,91 +184,9 @@ requires authorization. implies --https, which you can disable with --force-http
}

app.Commands = []cli.Command{
{
Name: "add",
Usage: "ipfs-cluster-ctl add <path> ... add a file to ipfs via cluster",
Description: `
Only works with file paths, no directories. Recurisive adding not yet
supported. --shard flag not yet supported. Eventually users would use this
endpoint if they want the file to be sharded across the cluster. This is useful
in the case several ipfs peers want to ingest the file and combined have enough
space to host but no single peer's repo has the capacity for the entire file.
No stdin reading yet either, that is also TODO
`,
Flags: []cli.Flag{
cli.BoolFlag{
Name: "recursive, r",
Usage: "Add directory paths recursively, default false",
},
cli.BoolFlag{
Name: "shard",
Usage: "Break the file into pieces (shards) and distributed among peers, default false",
},
cli.BoolFlag{
Name: "only-hashes",
Usage: "Write newline separated list of hashes to output",
},
cli.StringFlag{
Name: "layout, L",
Usage: `Dag layout to use for dag generation. Currently 'trickle' is the only option
supported`,
},
cli.StringFlag{
Name: "chunker, s",
Usage: `Chunking algorithm to use. Either fixed block size: 'size-<size>', or rabin
chunker: 'rabin-<min>-<avg>-<max>'. Default is 'size-262144'`,
},
cli.BoolFlag{
Name: "raw-leaves",
Usage: "Use raw blocks for leaves (experimental)",
},
cli.BoolFlag{
Name: "progress, p",
Usage: "Stream progress data",
},
cli.BoolFlag{
Name: "hidden, H",
Usage: "Include files that are hidden. Only takes effect on recursive add",
},
cli.IntFlag{
Name: "replication-min, rmin",
Value: 0,
Usage: "Sets the minimum replication factor for pinning this file",
},
cli.IntFlag{
Name: "replication-max, rmax",
Value: 0,
Usage: "Sets the maximum replication factor for pinning this file",
},
},
Action: func(c *cli.Context) error {
paths := make([]string, c.NArg(), c.NArg())
for i, path := range c.Args() {
paths[i] = path
}
// Files are all opened but not read until they are sent.
multiFileR, err := parseFileArgs(paths, c.Bool("recursive"), c.Bool("hidden"))
checkErr("serializing all files", err)
resp, cerr := globalClient.AddMultiFile(multiFileR,
c.Bool("shard"), c.String("layout"),
c.String("chunker"),
c.Bool("raw-leaves"),
c.Bool("hidden"),
c.Int("replication-min"),
c.Int("replication-max"),
)
if c.Bool("only-hashes") {
for i := range resp {
resp[i].Quiet = true
}
}
formatResponse(c, resp, cerr)
return nil
},
},
{
Name: "id",
Usage: "retrieve peer information",
Usage: "Retrieve peer information",
Description: `
This command displays information about the peer that the tool is contacting
(usually running in localhost).

@@ -280,7 +200,8 @@ This command displays information about the peer that the tool is contacting
},
{
Name: "peers",
Description: "list and manage IPFS Cluster peers",
Usage: "List and manage IPFS Cluster peers",
Description: "List and manage IPFS Cluster peers",
Subcommands: []cli.Command{
{
Name: "ls",

@@ -318,13 +239,140 @@ cluster peers.
},
},
},
{
Name: "add",
Usage: "Add a file or directory to ipfs and pin it in the cluster",
ArgsUsage: "<path>",
Description: `
Add allows adding and replicating content to several ipfs daemons, performing
a Cluster Pin operation on success.

Cluster Add is equivalent to "ipfs add" in terms of DAG building, and supports
the same options for adjusting the chunker, the DAG layout, etc. It will,
however, send the content directly to the destination ipfs daemons
to which it is allocated. This may not be the local daemon (depends on the
allocator). Once the adding process is finished, the content has been fully
added to all allocations and pinned in them. This makes cluster add slower
than a local ipfs add.

Cluster Add supports handling huge files and sharding the resulting DAG among
several ipfs daemons (--shard). In this case, a single ipfs daemon will not
contain the full dag, but only parts of it (shards). Desired shard size can
be provided with the --shard-size flag.

We recommend setting a --name for sharded pins. Otherwise, it will be
automatically generated.
`,
`,
|
||||
Flags: []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "recursive, r",
|
||||
Usage: "Add directory paths recursively",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "name, n",
|
||||
Value: "",
|
||||
Usage: "Sets a name for this pin",
|
||||
},
|
||||
cli.IntFlag{
|
||||
Name: "replication-min, rmin",
|
||||
Value: 0,
|
||||
Usage: "Sets the minimum replication factor for pinning this file",
|
||||
},
|
||||
cli.IntFlag{
|
||||
Name: "replication-max, rmax",
|
||||
Value: 0,
|
||||
Usage: "Sets the maximum replication factor for pinning this file",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "shard",
|
||||
Usage: "Break the file into pieces (shards) and distributed among peers",
|
||||
},
|
||||
cli.IntFlag{
|
||||
Name: "shard-size",
|
||||
Value: defaultShardSize,
|
||||
Usage: "Sets the maximum replication factor for pinning this file",
|
||||
},
|
||||
// cli.BoolFlag{
|
||||
// Name: "only-hashes",
|
||||
// Usage: "Write newline separated list of hashes to output",
|
||||
// },
|
||||
cli.StringFlag{
|
||||
Name: "layout, L",
|
||||
Usage: "Dag layout to use for dag generation: balanced or trickle",
|
||||
Value: "balanced",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "chunker, s",
|
||||
Usage: "Chunker selection. Fixed block size: 'size-<size>', or rabin chunker: 'rabin-<min>-<avg>-<max>'",
|
||||
Value: "size-262144",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "raw-leaves",
|
||||
Usage: "Use raw blocks for leaves (experimental)",
|
||||
},
|
||||
// cli.BoolFlag{
|
||||
// Name: "progress, p",
|
||||
// Usage: "Stream progress data",
|
||||
// },
|
||||
cli.BoolFlag{
|
||||
Name: "hidden, H",
|
||||
Usage: "Include files that are hidden. Only takes effect on recursive add",
|
||||
},
|
||||
},
|
||||
Action: func(c *cli.Context) error {
|
||||
shard := c.Bool("shard")
|
||||
name := c.String("name")
|
||||
if shard && name == "" {
|
||||
randName, err := uuid.NewV4()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// take only first letters
|
||||
name = "sharded-" + strings.Split(randName.String(), "-")[0]
|
||||
}
|
||||
|
||||
paths := make([]string, c.NArg(), c.NArg())
|
||||
for i, path := range c.Args() {
|
||||
paths[i] = path
|
||||
}
|
||||
|
||||
if len(paths) == 0 {
|
||||
checkErr("", errors.New("need at least one path"))
|
||||
}
|
||||
|
||||
// Files are all opened but not read until they are sent.
|
||||
multiFileR, err := parseFileArgs(paths, c.Bool("recursive"), c.Bool("hidden"))
|
||||
checkErr("serializing all files", err)
|
||||
cerr := globalClient.AddMultiFile(
|
||||
multiFileR,
|
||||
c.Int("replication-min"),
|
||||
c.Int("replication-max"),
|
||||
name,
|
||||
shard,
|
||||
c.Int("shard-size"),
|
||||
c.String("layout"),
|
||||
c.String("chunker"),
|
||||
c.Bool("raw-leaves"),
|
||||
c.Bool("hidden"),
|
||||
)
|
||||
// TODO: output control
|
||||
// if c.Bool("only-hashes") {
|
||||
// for i := range resp {
|
||||
// resp[i].Quiet = true
|
||||
// }
|
||||
// }
|
||||
formatResponse(c, nil, cerr)
|
||||
return nil
|
||||
},
|
||||
},
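A small aside (illustration only, not part of the commit): when --shard is set and no --name is given, the action above derives a name from the first segment of a random UUID. A dependency-free sketch of that naming scheme:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// A value shaped like the uuid.NewV4() result used above, hardcoded here
	// so the sketch runs without the go.uuid dependency.
	randName := "9f86d081-884c-4d65-9a2f-eaa0c55ad015"
	name := "sharded-" + strings.Split(randName, "-")[0]
	fmt.Println(name) // sharded-9f86d081
}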
{
Name: "pin",
Description: "add, remove or list items managed by IPFS Cluster",
Usage: "Pin, unpin and list items in IPFS Cluster",
Description: "Pin, unpin and list items in IPFS Cluster",
Subcommands: []cli.Command{
{
Name: "add",
Usage: "Track a CID (pin)",
Usage: "Cluster Pin",
Description: `
This command tells IPFS Cluster to start managing a CID. Depending on
the pinning strategy, this will trigger IPFS pin requests. The CID will

@@ -402,7 +450,7 @@ peers should pin this content.
},
{
Name: "rm",
Usage: "Stop tracking a CID (unpin)",
Usage: "Cluster Unpin",
Description: `
This command tells IPFS Cluster to no longer manage a CID. This will
trigger unpinning operations in all the IPFS nodes holding the content.

@@ -447,15 +495,15 @@ although unpinning operations in the cluster may take longer or fail.
},
{
Name: "ls",
Usage: "List tracked CIDs",
Usage: "List items in the cluster pinset",
Description: `
This command will list the CIDs which are tracked by IPFS Cluster and to
which peers they are currently allocated. This list does not include
any monitoring information about the IPFS status of the CIDs, it
merely represents the list of pins which are part of the shared state of
the cluster. For IPFS-status information about the pins, use "status".
Metadata CIDs used to track sharded files are hidden by default. To view
all CIDs call with the -a flag.

Pins related to sharded DAGs are hidden by default (--all to show).
`,
ArgsUsage: "[CID]",
Flags: []cli.Flag{

@@ -603,11 +651,12 @@ to check that it matches the CLI version (shown by -v).
},
{
Name: "health",
Description: "Display information on clusterhealth",
Usage: "Cluster monitoring information",
Description: "Cluster monitoring information",
Subcommands: []cli.Command{
{
Name: "graph",
Usage: "display connectivity of cluster peers",
Usage: "create a graph displaying connectivity of cluster peers",
Description: `
This command queries all connected cluster peers and their ipfs peers to generate a
graph of the connections. Output is a dot file encoding the cluster's connection state.

@@ -13,6 +13,7 @@ import (
"testing"
"time"

"github.com/ipfs/ipfs-cluster/adder/sharding"
"github.com/ipfs/ipfs-cluster/allocator/descendalloc"
"github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/api/rest"

@@ -22,7 +23,6 @@ import (
"github.com/ipfs/ipfs-cluster/monitor/basic"
"github.com/ipfs/ipfs-cluster/monitor/pubsubmon"
"github.com/ipfs/ipfs-cluster/pintracker/maptracker"
"github.com/ipfs/ipfs-cluster/sharder"
"github.com/ipfs/ipfs-cluster/state"
"github.com/ipfs/ipfs-cluster/state/mapstate"
"github.com/ipfs/ipfs-cluster/test"

@@ -85,7 +85,7 @@ func randomBytes() []byte {
return bs
}

func createComponents(t *testing.T, i int, clusterSecret []byte, staging bool) (host.Host, *Config, *raft.Consensus, API, IPFSConnector, state.State, PinTracker, PeerMonitor, PinAllocator, Informer, Sharder, *test.IpfsMock) {
func createComponents(t *testing.T, i int, clusterSecret []byte, staging bool) (host.Host, *Config, *raft.Consensus, API, IPFSConnector, state.State, PinTracker, PeerMonitor, PinAllocator, Informer, *test.IpfsMock) {
mock := test.NewIpfsMock()
//
//clusterAddr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", clusterPort+i))

@@ -104,7 +104,7 @@ func createComponents(t *testing.T, i int, clusterSecret []byte, staging bool) (
checkErr(t, err)
peername := fmt.Sprintf("peer_%d", i)

clusterCfg, apiCfg, ipfshttpCfg, consensusCfg, trackerCfg, bmonCfg, psmonCfg, diskInfCfg, sharderCfg := testingConfigs()
clusterCfg, apiCfg, ipfshttpCfg, consensusCfg, trackerCfg, bmonCfg, psmonCfg, diskInfCfg := testingConfigs()

clusterCfg.ID = pid
clusterCfg.Peername = peername

@@ -139,10 +139,7 @@ func createComponents(t *testing.T, i int, clusterSecret []byte, staging bool) (
raftCon, err := raft.NewConsensus(host, consensusCfg, state, staging)
checkErr(t, err)

sharder, err := sharder.NewSharder(sharderCfg)
checkErr(t, err)

return host, clusterCfg, raftCon, api, ipfs, state, tracker, mon, alloc, inf, sharder, mock
return host, clusterCfg, raftCon, api, ipfs, state, tracker, mon, alloc, inf, mock
}

func makeMonitor(t *testing.T, h host.Host, bmonCfg *basic.Config, psmonCfg *pubsubmon.Config) PeerMonitor {

@@ -167,8 +164,8 @@ func createCluster(t *testing.T, host host.Host, clusterCfg *Config, raftCons *r
}

func createOnePeerCluster(t *testing.T, nth int, clusterSecret []byte) (*Cluster, *test.IpfsMock) {
host, clusterCfg, consensusCfg, api, ipfs, state, tracker, mon, alloc, inf, sharder, mock := createComponents(t, nth, clusterSecret, false)
cl := createCluster(t, host, clusterCfg, consensusCfg, api, ipfs, state, tracker, mon, alloc, inf, sharder)
host, clusterCfg, consensusCfg, api, ipfs, state, tracker, mon, alloc, inf, mock := createComponents(t, nth, clusterSecret, false)
cl := createCluster(t, host, clusterCfg, consensusCfg, api, ipfs, state, tracker, mon, alloc, inf)
<-cl.Ready()
return cl, mock
}

@@ -184,7 +181,6 @@ func createClusters(t *testing.T) ([]*Cluster, []*test.IpfsMock) {
mons := make([]PeerMonitor, nClusters, nClusters)
allocs := make([]PinAllocator, nClusters, nClusters)
infs := make([]Informer, nClusters, nClusters)
sharders := make([]Sharder, nClusters, nClusters)
ipfsMocks := make([]*test.IpfsMock, nClusters, nClusters)

hosts := make([]host.Host, nClusters, nClusters)

@@ -195,7 +191,7 @@ func createClusters(t *testing.T) ([]*Cluster, []*test.IpfsMock) {

for i := 0; i < nClusters; i++ {
// staging = true for all except first (i==0)
host, clusterCfg, raftCon, api, ipfs, state, tracker, mon, alloc, inf, sharder, mock := createComponents(t, i, testingClusterSecret, i != 0)
host, clusterCfg, raftCon, api, ipfs, state, tracker, mon, alloc, inf, mock := createComponents(t, i, testingClusterSecret, i != 0)
hosts[i] = host
cfgs[i] = clusterCfg
raftCons[i] = raftCon

@@ -206,7 +202,6 @@ func createClusters(t *testing.T) ([]*Cluster, []*test.IpfsMock) {
mons[i] = mon
allocs[i] = alloc
infs[i] = inf
sharders[i] = sharder
ipfsMocks[i] = mock
}

@@ -225,13 +220,13 @@ func createClusters(t *testing.T) ([]*Cluster, []*test.IpfsMock) {
}

// Start first node
clusters[0] = createCluster(t, hosts[0], cfgs[0], raftCons[0], apis[0], ipfss[0], states[0], trackers[0], mons[0], allocs[0], infs[0], sharders[0])
clusters[0] = createCluster(t, hosts[0], cfgs[0], raftCons[0], apis[0], ipfss[0], states[0], trackers[0], mons[0], allocs[0], infs[0])
<-clusters[0].Ready()
bootstrapAddr, _ := ma.NewMultiaddr(fmt.Sprintf("%s/ipfs/%s", clusters[0].host.Addrs()[0], clusters[0].id.Pretty()))

// Start the rest and join
for i := 1; i < nClusters; i++ {
clusters[i] = createCluster(t, hosts[i], cfgs[i], raftCons[i], apis[i], ipfss[i], states[i], trackers[i], mons[i], allocs[i], infs[i], sharders[i])
clusters[i] = createCluster(t, hosts[i], cfgs[i], raftCons[i], apis[i], ipfss[i], states[i], trackers[i], mons[i], allocs[i], infs[i])
err := clusters[i].Join(bootstrapAddr)
if err != nil {
logger.Error(err)

@@ -1784,7 +1779,7 @@ func TestClustersAddFileShard(t *testing.T) {
if !ok {
t.Fatalf("ipfs does not store cdag data")
}
cdagNode, err := sharder.CborDataToNode(cdagBytes, "cbor")
cdagNode, err := sharding.CborDataToNode(cdagBytes, "cbor")
if err != nil {
t.Fatalf("cdag bytes cannot be parsed to ipld-cbor node")
}

@@ -85,7 +85,7 @@ func testPin(t *testing.T, method string) {
ipfs.config.PinMethod = method

c, _ := cid.Decode(test.TestCid1)
err := ipfs.Pin(ctx, c, true)
err := ipfs.Pin(ctx, c, -1)
if err != nil {
t.Error("expected success pinning cid")
}

@@ -93,12 +93,12 @@ func testPin(t *testing.T, method string) {
if err != nil {
t.Fatal("expected success doing ls")
}
if !pinSt.IsPinned() {
if !pinSt.IsPinned(-1) {
t.Error("cid should have been pinned")
}

c2, _ := cid.Decode(test.ErrorCid)
err = ipfs.Pin(ctx, c2, true)
err = ipfs.Pin(ctx, c2, -1)
if err == nil {
t.Error("expected error pinning cid")
}

@@ -119,7 +119,7 @@ func TestIPFSUnpin(t *testing.T) {
if err != nil {
t.Error("expected success unpinning non-pinned cid")
}
ipfs.Pin(ctx, c, true)
ipfs.Pin(ctx, c, -1)
err = ipfs.Unpin(ctx, c)
if err != nil {
t.Error("expected success unpinning pinned cid")

@@ -134,9 +134,9 @@ func TestIPFSPinLsCid(t *testing.T) {
c, _ := cid.Decode(test.TestCid1)
c2, _ := cid.Decode(test.TestCid2)

ipfs.Pin(ctx, c, true)
ipfs.Pin(ctx, c, -1)
ips, err := ipfs.PinLsCid(ctx, c)
if err != nil || !ips.IsPinned() {
if err != nil || !ips.IsPinned(-1) {
t.Error("c should appear pinned")
}

@@ -154,8 +154,8 @@ func TestIPFSPinLs(t *testing.T) {
c, _ := cid.Decode(test.TestCid1)
c2, _ := cid.Decode(test.TestCid2)

ipfs.Pin(ctx, c, true)
ipfs.Pin(ctx, c2, true)
ipfs.Pin(ctx, c, -1)
ipfs.Pin(ctx, c2, -1)
ipsMap, err := ipfs.PinLs(ctx, "")
if err != nil {
t.Error("should not error")

@@ -165,7 +165,7 @@ func TestIPFSPinLs(t *testing.T) {
t.Fatal("the map does not contain expected keys")
}

if !ipsMap[test.TestCid1].IsPinned() || !ipsMap[test.TestCid2].IsPinned() {
if !ipsMap[test.TestCid1].IsPinned(-1) || !ipsMap[test.TestCid2].IsPinned(-1) {
t.Error("c1 and c2 should appear pinned")
}
}

@@ -772,7 +772,7 @@ func TestRepoSize(t *testing.T) {
}

c, _ := cid.Decode(test.TestCid1)
err = ipfs.Pin(ctx, c, true)
err = ipfs.Pin(ctx, c, -1)
if err != nil {
t.Error("expected success pinning cid")
}
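One observation (an assumption, not text from the commit): the tests above replace the old boolean "recursive" argument of Pin with an integer max depth, where -1 stands for unlimited (recursive) and 0 for a direct pin. A minimal sketch of that mapping:

package main

import "fmt"

// toMaxDepth shows how the old recursive flag translates into the new
// maxDepth argument used in the updated tests above.
func toMaxDepth(recursive bool) int {
	if recursive {
		return -1 // unlimited depth, i.e. a recursive pin
	}
	return 0 // direct pin
}

func main() {
	fmt.Println(toMaxDepth(true), toMaxDepth(false)) // -1 0
}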

@@ -59,6 +59,14 @@ func (mock *mockService) IPFSUnpin(ctx context.Context, in api.PinSerial, out *s
return nil
}

func testPin(c *cid.Cid, min, max int, allocs ...peer.ID) api.Pin {
pin := api.PinCid(c)
pin.ReplicationFactorMin = min
pin.ReplicationFactorMax = max
pin.Allocations = allocs
return pin
}

func testSlowMapPinTracker(t *testing.T) *MapPinTracker {
cfg := &Config{}
cfg.Default()

@@ -101,12 +109,7 @@ func TestTrack(t *testing.T) {
h, _ := cid.Decode(test.TestCid1)

// Let's start with a local pin
c := api.Pin{
Cid: h,
Allocations: []peer.ID{},
ReplicationFactorMin: -1,
ReplicationFactorMax: -1,
}
c := testPin(h, -1, -1)

err := mpt.Track(c)
if err != nil {

@@ -121,12 +124,7 @@ func TestTrack(t *testing.T) {
}

// Unpin and set remote
c = api.Pin{
Cid: h,
Allocations: []peer.ID{test.TestPeerID2},
ReplicationFactorMin: 1,
ReplicationFactorMax: 1,
}
c = testPin(h, 1, 1, test.TestPeerID2)
err = mpt.Track(c)
if err != nil {
t.Fatal(err)

@@ -148,12 +146,7 @@ func TestUntrack(t *testing.T) {
h2, _ := cid.Decode(test.TestCid2)

// LocalPin
c := api.Pin{
Cid: h1,
Allocations: []peer.ID{},
ReplicationFactorMin: -1,
ReplicationFactorMax: -1,
}
c := testPin(h1, -1, -1)

err := mpt.Track(c)
if err != nil {

@@ -161,12 +154,7 @@ func TestUntrack(t *testing.T) {
}

// Remote pin
c = api.Pin{
Cid: h2,
Allocations: []peer.ID{test.TestPeerID2},
ReplicationFactorMin: 1,
ReplicationFactorMax: 1,
}
c = testPin(h2, 1, 1, test.TestPeerID2)
err = mpt.Track(c)
if err != nil {
t.Fatal(err)

@@ -208,19 +196,9 @@ func TestStatusAll(t *testing.T) {
h2, _ := cid.Decode(test.TestCid2)

// LocalPin
c := api.Pin{
Cid: h1,
Allocations: []peer.ID{},
ReplicationFactorMin: -1,
ReplicationFactorMax: -1,
}
c := testPin(h1, -1, -1)
mpt.Track(c)
c = api.Pin{
Cid: h2,
Allocations: []peer.ID{},
ReplicationFactorMin: 1,
ReplicationFactorMax: 1,
}
c = testPin(h2, 1, 1)
mpt.Track(c)

time.Sleep(200 * time.Millisecond)

@@ -248,19 +226,9 @@ func TestSyncAndRecover(t *testing.T) {
h1, _ := cid.Decode(test.TestCid1)
h2, _ := cid.Decode(test.TestCid2)

c := api.Pin{
Cid: h1,
Allocations: []peer.ID{},
ReplicationFactorMin: -1,
ReplicationFactorMax: -1,
}
c := testPin(h1, -1, -1)
mpt.Track(c)
c = api.Pin{
Cid: h2,
Allocations: []peer.ID{},
ReplicationFactorMin: -1,
ReplicationFactorMax: -1,
}
c = testPin(h2, -1, -1)
mpt.Track(c)

time.Sleep(100 * time.Millisecond)

@@ -309,12 +277,7 @@ func TestRecoverAll(t *testing.T) {

h1, _ := cid.Decode(test.TestCid1)

c := api.Pin{
Cid: h1,
Allocations: []peer.ID{},
ReplicationFactorMin: -1,
ReplicationFactorMax: -1,
}
c := testPin(h1, -1, -1)
mpt.Track(c)
time.Sleep(100 * time.Millisecond)
mpt.optracker.SetError(h1, errors.New("fakeerror"))

@@ -351,20 +314,9 @@ func TestSyncAll(t *testing.T) {
h1, _ := cid.Decode(test.TestCid1)
h2, _ := cid.Decode(test.TestCid2)

c := api.Pin{
Cid: h1,
Allocations: []peer.ID{},
ReplicationFactorMin: -1,
ReplicationFactorMax: -1,
}

c := testPin(h1, -1, -1)
mpt.Track(c)
c = api.Pin{
Cid: h2,
Allocations: []peer.ID{},
ReplicationFactorMin: -1,
ReplicationFactorMax: -1,
}
c = testPin(h2, -1, -1)
mpt.Track(c)

time.Sleep(100 * time.Millisecond)

@@ -386,13 +338,7 @@ func TestUntrackTrack(t *testing.T) {
h1, _ := cid.Decode(test.TestCid1)

// LocalPin
c := api.Pin{
Cid: h1,
Allocations: []peer.ID{},
ReplicationFactorMin: -1,
ReplicationFactorMax: -1,
}

c := testPin(h1, -1, -1)
err := mpt.Track(c)
if err != nil {
t.Fatal(err)

@@ -413,12 +359,7 @@ func TestTrackUntrackWithCancel(t *testing.T) {
slowPinCid, _ := cid.Decode(test.TestSlowCid1)

// LocalPin
slowPin := api.Pin{
Cid: slowPinCid,
Allocations: []peer.ID{},
ReplicationFactorMin: -1,
ReplicationFactorMax: -1,
}
slowPin := testPin(slowPinCid, -1, -1)

err := mpt.Track(slowPin)
if err != nil {

@@ -458,20 +399,10 @@ func TestTrackUntrackWithNoCancel(t *testing.T) {
fastPinCid, _ := cid.Decode(pinCancelCid)

// SlowLocalPin
slowPin := api.Pin{
Cid: slowPinCid,
Allocations: []peer.ID{},
ReplicationFactorMin: -1,
ReplicationFactorMax: -1,
}
slowPin := testPin(slowPinCid, -1, -1)

// LocalPin
fastPin := api.Pin{
Cid: fastPinCid,
Allocations: []peer.ID{},
ReplicationFactorMin: -1,
ReplicationFactorMax: -1,
}
fastPin := testPin(fastPinCid, -1, -1)

err := mpt.Track(slowPin)
if err != nil {

@@ -512,12 +443,7 @@ func TestUntrackTrackWithCancel(t *testing.T) {
slowPinCid, _ := cid.Decode(test.TestSlowCid1)

// LocalPin
slowPin := api.Pin{
Cid: slowPinCid,
Allocations: []peer.ID{},
ReplicationFactorMin: -1,
ReplicationFactorMax: -1,
}
slowPin := testPin(slowPinCid, -1, -1)

err := mpt.Track(slowPin)
if err != nil {

@@ -567,20 +493,10 @@ func TestUntrackTrackWithNoCancel(t *testing.T) {
fastPinCid, _ := cid.Decode(unpinCancelCid)

// SlowLocalPin
slowPin := api.Pin{
Cid: slowPinCid,
Allocations: []peer.ID{},
ReplicationFactorMin: -1,
ReplicationFactorMax: -1,
}
slowPin := testPin(slowPinCid, -1, -1)

// LocalPin
fastPin := api.Pin{
Cid: fastPinCid,
Allocations: []peer.ID{},
ReplicationFactorMin: -1,
ReplicationFactorMax: -1,
}
fastPin := testPin(fastPinCid, -1, -1)

err := mpt.Track(slowPin)
if err != nil {

@@ -631,12 +547,7 @@ func TestTrackUntrackConcurrent(t *testing.T) {
h1, _ := cid.Decode(test.TestCid1)

// LocalPin
c := api.Pin{
Cid: h1,
Allocations: []peer.ID{},
ReplicationFactorMin: -1,
ReplicationFactorMax: -1,
}
c := testPin(h1, -1, -1)

var wg sync.WaitGroup

@@ -16,10 +16,14 @@ var testCid1, _ = cid.Decode("QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmq")
var testPeerID1, _ = peer.IDB58Decode("QmXZrtE5jQwXNqCJMfHUTQkvhQ4ZAnqMnmzFMJfLewuabc")

var c = api.Pin{
Cid: testCid1,
Allocations: []peer.ID{testPeerID1},
ReplicationFactorMax: -1,
ReplicationFactorMin: -1,
Cid: testCid1,
Allocations: []peer.ID{testPeerID1},
MaxDepth: -1,
PinOptions: api.PinOptions{
ReplicationFactorMax: -1,
ReplicationFactorMin: -1,
Name: "test",
},
}

func TestAdd(t *testing.T) {

@@ -63,8 +63,8 @@ func TestRPCMockValid(t *testing.T) {

// Test that testing directory is created without error
func TestGenerateTestDirs(t *testing.T) {
_, err := GetTestingDirSerial()
if err != nil {
t.Fatal(err)
}
sth := NewShardingTestHelper()
defer sth.Clean()
_ = sth.GetTreeMultiReader(t)
_ = sth.GetRandFileMultiReader(t, 2)
}