From 327a81b85a03c57fd29ec97e09bea2b3905ece38 Mon Sep 17 00:00:00 2001
From: Hector Sanjuan
Date: Tue, 24 Jul 2018 14:21:29 +0200
Subject: [PATCH] golint govets

License: MIT
Signed-off-by: Hector Sanjuan
---
 adder/adder.go           |  1 +
 adder/importer.go        | 12 ++++++++----
 adder/ipfsadd/add.go     |  2 +-
 adder/local/adder.go     |  8 +++++++-
 adder/params.go          |  1 +
 adder/sharding/adder.go  |  9 ++++++++-
 adder/sharding/verify.go |  2 +-
 api/types.go             |  9 ++++++---
 cluster.go               |  4 ++--
 cluster_test.go          |  2 +-
 test/rpc_api_mock.go     |  4 ++--
 test/sharding.go         | 15 ++++-----------
 12 files changed, 42 insertions(+), 27 deletions(-)

diff --git a/adder/adder.go b/adder/adder.go
index 8328a80a..bc5957fb 100644
--- a/adder/adder.go
+++ b/adder/adder.go
@@ -10,6 +10,7 @@ import (
 
 var logger = logging.Logger("adder")
 
+// Adder represents a module capable of adding content to IPFS Cluster.
 type Adder interface {
 	// FromMultipart adds from a multipart reader and returns
 	// the resulting CID.
diff --git a/adder/importer.go b/adder/importer.go
index e9043ceb..9fe8ec87 100644
--- a/adder/importer.go
+++ b/adder/importer.go
@@ -74,11 +74,14 @@ func (imp *Importer) start() bool {
 	return !retVal
 }
 
-// ImportFile chunks a File and sends the results (blocks) to the
-// importer channels.
+// Go starts a goroutine which reads the blocks as output by the
+// ipfsadd module called with the parameters of this importer. The blocks,
+// errors and output are placed in the respective importer channels for
+// further processing. When there are no more blocks, or an error happens,
+// the channels will be closed.
 func (imp *Importer) Go(ctx context.Context) error {
 	if !imp.start() {
-		return errors.New("importing process already started or finished.")
+		return errors.New("importing process already started or finished")
 	}
 
 	dagsvc := newAdderDAGService(imp.blocks)
@@ -133,7 +136,8 @@ func (imp *Importer) Go(ctx context.Context) error {
 	return nil
 }
 
-// Run calls the given BlockHandler every node read from the importer.
+// Run triggers the importing process (calling Go) and calls the given BlockHandler
+// on every node read from the importer.
 // It returns the value returned by the last-called BlockHandler.
 func (imp *Importer) Run(ctx context.Context, blockF BlockHandler) (string, error) {
 	var retVal string
diff --git a/adder/ipfsadd/add.go b/adder/ipfsadd/add.go
index 48874cb3..2823cdbe 100644
--- a/adder/ipfsadd/add.go
+++ b/adder/ipfsadd/add.go
@@ -1,4 +1,4 @@
-// The ipfsadd package is a simplified copy of go-ipfs/core/coreunix/add.go
+// Package ipfsadd is a simplified copy of go-ipfs/core/coreunix/add.go
 package ipfsadd
 
 import (
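Note on the error-string changes: the errors.New edit in importer.go above follows golint's convention that error strings start lower-case and carry no trailing punctuation, because they usually end up embedded in larger messages. A minimal sketch of the idea in isolation (names here are illustrative, not code from this repository):

    package example

    import (
        "errors"
        "fmt"
    )

    // errStarted is lower-case and has no trailing period, so it composes
    // cleanly when a caller wraps it with extra context.
    var errStarted = errors.New("importing process already started or finished")

    func startImport(alreadyStarted bool) error {
        if alreadyStarted {
            // Reads as one sentence:
            // "importer: importing process already started or finished"
            return fmt.Errorf("importer: %v", errStarted)
        }
        return nil
    }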
diff --git a/adder/local/adder.go b/adder/local/adder.go
index 907c9422..b0d3dc47 100644
--- a/adder/local/adder.go
+++ b/adder/local/adder.go
@@ -21,10 +21,15 @@ import (
 
 var logger = logging.Logger("addlocal")
 
+// Adder is an implementation of IPFS Cluster's Adder interface,
+// which allows adding content directly to the IPFS daemons attached
+// to the Cluster (without sharding).
 type Adder struct {
 	rpcClient *rpc.Client
 }
 
+// New returns a new Adder with the given rpc Client. The client is used
+// to perform calls to IPFSBlockPut and Pin content on Cluster.
 func New(rpc *rpc.Client) *Adder {
 	return &Adder{
 		rpcClient: rpc,
 	}
 }
@@ -63,6 +68,7 @@ func (a *Adder) putBlock(ctx context.Context, n *api.NodeWithMeta, dests []peer.
 	return rpcutil.CheckErrs(errs)
 }
 
+// FromMultipart allows adding a file encoded as multipart.
 func (a *Adder) FromMultipart(ctx context.Context, r *multipart.Reader, p *adder.Params) (*cid.Cid, error) {
 	f := &files.MultipartFile{
 		Mediatype: "multipart/form-data",
@@ -101,7 +107,7 @@ func (a *Adder) FromMultipart(ctx context.Context, r *multipart.Reader, p *adder
 
 	lastCid, err := cid.Decode(lastCidStr)
 	if err != nil {
-		return nil, errors.New("nothing imported. Invalid Cid!")
+		return nil, errors.New("nothing imported: invalid Cid")
 	}
 
 	// Finally, cluster pin the result
diff --git a/adder/params.go b/adder/params.go
index 082cbdf7..98de298c 100644
--- a/adder/params.go
+++ b/adder/params.go
@@ -8,6 +8,7 @@ import (
 	"github.com/ipfs/ipfs-cluster/api"
 )
 
+// DefaultShardSize is the shard size for params objects created with DefaultParams().
 var DefaultShardSize = uint64(100 * 1024 * 1024) // 100 MB
 
 // Params contains all of the configurable parameters needed to specify the
diff --git a/adder/sharding/adder.go b/adder/sharding/adder.go
index f72a0df9..eb5a0112 100644
--- a/adder/sharding/adder.go
+++ b/adder/sharding/adder.go
@@ -1,4 +1,4 @@
-// package sharding implements a sharding adder that chunks and
+// Package sharding implements a sharding adder that chunks and
 // shards content while it's added, creating Cluster DAGs and
 // pinning them.
 package sharding
@@ -20,16 +20,23 @@ import (
 
 var logger = logging.Logger("addshard")
 
+// Adder is an implementation of IPFS Cluster's Adder interface which
+// shards content while adding among several IPFS Cluster peers,
+// creating a Cluster DAG to track and pin that content selectively
+// in the IPFS daemons allocated to it.
 type Adder struct {
 	rpcClient *rpc.Client
 }
 
+// New returns a new Adder, which uses the given rpc client to perform
+// Allocate, IPFSBlockPut and Pin requests to other cluster components.
 func New(rpc *rpc.Client) *Adder {
 	return &Adder{
 		rpcClient: rpc,
 	}
 }
 
+// FromMultipart allows adding (and sharding) a file encoded as multipart.
 func (a *Adder) FromMultipart(ctx context.Context, r *multipart.Reader, p *adder.Params) (*cid.Cid, error) {
 	logger.Debugf("adding from multipart with params: %+v", p)
 
diff --git a/adder/sharding/verify.go b/adder/sharding/verify.go
index e331bf0d..83fd5546 100644
--- a/adder/sharding/verify.go
+++ b/adder/sharding/verify.go
@@ -42,7 +42,7 @@ func VerifyShards(t *testing.T, rootCid *cid.Cid, pins MockPinStore, ipfs MockBl
 	}
 
 	if !clusterPin.Reference.Equals(metaPin.Cid) {
-		return nil, fmt.Errorf("ClusterDAG should reference the MetaPin")
+		return nil, fmt.Errorf("clusterDAG should reference the MetaPin")
 	}
 
 	clusterDAGBlock, err := ipfs.BlockGet(clusterPin.Cid)
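The comments added in these files address golint's two documentation rules: a package comment must start with "Package <name>", and a comment on an exported identifier must start with that identifier's name. A standalone illustration of the expected shape (the widget package below is made up for this purpose, not part of ipfs-cluster):

    // Package widget demonstrates the comment forms golint expects.
    package widget

    // DefaultSize is the size used when the caller does not specify one.
    var DefaultSize = 42

    // Widget is an exported type; its comment begins with "Widget".
    type Widget struct {
        size int
    }

    // New returns a Widget configured with DefaultSize.
    func New() *Widget {
        return &Widget{size: DefaultSize}
    }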
diff --git a/api/types.go b/api/types.go
index ee217fc2..ed1ff61e 100644
--- a/api/types.go
+++ b/api/types.go
@@ -670,7 +670,7 @@ type Pin struct {
 }
 
 // PinCid is a shortcut to create a Pin only with a Cid. Default is for pin to
-// be recursive and the pin to be of DataType
+// be recursive and the pin to be of DataType.
 func PinCid(c *cid.Cid) Pin {
 	return Pin{
 		Cid: c,
@@ -680,6 +680,8 @@
 	}
 }
 
+// PinWithOpts creates a new Pin calling PinCid(c) and then sets
+// its PinOptions fields with the given options.
 func PinWithOpts(c *cid.Cid, opts PinOptions) Pin {
 	p := PinCid(c)
 	p.ReplicationFactorMin = opts.ReplicationFactorMin
@@ -825,11 +827,12 @@ type NodeWithMeta struct {
 	Data []byte
 	Cid  string
 
-	CumSize uint64 //Cumulative size
+	CumSize uint64 // Cumulative size
 	Format  string
 }
 
-// Returns how big is the block
+// Size returns how big the block is. It is different from CumSize, which
+// records the size of the underlying tree.
 func (n *NodeWithMeta) Size() uint64 {
 	return uint64(len(n.Data))
 }
diff --git a/cluster.go b/cluster.go
index d441466d..6e8afc21 100644
--- a/cluster.go
+++ b/cluster.go
@@ -981,14 +981,14 @@ func (c *Cluster) setupPin(pin *api.Pin) error {
 			return errors.New("must pin roots directly")
 		}
 		if pin.Reference == nil {
-			return errors.New("ClusterDAG pins should reference a Meta pin")
+			return errors.New("clusterDAG pins should reference a Meta pin")
 		}
 	case api.MetaType:
 		if pin.Allocations != nil && len(pin.Allocations) != 0 {
 			return errors.New("meta pin should not specify allocations")
 		}
 		if pin.Reference == nil {
-			return errors.New("MetaPins should reference a ClusterDAG")
+			return errors.New("metaPins should reference a ClusterDAG")
 		}
 
 	default:
diff --git a/cluster_test.go b/cluster_test.go
index 7944a32f..be318e19 100644
--- a/cluster_test.go
+++ b/cluster_test.go
@@ -112,7 +112,7 @@ func (ipfs *mockConnector) BlockPut(nwm api.NodeWithMeta) error {
 func (ipfs *mockConnector) BlockGet(c *cid.Cid) ([]byte, error) {
 	d, ok := ipfs.blocks.Load(c.String())
 	if !ok {
-		errors.New("block not found")
+		return nil, errors.New("block not found")
 	}
 	return d.([]byte), nil
 }
diff --git a/test/rpc_api_mock.go b/test/rpc_api_mock.go
index 58d68b5e..9918c490 100644
--- a/test/rpc_api_mock.go
+++ b/test/rpc_api_mock.go
@@ -238,9 +238,9 @@ func (mock *mockService) RecoverLocal(ctx context.Context, in api.PinSerial, out
 	return mock.TrackerRecover(ctx, in, out)
 }
 
-func (rpcapi *mockService) Allocate(ctx context.Context, in api.PinSerial, out *[]string) error {
+func (mock *mockService) Allocate(ctx context.Context, in api.PinSerial, out *[]string) error {
 	if in.ReplicationFactorMin > 1 {
-		return errors.New("ReplMin too high: can only mock-allocate to 1")
+		return errors.New("replMin too high: can only mock-allocate to 1")
 	}
 	*out = []string{""} // local peer
 	return nil
 }
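The cluster_test.go hunk above is more than a style fix: the old code built an error without returning it, so a missing block fell through to the type assertion on a nil interface value and panicked. A self-contained sketch of the corrected pattern (the sync.Map store below is illustrative, not the mockConnector from the repository):

    package example

    import (
        "errors"
        "sync"
    )

    var blocks sync.Map // maps a block CID string to its []byte data

    // getBlock returns the stored bytes for a key. The early return is the
    // important part: without it, d is nil and d.([]byte) panics at runtime.
    func getBlock(key string) ([]byte, error) {
        d, ok := blocks.Load(key)
        if !ok {
            return nil, errors.New("block not found")
        }
        return d.([]byte), nil
    }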
"QmbfGRPTUd7L1xsAZZ1A3kUFP1zkEZ9kHdb6AGaajBzGGX" ShardingDirTrickleRootCID = "QmcqtKBVCrgZBXksfYzUxmw6S2rkyQhEhckqFBAUBcS1qz" @@ -110,6 +101,8 @@ func (sth *ShardingTestHelper) GetTreeMultiReader(t *testing.T) *files.MultiFile return files.NewMultiFileReader(slf, true) } +// GetTreeSerialFile returns a files.SerialFile pointing to the testing +// directory tree (see GetTreeMultiReader). func (sth *ShardingTestHelper) GetTreeSerialFile(t *testing.T) files.File { st := sth.makeTree(t) sf, err := files.NewSerialFile(shardingTestTree, sth.path(shardingTestTree), false, st)