golint govets

License: MIT
Signed-off-by: Hector Sanjuan <code@hector.link>
This commit is contained in:
Hector Sanjuan 2018-07-24 14:21:29 +02:00
parent 6b9000fe7b
commit 327a81b85a
12 changed files with 42 additions and 27 deletions

View File

@ -10,6 +10,7 @@ import (
var logger = logging.Logger("adder")
// Adder represents a module capable of adding content to IPFS Cluster.
type Adder interface {
// FromMultipart adds from a multipart reader and returns
// the resulting CID.

View File

@ -74,11 +74,14 @@ func (imp *Importer) start() bool {
return !retVal
}
// ImportFile chunks a File and sends the results (blocks) to the
// importer channels.
// Go starts a goroutine which reads the blocks as outputted by the
// ipfsadd module called with the parameters of this importer. The blocks,
// errors and output are placed in the respective importer channels for
// further processing. When there are no more blocks, or an error happens,
// the channels will be closed.
func (imp *Importer) Go(ctx context.Context) error {
if !imp.start() {
return errors.New("importing process already started or finished.")
return errors.New("importing process already started or finished")
}
dagsvc := newAdderDAGService(imp.blocks)
@ -133,7 +136,8 @@ func (imp *Importer) Go(ctx context.Context) error {
return nil
}
// Run calls the given BlockHandler every node read from the importer.
// Run triggers the importing process (calling Go) and calls the given BlockHandler
// on every node read from the importer.
// It returns the value returned by the last-called BlockHandler.
func (imp *Importer) Run(ctx context.Context, blockF BlockHandler) (string, error) {
var retVal string

View File

@ -1,4 +1,4 @@
// The ipfsadd package is a simplified copy of go-ipfs/core/coreunix/add.go
// Package ipfsadd is a simplified copy of go-ipfs/core/coreunix/add.go
package ipfsadd
import (

View File

@ -21,10 +21,15 @@ import (
var logger = logging.Logger("addlocal")
// Adder is an implementation of IPFS Cluster Adder interface,
// which allows adding content directly to IPFS daemons attached
// to the Cluster (without sharding).
type Adder struct {
rpcClient *rpc.Client
}
// New returns a new Adder with the given rpc Client. The client is used
// to perform calls to IPFSBlockPut and Pin content on Cluster.
func New(rpc *rpc.Client) *Adder {
return &Adder{
rpcClient: rpc,
@ -63,6 +68,7 @@ func (a *Adder) putBlock(ctx context.Context, n *api.NodeWithMeta, dests []peer.
return rpcutil.CheckErrs(errs)
}
// FromMultipart adds a file encoded as multipart.
func (a *Adder) FromMultipart(ctx context.Context, r *multipart.Reader, p *adder.Params) (*cid.Cid, error) {
f := &files.MultipartFile{
Mediatype: "multipart/form-data",
@ -101,7 +107,7 @@ func (a *Adder) FromMultipart(ctx context.Context, r *multipart.Reader, p *adder
lastCid, err := cid.Decode(lastCidStr)
if err != nil {
return nil, errors.New("nothing imported. Invalid Cid!")
return nil, errors.New("nothing imported: invalid Cid")
}
// Finally, cluster pin the result

View File

@ -8,6 +8,7 @@ import (
"github.com/ipfs/ipfs-cluster/api"
)
// DefaultShardSize is the shard size for params objects created with DefaultParams().
var DefaultShardSize = uint64(100 * 1024 * 1024) // 100 MB
// Params contains all of the configurable parameters needed to specify the

View File

@ -1,4 +1,4 @@
// package sharding implements a sharding adder that chunks and
// Package sharding implements a sharding adder that chunks and
// shards content while it's added, creating Cluster DAGs and
// pinning them.
package sharding
@ -20,16 +20,23 @@ import (
var logger = logging.Logger("addshard")
// Adder is an implementation of IPFS Cluster's Adder interface which
// shards content while adding among several IPFS Cluster peers,
// creating a Cluster DAG to track and pin that content selectively
// in the IPFS daemons allocated to it.
type Adder struct {
rpcClient *rpc.Client
}
// New returns a new Adder, which uses the given rpc client to perform
// Allocate, IPFSBlockPut and Pin requests to other cluster components.
func New(rpc *rpc.Client) *Adder {
return &Adder{
rpcClient: rpc,
}
}
// FromMultipart adds (and shards) a file encoded as multipart.
func (a *Adder) FromMultipart(ctx context.Context, r *multipart.Reader, p *adder.Params) (*cid.Cid, error) {
logger.Debugf("adding from multipart with params: %+v", p)

View File

@ -42,7 +42,7 @@ func VerifyShards(t *testing.T, rootCid *cid.Cid, pins MockPinStore, ipfs MockBl
}
if !clusterPin.Reference.Equals(metaPin.Cid) {
return nil, fmt.Errorf("ClusterDAG should reference the MetaPin")
return nil, fmt.Errorf("clusterDAG should reference the MetaPin")
}
clusterDAGBlock, err := ipfs.BlockGet(clusterPin.Cid)

View File

@ -670,7 +670,7 @@ type Pin struct {
}
// PinCid is a shortcut to create a Pin only with a Cid. Default is for pin to
// be recursive and the pin to be of DataType
// be recursive and the pin to be of DataType.
func PinCid(c *cid.Cid) Pin {
return Pin{
Cid: c,
@ -680,6 +680,8 @@ func PinCid(c *cid.Cid) Pin {
}
}
// PinWithOpts creates a new Pin calling PinCid(c) and then sets
// its PinOptions fields with the given options.
func PinWithOpts(c *cid.Cid, opts PinOptions) Pin {
p := PinCid(c)
p.ReplicationFactorMin = opts.ReplicationFactorMin
@ -825,11 +827,12 @@ type NodeWithMeta struct {
Data []byte
Cid string
CumSize uint64 //Cumulative size
CumSize uint64 // Cumulative size
Format string
}
// Returns how big is the block
// Size returns how big the block is. It differs from CumSize, which
// records the size of the underlying tree.
func (n *NodeWithMeta) Size() uint64 {
return uint64(len(n.Data))
}

View File

@ -981,14 +981,14 @@ func (c *Cluster) setupPin(pin *api.Pin) error {
return errors.New("must pin roots directly")
}
if pin.Reference == nil {
return errors.New("ClusterDAG pins should reference a Meta pin")
return errors.New("clusterDAG pins should reference a Meta pin")
}
case api.MetaType:
if pin.Allocations != nil && len(pin.Allocations) != 0 {
return errors.New("meta pin should not specify allocations")
}
if pin.Reference == nil {
return errors.New("MetaPins should reference a ClusterDAG")
return errors.New("metaPins should reference a ClusterDAG")
}
default:

View File

@ -112,7 +112,7 @@ func (ipfs *mockConnector) BlockPut(nwm api.NodeWithMeta) error {
func (ipfs *mockConnector) BlockGet(c *cid.Cid) ([]byte, error) {
d, ok := ipfs.blocks.Load(c.String())
if !ok {
errors.New("block not found")
return nil, errors.New("block not found")
}
return d.([]byte), nil
}

View File

@ -238,9 +238,9 @@ func (mock *mockService) RecoverLocal(ctx context.Context, in api.PinSerial, out
return mock.TrackerRecover(ctx, in, out)
}
func (rpcapi *mockService) Allocate(ctx context.Context, in api.PinSerial, out *[]string) error {
func (mock *mockService) Allocate(ctx context.Context, in api.PinSerial, out *[]string) error {
if in.ReplicationFactorMin > 1 {
return errors.New("ReplMin too high: can only mock-allocate to 1")
return errors.New("replMin too high: can only mock-allocate to 1")
}
*out = []string{""} // local peer
return nil

View File

@ -17,20 +17,11 @@ const shardingTestFile = "testFile"
// Variables related to adding the testing directory generated by tests
var (
// Shard and Cdag Cids
TestShardCid = "zdpuAoiNm1ntWx6jpgcReTiCWFHJSTpvTw4bAAn9p6yDnznqh"
TestShardCid2 = "zdpuAmUorxmxhrk96mVxQTuUi6QioKzKQKK8XvzU5WURU4Qea"
TestCdagCid = "zdpuAyVKsP6xvx1p81pKi7faxUs2GuD2ZG4o3CwMycvCLyuhK"
TestCdagCid2 = "zdpuAynm14qkpVPMMazNjkz3nJYhtTXJ3TpRp5aEkoMHBwmKc"
TestMetaRootCid = "QmYCLpFCj9Av8NFjkQogvtXspnTDFWaizLpVFEijHTH4eV"
TestMetaRootCid2 = "QmUatEiCFxtckNae8XyDVGwL1WZq8cVKTMacDkqz8zAE3q"
TestShardData, _ = hex.DecodeString("a16130d82a58230012209273fd63ec94bed5abb219b2d9cb010cabe4af7b0177292d4335eff50464060a")
TestShard2Data, _ = hex.DecodeString("a26130d82a5823001220a736215b7487e753686cc4e965ca62dc45d46303ef35349aee69a647c22ac57f6131d82a58230012205ccb8c75199ea2b1ef52a03ecaaa3186dd04fe0f8133aa0b2d5195c3c0844a10")
TestCdagData, _ = hex.DecodeString("a16130d82a5825000171122030e9b9b4f1bc4b5a3759a93b4e77983cd053f84174e1b0cd628dc6c32fb0da14")
TestCdagData2, _ = hex.DecodeString("a26130d82a582500017112200fb89a9189514be1d5418e0890367992b651af390f06d3751c672cba6c00b4276131d82a5825000171122030e9b9b4f1bc4b5a3759a93b4e77983cd053f84174e1b0cd628dc6c32fb0da14")
NumShardingDirPrints = 15
ShardingDirBalancedRootCID = "QmbfGRPTUd7L1xsAZZ1A3kUFP1zkEZ9kHdb6AGaajBzGGX"
ShardingDirTrickleRootCID = "QmcqtKBVCrgZBXksfYzUxmw6S2rkyQhEhckqFBAUBcS1qz"
@ -110,6 +101,8 @@ func (sth *ShardingTestHelper) GetTreeMultiReader(t *testing.T) *files.MultiFile
return files.NewMultiFileReader(slf, true)
}
// GetTreeSerialFile returns a files.SerialFile pointing to the testing
// directory tree (see GetTreeMultiReader).
func (sth *ShardingTestHelper) GetTreeSerialFile(t *testing.T) files.File {
st := sth.makeTree(t)
sf, err := files.NewSerialFile(shardingTestTree, sth.path(shardingTestTree), false, st)