2018-07-24 12:21:29 +00:00
|
|
|
// Package sharding implements a sharding adder that chunks and
|
2018-07-04 16:30:24 +00:00
|
|
|
// shards content while it's added, creating Cluster DAGs and
|
|
|
|
// pinning them.
|
|
|
|
package sharding
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
2018-07-19 13:17:27 +00:00
|
|
|
"fmt"
|
2018-07-04 16:30:24 +00:00
|
|
|
"mime/multipart"
|
|
|
|
|
|
|
|
"github.com/ipfs/ipfs-cluster/adder"
|
|
|
|
"github.com/ipfs/ipfs-cluster/api"
|
|
|
|
|
|
|
|
rpc "github.com/hsanjuan/go-libp2p-gorpc"
|
2018-08-06 10:44:44 +00:00
|
|
|
cid "github.com/ipfs/go-cid"
|
|
|
|
files "github.com/ipfs/go-ipfs-cmdkit/files"
|
2018-07-04 16:30:24 +00:00
|
|
|
logging "github.com/ipfs/go-log"
|
|
|
|
)
|
|
|
|
|
|
|
|
// logger is the package-wide logger for the sharding adder.
var logger = logging.Logger("addshard")

// outputBuffer is the capacity of the Adder's output channel. It gives
// consumers some slack before block/shard progress updates start to
// block the adding process.
var outputBuffer = 200
|
2018-07-04 16:30:24 +00:00
|
|
|
|
2018-07-24 12:21:29 +00:00
|
|
|
// Adder is an implementation of IPFS Cluster's Adder interface which
|
|
|
|
// shards content while adding among several IPFS Cluster peers,
|
|
|
|
// creating a Cluster DAG to track and pin that content selectively
|
|
|
|
// in the IPFS daemons allocated to it.
|
2018-07-04 16:30:24 +00:00
|
|
|
type Adder struct {
|
|
|
|
rpcClient *rpc.Client
|
2018-08-06 20:49:28 +00:00
|
|
|
|
|
|
|
output chan *api.AddedOutput
|
2018-07-04 16:30:24 +00:00
|
|
|
}
|
|
|
|
|
2018-07-24 12:21:29 +00:00
|
|
|
// New returns a new Adder, which uses the given rpc client to perform
|
|
|
|
// Allocate, IPFSBlockPut and Pin requests to other cluster components.
|
2018-08-06 20:49:28 +00:00
|
|
|
func New(rpc *rpc.Client, discardOutput bool) *Adder {
|
|
|
|
output := make(chan *api.AddedOutput, outputBuffer)
|
|
|
|
if discardOutput {
|
|
|
|
go func() {
|
|
|
|
for range output {
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
}
|
|
|
|
|
2018-07-04 16:30:24 +00:00
|
|
|
return &Adder{
|
|
|
|
rpcClient: rpc,
|
2018-08-06 20:49:28 +00:00
|
|
|
output: output,
|
2018-07-04 16:30:24 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-08-06 20:49:28 +00:00
|
|
|
// Output returns a channel for output updates during the adding process.
|
|
|
|
func (a *Adder) Output() <-chan *api.AddedOutput {
|
|
|
|
return a.output
|
|
|
|
}
|
|
|
|
|
2018-07-24 12:21:29 +00:00
|
|
|
// FromMultipart allows to add (and shard) a file encoded as multipart.
|
2018-08-06 10:44:44 +00:00
|
|
|
func (a *Adder) FromMultipart(ctx context.Context, r *multipart.Reader, p *api.AddParams) (*cid.Cid, error) {
|
2018-07-19 13:17:27 +00:00
|
|
|
logger.Debugf("adding from multipart with params: %+v", p)
|
|
|
|
|
2018-07-04 16:30:24 +00:00
|
|
|
f := &files.MultipartFile{
|
|
|
|
Mediatype: "multipart/form-data",
|
|
|
|
Reader: r,
|
|
|
|
}
|
2018-08-06 20:49:28 +00:00
|
|
|
defer close(a.output)
|
|
|
|
defer f.Close()
|
|
|
|
|
|
|
|
ctxRun, cancelRun := context.WithCancel(ctx)
|
|
|
|
defer cancelRun()
|
2018-07-04 16:30:24 +00:00
|
|
|
|
|
|
|
pinOpts := api.PinOptions{
|
|
|
|
ReplicationFactorMin: p.ReplicationFactorMin,
|
|
|
|
ReplicationFactorMax: p.ReplicationFactorMax,
|
|
|
|
Name: p.Name,
|
|
|
|
ShardSize: p.ShardSize,
|
|
|
|
}
|
|
|
|
|
2018-08-06 20:49:28 +00:00
|
|
|
dagBuilder := newClusterDAGBuilder(a.rpcClient, pinOpts, a.output)
|
2018-07-04 16:30:24 +00:00
|
|
|
// Always stop the builder
|
|
|
|
defer dagBuilder.Cancel()
|
|
|
|
|
|
|
|
blockHandle := func(ctx context.Context, n *api.NodeWithMeta) (string, error) {
|
2018-07-19 13:17:27 +00:00
|
|
|
logger.Debugf("handling block %s (size %d)", n.Cid, n.Size())
|
|
|
|
select {
|
|
|
|
case <-dagBuilder.Done():
|
|
|
|
return "", dagBuilder.Err()
|
|
|
|
case <-ctx.Done():
|
|
|
|
return "", ctx.Err()
|
|
|
|
case dagBuilder.Blocks() <- n:
|
|
|
|
return n.Cid, nil
|
|
|
|
}
|
2018-07-04 16:30:24 +00:00
|
|
|
}
|
|
|
|
|
2018-07-19 13:17:27 +00:00
|
|
|
logger.Debug("creating importer")
|
2018-08-06 20:49:28 +00:00
|
|
|
importer, err := adder.NewImporter(f, p, a.output)
|
2018-07-04 16:30:24 +00:00
|
|
|
if err != nil {
|
2018-07-19 13:17:27 +00:00
|
|
|
return nil, err
|
2018-07-04 16:30:24 +00:00
|
|
|
}
|
|
|
|
|
2018-07-19 13:17:27 +00:00
|
|
|
logger.Infof("importing file to Cluster (name '%s')", p.Name)
|
2018-08-06 20:49:28 +00:00
|
|
|
rootCidStr, err := importer.Run(ctxRun, blockHandle)
|
2018-07-04 16:30:24 +00:00
|
|
|
if err != nil {
|
2018-08-06 20:49:28 +00:00
|
|
|
cancelRun()
|
2018-07-19 13:17:27 +00:00
|
|
|
logger.Error("Importing aborted: ", err)
|
|
|
|
return nil, err
|
2018-07-04 16:30:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Trigger shard finalize
|
2018-08-06 20:49:28 +00:00
|
|
|
close(dagBuilder.Blocks())
|
2018-07-04 16:30:24 +00:00
|
|
|
|
2018-07-19 13:17:27 +00:00
|
|
|
select {
|
|
|
|
case <-dagBuilder.Done(): // wait for the builder to finish
|
|
|
|
err = dagBuilder.Err()
|
|
|
|
case <-ctx.Done():
|
|
|
|
err = ctx.Err()
|
|
|
|
}
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
logger.Info("import process finished with error: ", err)
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
rootCid, err := cid.Decode(rootCidStr)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("bad root cid: %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
logger.Info("import process finished successfully")
|
|
|
|
return rootCid, nil
|
2018-07-04 16:30:24 +00:00
|
|
|
}
|