fix a few TODOs and FIXMEs

License: MIT
Signed-off-by: Hector Sanjuan <code@hector.link>
Hector Sanjuan 2018-07-19 16:27:23 +02:00
parent f8ff9883b0
commit ffd36afbb6
9 changed files with 131 additions and 72 deletions

View File

@@ -3,7 +3,6 @@ package adder
import (
"context"
"errors"
"fmt"
"io"
"sync"
@@ -128,13 +127,8 @@ func (imp *Importer) Go(ctx context.Context) error {
FINALIZE:
_, err := ipfsAdder.Finalize()
if err != nil {
// FIXME: check if we ever get this error here
if isNotFound(err) {
fmt.Println("fixme importer.go", err)
} else {
imp.errors <- err
}
}
}()
return nil
}
@@ -171,7 +165,7 @@ func (imp *Importer) Run(ctx context.Context, blockF BlockHandler) (string, error) {
return retVal, err
}
case <-output:
// TODO
// TODO(hector): handle output?
}
}
BREAK:
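
For context: the importer is driven through Run with a BlockHandler callback. A minimal caller-side sketch, assuming only the signatures visible in this diff (NewImporter, DefaultParams, Run, and the handler shape used by localBlockPut further down); the handler here is a stub for illustration, not cluster code:

package example

import (
	"context"

	files "github.com/ipfs/go-ipfs-cmdkit/files"

	"github.com/ipfs/ipfs-cluster/adder"
	"github.com/ipfs/ipfs-cluster/api"
)

// addFile sketches how a caller drives the Importer: Run walks the file,
// hands every produced node to the handler and returns a CID string
// (presumably the root of the built DAG).
func addFile(ctx context.Context, f files.File) (string, error) {
	imp, err := adder.NewImporter(f, adder.DefaultParams())
	if err != nil {
		return "", err
	}
	// Stub handler: a real one (see localBlockPut below) would send the
	// block to the allocated peers; this one only echoes the CID back.
	blockF := func(ctx context.Context, n *api.NodeWithMeta) (string, error) {
		return n.Cid, nil
	}
	return imp.Run(ctx, blockF)
}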

View File

@@ -9,7 +9,10 @@ import (
)
func TestImporter(t *testing.T) {
f := test.GetShardingDirSerial(t)
sth := test.NewShardingTestHelper()
defer sth.Clean()
f := sth.GetTreeSerialFile(t)
p := DefaultParams()
imp, err := NewImporter(f, p)
@@ -52,7 +55,10 @@ func TestImporter(t *testing.T) {
}
func TestImporter_DoubleStart(t *testing.T) {
f := test.GetShardingDirSerial(t)
sth := test.NewShardingTestHelper()
defer sth.Clean()
f := sth.GetTreeSerialFile(t)
p := DefaultParams()
imp, err := NewImporter(f, p)

View File

@@ -24,17 +24,6 @@ import (
logging "github.com/ipfs/go-log"
)
// FIXME
// // error used to i
// var errNotFound = errors.New("dagservice: block not found")
// func shouldIgnore(err error) bool {
// if err == errNotFound {
// return true
// }
// return false
// }
var log = logging.Logger("coreunix")
// how many bytes of progress to wait before sending a progress update message

View File

@@ -8,8 +8,11 @@ import (
"mime/multipart"
cid "github.com/ipfs/go-cid"
peer "github.com/libp2p/go-libp2p-peer"
"github.com/ipfs/ipfs-cluster/adder"
"github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/rpcutil"
rpc "github.com/hsanjuan/go-libp2p-gorpc"
"github.com/ipfs/go-ipfs-cmdkit/files"
@@ -28,25 +31,62 @@ func New(rpc *rpc.Client) *Adder {
}
}
func (a *Adder) putBlock(ctx context.Context, n *api.NodeWithMeta, dests []peer.ID) error {
logger.Debugf("put block: %s", n.Cid)
c, err := cid.Decode(n.Cid)
if err != nil {
return err
}
format, ok := cid.CodecToStr[c.Type()]
if !ok {
format = ""
logger.Warning("unsupported cid type, treating as v0")
}
if c.Prefix().Version == 0 {
format = "v0"
}
n.Format = format
ctxs, cancels := rpcutil.CtxsWithCancel(ctx, len(dests))
defer rpcutil.MultiCancel(cancels)
logger.Debugf("block put %s", n.Cid)
errs := a.rpcClient.MultiCall(
ctxs,
dests,
"Cluster",
"IPFSBlockPut",
*n,
rpcutil.RPCDiscardReplies(len(dests)),
)
return rpcutil.CheckErrs(errs)
}
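
Worth noting on the new putBlock above: before fanning the block out with MultiCall, it derives a format string from the block's CID, presumably so the receiving peers can pass the right codec to their local block put. Restated as a standalone helper purely for readability (same behaviour as the lines above, using the cid import already shown in this file; this helper is not part of the commit):

// cidFormat mirrors putBlock's logic: CIDv0 is tagged "v0", otherwise the
// codec name from cid.CodecToStr is used, falling back to "" for unknown codecs.
func cidFormat(c *cid.Cid) string {
	if c.Prefix().Version == 0 {
		return "v0"
	}
	format, ok := cid.CodecToStr[c.Type()]
	if !ok {
		return "" // putBlock logs a warning in this case
	}
	return format
}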
func (a *Adder) FromMultipart(ctx context.Context, r *multipart.Reader, p *adder.Params) (*cid.Cid, error) {
f := &files.MultipartFile{
Mediatype: "multipart/form-data",
Reader: r,
}
// TODO: it should send it to the best allocation
// TODO: Allocate()
localBlockPut := func(ctx context.Context, n *api.NodeWithMeta) (string, error) {
retVal := n.Cid
var allocsStr []string
err := a.rpcClient.CallContext(
ctx,
"",
"Cluster",
"IPFSBlockPut",
*n,
&struct{}{},
"Allocate",
api.PinWithOpts(nil, p.PinOptions).ToSerial(),
&allocsStr,
)
return retVal, err
if err != nil {
return nil, err
}
allocations := api.StringsToPeers(allocsStr)
localBlockPut := func(ctx context.Context, n *api.NodeWithMeta) (string, error) {
retVal := n.Cid
return retVal, a.putBlock(ctx, n, allocations)
}
importer, err := adder.NewImporter(f, p)
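
A caller-side sketch of the new FromMultipart flow: allocations are requested first through the Allocate RPC, and every block produced by the importer is then pushed to those peers via putBlock. Everything below is illustration only; the rpc client wiring and the adder/local import path are assumptions, not part of this commit:

package example

import (
	"context"
	"mime/multipart"

	rpc "github.com/hsanjuan/go-libp2p-gorpc"
	cid "github.com/ipfs/go-cid"

	"github.com/ipfs/ipfs-cluster/adder"
	local "github.com/ipfs/ipfs-cluster/adder/local" // path assumed from "package local"
)

// addFromMultipart shows the intended call shape: hand the multipart body
// and the add parameters to the local Adder and get the root CID back.
func addFromMultipart(ctx context.Context, c *rpc.Client, mr *multipart.Reader) (*cid.Cid, error) {
	a := local.New(c)
	p := adder.DefaultParams() // or adder.ParamsFromQuery(...) for HTTP callers
	return a.FromMultipart(ctx, mr, p)
}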

View File

@@ -2,6 +2,7 @@ package local
import (
"context"
"errors"
"mime/multipart"
"sync"
"testing"
@@ -28,6 +29,14 @@ func (rpcs *testRPC) Pin(ctx context.Context, in api.PinSerial, out *struct{}) error {
return nil
}
func (rpcs *testRPC) Allocate(ctx context.Context, in api.PinSerial, out *[]string) error {
if in.ReplicationFactorMin > 1 {
return errors.New("we can only replicate to 1 peer")
}
*out = []string{""}
return nil
}
func TestFromMultipart(t *testing.T) {
t.Run("balanced", func(t *testing.T) {
rpcObj := &testRPC{}

View File

@@ -29,18 +29,19 @@ func DefaultParams() *Params {
Hidden: false,
Shard: false,
PinOptions: api.PinOptions{
ReplicationFactorMin: -1,
ReplicationFactorMax: -1,
ReplicationFactorMin: 0,
ReplicationFactorMax: 0,
Name: "",
ShardSize: 100 * 1024 * 1024, // FIXME
ShardSize: 100 * 1024 * 1024, // 100 MB
},
}
}
// ParamsFromQuery parses the Params object from
// a URL.Query().
// FIXME? Defaults?
func ParamsFromQuery(query url.Values) (*Params, error) {
params := DefaultParams()
layout := query.Get("layout")
switch layout {
case "trickle":
@@ -48,47 +49,60 @@ func ParamsFromQuery(query url.Values) (*Params, error) {
default:
return nil, errors.New("parameter trickle invalid")
}
params.Layout = layout
chunker := query.Get("chunker")
params.Chunker = chunker
name := query.Get("name")
raw, err := strconv.ParseBool(query.Get("raw"))
params.Name = name
if v := query.Get("raw"); v != "" {
raw, err := strconv.ParseBool(v)
if err != nil {
return nil, errors.New("parameter raw invalid")
}
hidden, err := strconv.ParseBool(query.Get("hidden"))
params.RawLeaves = raw
}
if v := query.Get("hidden"); v != "" {
hidden, err := strconv.ParseBool(v)
if err != nil {
return nil, errors.New("parameter hidden invalid")
}
shard, err := strconv.ParseBool(query.Get("shard"))
params.Hidden = hidden
}
if v := query.Get("shard"); v != "" {
shard, err := strconv.ParseBool(v)
if err != nil {
return nil, errors.New("parameter shard invalid")
}
replMin, err := strconv.Atoi(query.Get("repl_min"))
params.Shard = shard
}
if v := query.Get("repl_min"); v != "" {
replMin, err := strconv.Atoi(v)
if err != nil || replMin < -1 {
return nil, errors.New("parameter repl_min invalid")
}
replMax, err := strconv.Atoi(query.Get("repl_max"))
params.ReplicationFactorMin = replMin
}
if v := query.Get("repl_max"); v != "" {
replMax, err := strconv.Atoi(v)
if err != nil || replMax < -1 {
return nil, errors.New("parameter repl_max invalid")
}
params.ReplicationFactorMax = replMax
}
shardSize, err := strconv.ParseUint(query.Get("shard_size"), 10, 64)
if v := query.Get("shard_size"); v != "" {
shardSize, err := strconv.ParseUint(v, 10, 64)
if err != nil {
return nil, errors.New("parameter shard_size is invalid")
}
params := &Params{
Layout: layout,
Chunker: chunker,
RawLeaves: raw,
Hidden: hidden,
Shard: shard,
PinOptions: api.PinOptions{
ReplicationFactorMin: replMin,
ReplicationFactorMax: replMax,
Name: name,
ShardSize: shardSize,
},
params.ShardSize = shardSize
}
return params, nil
}
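
The rewrite above makes ParamsFromQuery start from DefaultParams() and only override a field when its query parameter is actually present, so omitted parameters no longer break parsing while malformed ones still return an error. A minimal sketch, written as if it lived next to this function (imports: fmt, net/url); the concrete values are made up for illustration:

func exampleParamsFromQuery() {
	q := url.Values{}
	q.Set("layout", "trickle")
	q.Set("shard", "true")
	q.Set("shard_size", "52428800") // bytes, parsed with ParseUint
	q.Set("repl_min", "1")
	q.Set("repl_max", "3")
	p, err := ParamsFromQuery(q)
	if err != nil {
		// only reached for present-but-invalid values, e.g. shard=maybe
		fmt.Println("bad query:", err)
		return
	}
	// chunker, name, raw and hidden were not set, so they keep the defaults
	fmt.Println(p.Layout, p.Shard, p.ShardSize, p.ReplicationFactorMin)
}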

View File

@@ -168,6 +168,7 @@ func (cdb *clusterDAGBuilder) finalize() error {
// pin.Type = api.ShardType
// pin.Parents = shardParents
// // FIXME: We don't know anymore the shard pin maxDepth
// // so we'd need to get the pin first.
// err := cdb.pin(pin)
// if err != nil {
// return err

View File

@@ -20,6 +20,7 @@ var LoggingFacilities = map[string]string{
"config": "INFO",
"addshard": "INFO",
"addlocal": "INFO",
"adder": "INFO",
}
// LoggingFacilitiesExtra provides logging identifiers

View File

@@ -221,13 +221,18 @@ func (sth *ShardingTestHelper) makeRandFile(t *testing.T, kbs int) os.FileInfo {
//
// The total size in ext4 is ~3420160 Bytes = ~3340 kB = ~3.4MB
func (sth *ShardingTestHelper) GetTreeMultiReader(t *testing.T) *files.MultiFileReader {
sf := sth.GetTreeSerialFile(t)
slf := files.NewSliceFile("", "", []files.File{sf})
return files.NewMultiFileReader(slf, true)
}
func (sth *ShardingTestHelper) GetTreeSerialFile(t *testing.T) files.File {
st := sth.makeTree(t)
sf, err := files.NewSerialFile(shardingTestTree, sth.path(shardingTestTree), false, st)
if err != nil {
t.Fatal(err)
}
slf := files.NewSliceFile("", "", []files.File{sf})
return files.NewMultiFileReader(slf, true)
return sf
}
// GetRandFileMultiReader creates and returns a MultiFileReader for