Tests for the front end of the adding pipeline

api client test
api test
one sharness test
refactoring of testingData for access from other packages
always wrap files added by cluster
remove unused flag 'progress'
changes to support hidden flag

License: MIT
Signed-off-by: Wyatt Daviau <wdaviau@cs.stanford.edu>
Author: Wyatt Daviau, 2018-04-20 18:57:13 -04:00 (committed by Hector Sanjuan)
Commit: c4d1b34810 (parent: 62eb45279b)
21 changed files with 169 additions and 50 deletions


@ -188,8 +188,6 @@ func (a *AddSession) AddFile(ctx context.Context,
}
chunker := params.Get("chunker")
raw, _ := strconv.ParseBool(params.Get("raw"))
- wrap, _ := strconv.ParseBool(params.Get("wrap"))
- progress, _ := strconv.ParseBool(params.Get("progress"))
hidden, _ := strconv.ParseBool(params.Get("hidden"))
silent, _ := strconv.ParseBool(params.Get("silent"))
@ -202,12 +200,10 @@ func (a *AddSession) AddFile(ctx context.Context,
printChan, outChan, errChan := importer.ToChannel(
ctx,
f,
- progress,
hidden,
trickle,
raw,
silent,
- wrap,
chunker,
)


@ -314,18 +314,15 @@ func (c *Client) AddMultiFile(
layout string,
chunker string,
raw bool,
- wrap bool,
- progress bool,
hidden bool,
replMin, replMax int) ([]api.AddedOutput, error) {
headers := make(map[string]string)
headers["Content-Type"] = "multipart/form-data; boundary=" + multiFileR.Boundary()
fmtStr1 := "/allocations?shard=%t&quiet=%t&silent=%t&layout=%s&"
fmtStr2 := "chunker=%s&raw=%t&wrap=%t&progress=%t&hidden=%t&"
fmtStr3 := "repl_min=%d&repl_max=%d"
url := fmt.Sprintf(fmtStr1+fmtStr2+fmtStr3, shard, quiet, silent, layout, chunker,
raw, wrap, progress, hidden, replMin, replMax)
fmtStr2 := "chunker=%s&raw=%t&hidden=%t&repl_min=%d&repl_max=%d"
url := fmt.Sprintf(fmtStr1+fmtStr2, shard, quiet, silent, layout, chunker,
raw, hidden, replMin, replMax)
output := make([]api.AddedOutput, 0)
err := c.doStream("POST", url, multiFileR, headers, &output)
return output, err


@ -432,3 +432,21 @@ func TestWaitFor(t *testing.T) {
testClients(t, tapi, testF)
}
func TestAddMultiFile(t *testing.T) {
c, api := testClient(t)
defer api.Shutdown()
mfr, err := test.GetTestingDirMultiReader()
if err != nil {
t.Fatal(err)
}
out, err := c.AddMultiFile(mfr, false, false, false, "", "", false,
false, -1, -1)
if err != nil {
t.Fatal(err)
}
if len(out) != 1 || out[0].Hash != test.TestCid1 {
t.Fatal("unexpected addedoutput from mock rpc on api")
}
}


@ -58,10 +58,8 @@ func (c *Client) doStreamRequest(method, path string, body io.Reader, headers ma
r.Header.Set(k, v)
}
// Here are the streaming specific modifications
- // Using HTTP 2.0 to enable parallel reading req and writing resp
- r.ProtoMajor = 2
- r.ProtoMinor = 0
+ r.ProtoMajor = 1
+ r.ProtoMinor = 1
r.ContentLength = -1
return c.client.Do(r)
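The hunk above drops the HTTP/2 pseudo-settings and relies on plain HTTP/1.1 for the streaming request, keeping ContentLength at -1. A minimal sketch of the mechanism this depends on (streamPost is a hypothetical helper, not code from this commit): when the content length is unknown, Go's net/http sends the body with chunked Transfer-Encoding, so a multipart reader is transmitted incrementally instead of being buffered first.

package example

import (
	"io"
	"net/http"
)

// streamPost issues a streaming POST. Setting ContentLength to -1 tells
// net/http that the body length is unknown, which on HTTP/1.1 results in
// chunked Transfer-Encoding: the reader is consumed as it is sent.
func streamPost(url, contentType string, body io.Reader) (*http.Response, error) {
	req, err := http.NewRequest("POST", url, body)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", contentType)
	req.ContentLength = -1 // unknown length: use chunked encoding
	return http.DefaultClient.Do(req)
}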


@ -518,6 +518,7 @@ func (api *API) addFileHandler(w http.ResponseWriter, r *http.Request) {
toPrint, err := addSess.AddFile(api.ctx, reader, params)
if err != nil {
sendErrorResponse(w, 500, err.Error())
+ return
}
sendJSONResponse(w, 200, toPrint)
}
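The added return matters because an HTTP handler keeps executing after an error response has been written; without it, the 200 JSON body would be written on top of the 500 response. A small self-contained sketch of the pattern (all names here are illustrative, not from the commit):

package example

import (
	"encoding/json"
	"net/http"
)

// addHandler mirrors the shape of the handler fixed above: on error it must
// return immediately, otherwise the success response is also written.
func addHandler(w http.ResponseWriter, r *http.Request) {
	out, err := process(r)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return // the fix: stop before writing the success response
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(out)
}

func process(r *http.Request) (interface{}, error) { return "ok", nil }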


@ -7,6 +7,7 @@ import (
"crypto/x509"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
@ -173,6 +174,11 @@ func makeDelete(t *testing.T, rest *API, url string, resp interface{}) {
req, _ := http.NewRequest("DELETE", url, bytes.NewReader([]byte{}))
httpResp, err := c.Do(req)
processResp(t, httpResp, err, resp)
}
func makePostRaw(t *testing.T, url string, body io.Reader, contentType string, resp interface{}) {
httpResp, err := http.Post(url, contentType, body)
processResp(t, httpResp, err, resp)
}
type testF func(t *testing.T, url urlF)
@ -302,6 +308,43 @@ func TestAPIPeerAddEndpoint(t *testing.T) {
testBothEndpoints(t, tf)
}
func TestAPIAddFileEndpoint(t *testing.T) {
rest := testAPI(t)
defer rest.Shutdown()
fmtStr1 := "/allocations?shard=false&quiet=false&silent=false&"
fmtStr2 := "layout=''&chunker=''&raw=false&"
fmtStr3 := "&hidden=false&repl_min=-1&repl_max=-1"
// mock rpc returns success with these params (shard=false)
successUrl := apiURL(rest) + fmtStr1 + fmtStr2 + fmtStr3
// Test with bad content-type
body, err := test.GetTestingDirMultiReader()
if err != nil {
t.Fatal(err)
}
errResp := api.Error{}
makePostRaw(t, successUrl, body, "text/html", &errResp)
if errResp.Code != 415 {
t.Error("expected error with bad content-type")
}
// Add a param value that leads to 500 on mock and send this param over
mpContentType := "multipart/form-data; boundary=" + body.Boundary()
fmtStr1Bad := "/allocations?shard=true&quiet=false&silent=false&"
failUrl := apiURL(rest) + fmtStr1Bad + fmtStr2 + fmtStr3
makePostRaw(t, failUrl, body, mpContentType, &errResp)
if errResp.Code != 500 {
t.Error("expected error with params causing mockrpc AddFile fail")
}
// Test with a correct input
resp := []api.AddedOutput{}
makePostRaw(t, successUrl, body, mpContentType, &resp)
if len(resp) != 1 || resp[0].Hash != test.TestCid1 {
t.Fatal("unexpected addedoutput from mock rpc on api")
}
}
func TestAPIPeerRemoveEndpoint(t *testing.T) {
rest := testAPI(t)
defer rest.Shutdown()


@ -18,8 +18,8 @@ func shouldIgnore(err error) bool {
// ToChannel imports a file into ipfs ipld nodes, outputting nodes on the
// provided channel
- func ToChannel(ctx context.Context, f files.File, progress bool, hidden bool,
- trickle bool, raw bool, silent bool, wrap bool,
+ func ToChannel(ctx context.Context, f files.File, hidden bool,
+ trickle bool, raw bool, silent bool,
chunker string) (<-chan *api.AddedOutput, <-chan *api.NodeWithMeta, <-chan error) {
printChan := make(chan *api.AddedOutput)
@ -38,12 +38,13 @@ func ToChannel(ctx context.Context, f files.File, progress bool, hidden bool,
}()
return printChan, outChan, errChan
}
// fileAdder.Progress = progress // TODO: get progress working eventually; don't need the complexity right now
fileAdder.Hidden = hidden
fileAdder.Trickle = trickle
fileAdder.RawLeaves = raw
fileAdder.Silent = silent
- fileAdder.Wrap = wrap
+ // Files added in one session are wrapped. This is because if files
+ // are sharded together then they share one logical clusterDAG root hash
+ fileAdder.Wrap = true
fileAdder.Chunker = chunker
fileAdder.Out = printChan
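With the wrap and progress arguments gone, the new ToChannel signature takes ctx, file, hidden, trickle, raw, silent and chunker, and returns the print, node and error channels. A sketch of a caller draining those channels (the importer import path and the chunker value are assumptions; the channel handling is illustrative, the rest follows the names in the hunk):

package example

import (
	"context"
	"fmt"

	"github.com/ipfs/go-ipfs-cmdkit/files"
	"github.com/ipfs/ipfs-cluster/importer"
)

// consumeAdd feeds a file through the importer and drains the print, node
// and error channels until all of them are closed.
func consumeAdd(ctx context.Context, f files.File) error {
	printChan, outChan, errChan := importer.ToChannel(
		ctx, f,
		false,         // hidden
		false,         // trickle layout
		false,         // raw leaves
		false,         // silent
		"size-262144", // default chunker per the ipfs-cluster-ctl help text
	)
	for printChan != nil || outChan != nil || errChan != nil {
		select {
		case added, ok := <-printChan:
			if !ok {
				printChan = nil
				continue
			}
			fmt.Println("added:", added.Hash)
		case node, ok := <-outChan:
			if !ok {
				outChan = nil
				continue
			}
			_ = node // e.g. forward the block on to IPFS
		case err, ok := <-errChan:
			if !ok {
				errChan = nil
				continue
			}
			return err
		}
	}
	return nil
}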


@ -2,30 +2,11 @@ package importer
import (
"context"
"fmt"
"os"
"path"
"testing"
"github.com/ipfs/go-ipfs-cmdkit/files"
"github.com/ipfs/ipfs-cluster/test"
)
- const testDir = "testingData"
- func getTestingDir() (files.File, error) {
- fpath := testDir
- stat, err := os.Lstat(fpath)
- if err != nil {
- return nil, err
- }
- if !stat.IsDir() {
- return nil, fmt.Errorf("testDir should be seen as directory")
- }
- return files.NewSerialFile(path.Base(fpath), fpath, false, stat)
- }
var cids = [18]string{"QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn",
"Qmbp4C4KkyjVzTpZ327Ub555FEHizhJS4M17f2zCCrQMAz",
"QmYz38urZ99eVCxSccM63bGtDv54UWtBDWJdTxGch23khA",
@ -49,7 +30,7 @@ var cids = [18]string{"QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn",
// import and receive all blocks
func TestToChannelOutput(t *testing.T) {
- file, err := getTestingDir()
+ file, err := test.GetTestingDirSerial()
if err != nil {
t.Fatal(err)
}
@ -98,7 +79,7 @@ func TestToChannelOutput(t *testing.T) {
}
func TestToChannelPrint(t *testing.T) {
- file, err := getTestingDir()
+ file, err := test.GetTestingDirSerial()
if err != nil {
t.Fatal(err)
}


@ -9,16 +9,17 @@ import (
"github.com/ipfs/go-ipfs-cmdkit/files"
)
- func parseFileArgs(paths []string, recursive bool) (*files.MultiFileReader, error) {
+ func parseFileArgs(paths []string, recursive, hidden bool) (*files.MultiFileReader, error) {
// logic largely drawn from go-ipfs-cmds/cli/parse.go: parseArgs
parsedFiles := make([]files.File, len(paths), len(paths))
for _, path := range paths {
- file, err := appendFile(path, recursive, false) // for now no hidden support
+ file, err := appendFile(path, recursive, hidden)
if err != nil {
return nil, err
}
parsedFiles = append(parsedFiles, file)
}
sliceFile := files.NewSliceFile("", "", parsedFiles)
return files.NewMultiFileReader(sliceFile, true), nil
}
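One thing worth noting in the context lines above: make([]files.File, len(paths), len(paths)) creates a slice that already holds len(paths) nil entries, and the subsequent append places the parsed files after them. The usual idiom starts the slice at length zero; a sketch reusing the diff's own names (appendFile and the files package come from the surrounding file, so this is illustrative rather than a verified drop-in fix):

func parseFileArgs(paths []string, recursive, hidden bool) (*files.MultiFileReader, error) {
	// length 0, capacity len(paths): append fills the slice without
	// leaving leading nil file entries
	parsedFiles := make([]files.File, 0, len(paths))
	for _, path := range paths {
		file, err := appendFile(path, recursive, hidden)
		if err != nil {
			return nil, err
		}
		parsedFiles = append(parsedFiles, file)
	}
	sliceFile := files.NewSliceFile("", "", parsedFiles)
	return files.NewMultiFileReader(sliceFile, true), nil
}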


@ -224,10 +224,6 @@ chunker: 'rabin-<min>-<avg>-<max>'. Default is 'size-262144'`,
Name: "raw-leaves",
Usage: "Use raw blocks for leaves (experimental)",
},
- cli.BoolFlag{
- Name: "wrap, w",
- Usage: "wrap files with a directory object",
- },
cli.BoolFlag{
Name: "progress, p",
Usage: "Stream progress data",
@ -252,16 +248,15 @@ chunker: 'rabin-<min>-<avg>-<max>'. Default is 'size-262144'`,
for i, path := range c.Args() {
paths[i] = path
}
// Unclear if multiFileR is ready for streaming, but hypothesis is yes.
// Files are all opened but not read until they are sent.
multiFileR, err := parseFileArgs(paths, c.Bool("recursive"))
multiFileR, err := parseFileArgs(paths, c.Bool("recursive"), c.Bool("hidden"))
checkErr("serializing all files", err)
resp, cerr := globalClient.AddMultiFile(multiFileR,
c.Bool("shard"), c.Bool("quiet"),
c.Bool("silent"), c.String("layout"),
c.String("chunker"),
c.Bool("raw-leaves"), c.Bool("wrap"),
c.Bool("progress"), c.Bool("hidden"),
c.Bool("raw-leaves"),
c.Bool("hidden"),
c.Int("replication-min"),
c.Int("replication-max"))
formatResponse(c, resp, cerr)

sharness/t0031-ctl-add.sh (new executable file, 32 lines)

@ -0,0 +1,32 @@
#!/bin/bash
test_description="Test cluster-ctl's add functionality"
. lib/test-lib.sh
test_ipfs_init
test_cluster_init
test_expect_success IPFS,CLUSTER "add small file to cluster with ctl" '
output=`ipfs-cluster-ctl add ../test_data/small_file | tail -1` &&
cid=${output:7:47} &&
ipfs-cluster-ctl pin ls | grep -q "$cid"
'
test_expect_success IPFS,CLUSTER "add small file with sharding" '
echo "complete me"
'
# add, make sure root is in ls -a
# root not in ls
# root is in metapin
# follow clusterdag make sure it points to shard pin
# follow shard pin make sure it points to the root hash and has the correct size
test_expect_success IPFS,CLUSTER "add large file with sharding" '
echo "complete me"
'
test_clean_ipfs
test_clean_cluster
test_done


@ -0,0 +1 @@
small file


@ -45,6 +45,23 @@ func (mock *mockService) Pin(ctx context.Context, in api.PinSerial, out *struct{
return nil
}
func (mock *mockService) AddFile(ctx context.Context, in api.FileInfo, out *[]api.AddedOutput) error {
param := in.Params["shard"]
if len(param) == 1 && param[0] == "true" {
return errors.New("rpc mock error on shard")
}
*out = []api.AddedOutput{
api.AddedOutput{
Name: "mock file",
Hash: TestCid1,
Bytes: 0,
Size: "",
},
}
return nil
}
func (mock *mockService) Unpin(ctx context.Context, in api.PinSerial, out *struct{}) error {
if in.Cid == ErrorCid {
return ErrBadCid

test/test_files.go (new file, 38 lines)

@ -0,0 +1,38 @@
package test
import (
"fmt"
"os"
"path"
"strings"
"github.com/ipfs/go-ipfs-cmdkit/files"
)
const relTestDir = "src/github.com/ipfs/ipfs-cluster/test/testingData"
// GetTestingDirSerial returns a cmdkit serial file for the testing directory.
// $GOPATH must be set for this to work
func GetTestingDirSerial() (files.File, error) {
fpath := strings.Join([]string{os.Getenv("GOPATH"), relTestDir}, "/")
stat, err := os.Lstat(fpath)
if err != nil {
return nil, err
}
if !stat.IsDir() {
return nil, fmt.Errorf("testDir should be seen as directory")
}
return files.NewSerialFile(path.Base(fpath), fpath, false, stat)
}
// GetTestingDirMultiReader returns a cmdkit multifilereader for the testing
// directory. $GOPATH must be set for this to work
func GetTestingDirMultiReader() (*files.MultiFileReader, error) {
file, err := GetTestingDirSerial()
if err != nil {
return nil, err
}
sliceFile := files.NewSliceFile("", "", []files.File{file})
return files.NewMultiFileReader(sliceFile, true), nil
}
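As an illustration of the cross-package access this refactor enables, a test in another package could reuse the helpers roughly like this (the package and test names are made up; only the test import path and the $GOPATH requirement come from the file above):

package somepkg_test

import (
	"testing"

	"github.com/ipfs/ipfs-cluster/test"
)

func TestUsesSharedTestingDir(t *testing.T) {
	// Resolves $GOPATH/src/github.com/ipfs/ipfs-cluster/test/testingData,
	// so $GOPATH must be set when the test runs.
	dir, err := test.GetTestingDirSerial()
	if err != nil {
		t.Fatal(err)
	}
	defer dir.Close()

	// For multipart uploads, wrap the same directory instead.
	mfr, err := test.GetTestingDirMultiReader()
	if err != nil {
		t.Fatal(err)
	}
	_ = mfr // e.g. pass to Client.AddMultiFile as in the client test above
}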