2017-12-05 20:44:22 +00:00
|
|
|
package client
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
2018-03-14 08:49:49 +00:00
|
|
|
"context"
|
2017-12-05 20:44:22 +00:00
|
|
|
"encoding/json"
|
2018-10-23 18:21:27 +00:00
|
|
|
"errors"
|
2017-12-05 20:44:22 +00:00
|
|
|
"fmt"
|
|
|
|
"net/url"
|
2018-08-19 17:39:08 +00:00
|
|
|
"os"
|
|
|
|
"path"
|
|
|
|
"path/filepath"
|
2018-08-10 12:39:44 +00:00
|
|
|
"strings"
|
2018-03-14 08:49:49 +00:00
|
|
|
"time"
|
2017-12-05 20:44:22 +00:00
|
|
|
|
2018-07-17 10:51:31 +00:00
|
|
|
"github.com/ipfs/ipfs-cluster/api"
|
|
|
|
|
2018-12-06 18:59:05 +00:00
|
|
|
cid "github.com/ipfs/go-cid"
|
|
|
|
files "github.com/ipfs/go-ipfs-files"
|
|
|
|
peer "github.com/libp2p/go-libp2p-peer"
|
2017-12-05 20:44:22 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// ID returns information about the cluster Peer.
|
2018-08-30 23:14:06 +00:00
|
|
|
func (c *defaultClient) ID() (api.ID, error) {
|
2017-12-05 20:44:22 +00:00
|
|
|
var id api.IDSerial
|
2018-08-06 20:49:28 +00:00
|
|
|
err := c.do("GET", "/id", nil, nil, &id)
|
2017-12-05 20:44:22 +00:00
|
|
|
return id.ToID(), err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Peers requests ID information for all cluster peers.
|
2018-08-30 23:14:06 +00:00
|
|
|
func (c *defaultClient) Peers() ([]api.ID, error) {
|
2017-12-05 20:44:22 +00:00
|
|
|
var ids []api.IDSerial
|
2018-08-06 20:49:28 +00:00
|
|
|
err := c.do("GET", "/peers", nil, nil, &ids)
|
2017-12-05 20:44:22 +00:00
|
|
|
result := make([]api.ID, len(ids))
|
|
|
|
for i, id := range ids {
|
|
|
|
result[i] = id.ToID()
|
|
|
|
}
|
|
|
|
return result, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// peerAddBody is the JSON request body POSTed to /peers
// when adding a new peer to the cluster (see PeerAdd).
type peerAddBody struct {
	// PeerID is the string-encoded libp2p peer ID of the peer to add.
	PeerID string `json:"peer_id"`
}
|
|
|
|
|
|
|
|
// PeerAdd adds a new peer to the cluster.
|
2018-08-30 23:14:06 +00:00
|
|
|
func (c *defaultClient) PeerAdd(pid peer.ID) (api.ID, error) {
|
2018-07-17 10:51:31 +00:00
|
|
|
pidStr := peer.IDB58Encode(pid)
|
|
|
|
body := peerAddBody{pidStr}
|
2017-12-05 20:44:22 +00:00
|
|
|
|
|
|
|
var buf bytes.Buffer
|
|
|
|
enc := json.NewEncoder(&buf)
|
|
|
|
enc.Encode(body)
|
|
|
|
|
|
|
|
var id api.IDSerial
|
2018-08-06 20:49:28 +00:00
|
|
|
err := c.do("POST", "/peers", nil, &buf, &id)
|
2017-12-05 20:44:22 +00:00
|
|
|
return id.ToID(), err
|
|
|
|
}
|
|
|
|
|
|
|
|
// PeerRm removes a current peer from the cluster
|
2018-08-30 23:14:06 +00:00
|
|
|
func (c *defaultClient) PeerRm(id peer.ID) error {
|
2018-08-06 20:49:28 +00:00
|
|
|
return c.do("DELETE", fmt.Sprintf("/peers/%s", id.Pretty()), nil, nil, nil)
|
2017-12-05 20:44:22 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Pin tracks a Cid with the given replication factor and a name for
|
|
|
|
// human-friendliness.
|
2018-10-01 01:53:26 +00:00
|
|
|
func (c *defaultClient) Pin(ci cid.Cid, replicationFactorMin, replicationFactorMax int, name string) error {
|
2017-12-05 20:44:22 +00:00
|
|
|
escName := url.QueryEscape(name)
|
|
|
|
err := c.do(
|
|
|
|
"POST",
|
2018-03-14 08:49:49 +00:00
|
|
|
fmt.Sprintf(
|
2018-08-08 19:11:26 +00:00
|
|
|
"/pins/%s?replication-min=%d&replication-max=%d&name=%s",
|
2017-12-05 20:44:22 +00:00
|
|
|
ci.String(),
|
2018-01-15 18:31:51 +00:00
|
|
|
replicationFactorMin,
|
|
|
|
replicationFactorMax,
|
2018-03-14 08:49:49 +00:00
|
|
|
escName,
|
|
|
|
),
|
|
|
|
nil,
|
|
|
|
nil,
|
2018-08-06 20:49:28 +00:00
|
|
|
nil,
|
2018-03-14 08:49:49 +00:00
|
|
|
)
|
2017-12-05 20:44:22 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Unpin untracks a Cid from cluster.
|
2018-10-01 01:53:26 +00:00
|
|
|
func (c *defaultClient) Unpin(ci cid.Cid) error {
|
2018-08-06 20:49:28 +00:00
|
|
|
return c.do("DELETE", fmt.Sprintf("/pins/%s", ci.String()), nil, nil, nil)
|
2017-12-05 20:44:22 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Allocations returns the consensus state listing all tracked items and
|
|
|
|
// the peers that should be pinning them.
|
2018-08-30 23:14:06 +00:00
|
|
|
func (c *defaultClient) Allocations(filter api.PinType) ([]api.Pin, error) {
|
2017-12-05 20:44:22 +00:00
|
|
|
var pins []api.PinSerial
|
2018-08-10 12:39:44 +00:00
|
|
|
|
|
|
|
types := []api.PinType{
|
|
|
|
api.DataType,
|
|
|
|
api.MetaType,
|
|
|
|
api.ClusterDAGType,
|
|
|
|
api.ShardType,
|
|
|
|
}
|
|
|
|
|
|
|
|
var strFilter []string
|
|
|
|
|
|
|
|
if filter == api.AllType {
|
|
|
|
strFilter = []string{"all"}
|
|
|
|
} else {
|
|
|
|
for _, t := range types {
|
|
|
|
if t&filter > 0 { // the filter includes this type
|
|
|
|
strFilter = append(strFilter, t.String())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-10-22 21:45:33 +00:00
|
|
|
f := url.QueryEscape(strings.Join(strFilter, ","))
|
|
|
|
err := c.do("GET", fmt.Sprintf("/allocations?filter=%s", f), nil, nil, &pins)
|
2017-12-05 20:44:22 +00:00
|
|
|
result := make([]api.Pin, len(pins))
|
|
|
|
for i, p := range pins {
|
|
|
|
result[i] = p.ToPin()
|
|
|
|
}
|
|
|
|
return result, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Allocation returns the current allocations for a given Cid.
|
2018-10-01 01:53:26 +00:00
|
|
|
func (c *defaultClient) Allocation(ci cid.Cid) (api.Pin, error) {
|
2017-12-05 20:44:22 +00:00
|
|
|
var pin api.PinSerial
|
2018-08-06 20:49:28 +00:00
|
|
|
err := c.do("GET", fmt.Sprintf("/allocations/%s", ci.String()), nil, nil, &pin)
|
2017-12-05 20:44:22 +00:00
|
|
|
return pin.ToPin(), err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Status returns the current ipfs state for a given Cid. If local is true,
|
|
|
|
// the information affects only the current peer, otherwise the information
|
|
|
|
// is fetched from all cluster peers.
|
2018-10-01 01:53:26 +00:00
|
|
|
func (c *defaultClient) Status(ci cid.Cid, local bool) (api.GlobalPinInfo, error) {
|
2017-12-05 20:44:22 +00:00
|
|
|
var gpi api.GlobalPinInfoSerial
|
2018-08-06 20:49:28 +00:00
|
|
|
err := c.do("GET", fmt.Sprintf("/pins/%s?local=%t", ci.String(), local), nil, nil, &gpi)
|
2017-12-05 20:44:22 +00:00
|
|
|
return gpi.ToGlobalPinInfo(), err
|
|
|
|
}
|
|
|
|
|
|
|
|
// StatusAll gathers Status() for all tracked items.
|
Status filters for `ipfs-cluster-ctl status`
Added filter option to `ipfs-cluster-ctl status`
When the --filter is passed, it will only fetch the peer information
where status of the pin matches with the filter value.
Valid filter values are tracker status types(i.e., "pinned",
"pin_error", "unpinning" etc), an alias of tracker status type (i.e.,
"queued" or "error"), comma separated list of tracker status type
and/or it aliases(i.e., "error,pinning")
On passing invalid filter value no status information will be shown
In particular, the filter would remove elements from []GlobalPinInfo
when none of the peers in GlobalPinInfo match the filter. If one peer
in the GlobalPinInfo matches the filter, the whole object is returned,
including the information for the other peers which may or not match it.
filter option works on statusAll("GET /pins"). For fetching pin status
for a CID("GET /pins/<cid>"), filter option would have no effect
Fixes #445
License: MIT
Signed-off-by: Kishan Mohanbhai Sagathiya <kishansagathiya@gmail.com>
2018-12-20 08:02:26 +00:00
|
|
|
// If valid filter value is provided, it would fetch only those status information
|
2018-12-26 05:45:30 +00:00
|
|
|
// where status is matching the filter value.
|
Status filters for `ipfs-cluster-ctl status`
Added filter option to `ipfs-cluster-ctl status`
When the --filter is passed, it will only fetch the peer information
where status of the pin matches with the filter value.
Valid filter values are tracker status types(i.e., "pinned",
"pin_error", "unpinning" etc), an alias of tracker status type (i.e.,
"queued" or "error"), comma separated list of tracker status type
and/or it aliases(i.e., "error,pinning")
On passing invalid filter value no status information will be shown
In particular, the filter would remove elements from []GlobalPinInfo
when none of the peers in GlobalPinInfo match the filter. If one peer
in the GlobalPinInfo matches the filter, the whole object is returned,
including the information for the other peers which may or not match it.
filter option works on statusAll("GET /pins"). For fetching pin status
for a CID("GET /pins/<cid>"), filter option would have no effect
Fixes #445
License: MIT
Signed-off-by: Kishan Mohanbhai Sagathiya <kishansagathiya@gmail.com>
2018-12-20 08:02:26 +00:00
|
|
|
// Valid filter values are tracker status type, an alias of tracker status type
|
2018-12-26 05:45:30 +00:00
|
|
|
// (queued or error), comma separated list of track status type and/or it aliases.
|
Status filters for `ipfs-cluster-ctl status`
Added filter option to `ipfs-cluster-ctl status`
When the --filter is passed, it will only fetch the peer information
where status of the pin matches with the filter value.
Valid filter values are tracker status types(i.e., "pinned",
"pin_error", "unpinning" etc), an alias of tracker status type (i.e.,
"queued" or "error"), comma separated list of tracker status type
and/or it aliases(i.e., "error,pinning")
On passing invalid filter value no status information will be shown
In particular, the filter would remove elements from []GlobalPinInfo
when none of the peers in GlobalPinInfo match the filter. If one peer
in the GlobalPinInfo matches the filter, the whole object is returned,
including the information for the other peers which may or not match it.
filter option works on statusAll("GET /pins"). For fetching pin status
for a CID("GET /pins/<cid>"), filter option would have no effect
Fixes #445
License: MIT
Signed-off-by: Kishan Mohanbhai Sagathiya <kishansagathiya@gmail.com>
2018-12-20 08:02:26 +00:00
|
|
|
func (c *defaultClient) StatusAll(filter string, local bool) ([]api.GlobalPinInfo, error) {
|
2017-12-05 20:44:22 +00:00
|
|
|
var gpis []api.GlobalPinInfoSerial
|
2018-12-26 05:45:30 +00:00
|
|
|
|
|
|
|
if !api.IsFilterValid(filter) {
|
|
|
|
return make([]api.GlobalPinInfo, len(gpis)), errors.New("invalid filter value")
|
|
|
|
}
|
|
|
|
|
|
|
|
err := c.do("GET", fmt.Sprintf("/pins?local=%t&filter=%s", local, url.QueryEscape(filter)), nil, nil, &gpis)
|
2017-12-05 20:44:22 +00:00
|
|
|
result := make([]api.GlobalPinInfo, len(gpis))
|
|
|
|
for i, p := range gpis {
|
|
|
|
result[i] = p.ToGlobalPinInfo()
|
|
|
|
}
|
|
|
|
return result, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Sync makes sure the state of a Cid corresponds to the state reported by
|
|
|
|
// the ipfs daemon, and returns it. If local is true, this operation only
|
|
|
|
// happens on the current peer, otherwise it happens on every cluster peer.
|
2018-10-01 01:53:26 +00:00
|
|
|
func (c *defaultClient) Sync(ci cid.Cid, local bool) (api.GlobalPinInfo, error) {
|
2017-12-05 20:44:22 +00:00
|
|
|
var gpi api.GlobalPinInfoSerial
|
2018-08-06 20:49:28 +00:00
|
|
|
err := c.do("POST", fmt.Sprintf("/pins/%s/sync?local=%t", ci.String(), local), nil, nil, &gpi)
|
2017-12-05 20:44:22 +00:00
|
|
|
return gpi.ToGlobalPinInfo(), err
|
|
|
|
}
|
|
|
|
|
|
|
|
// SyncAll triggers Sync() operations for all tracked items. It only returns
|
|
|
|
// informations for items that were de-synced or have an error state. If
|
|
|
|
// local is true, the operation is limited to the current peer. Otherwise
|
|
|
|
// it happens on every cluster peer.
|
2018-08-30 23:14:06 +00:00
|
|
|
func (c *defaultClient) SyncAll(local bool) ([]api.GlobalPinInfo, error) {
|
2017-12-05 20:44:22 +00:00
|
|
|
var gpis []api.GlobalPinInfoSerial
|
2018-08-06 20:49:28 +00:00
|
|
|
err := c.do("POST", fmt.Sprintf("/pins/sync?local=%t", local), nil, nil, &gpis)
|
2017-12-05 20:44:22 +00:00
|
|
|
result := make([]api.GlobalPinInfo, len(gpis))
|
|
|
|
for i, p := range gpis {
|
|
|
|
result[i] = p.ToGlobalPinInfo()
|
|
|
|
}
|
|
|
|
return result, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Recover retriggers pin or unpin ipfs operations for a Cid in error state.
|
|
|
|
// If local is true, the operation is limited to the current peer, otherwise
|
|
|
|
// it happens on every cluster peer.
|
2018-10-01 01:53:26 +00:00
|
|
|
func (c *defaultClient) Recover(ci cid.Cid, local bool) (api.GlobalPinInfo, error) {
|
2017-12-05 20:44:22 +00:00
|
|
|
var gpi api.GlobalPinInfoSerial
|
2018-08-06 20:49:28 +00:00
|
|
|
err := c.do("POST", fmt.Sprintf("/pins/%s/recover?local=%t", ci.String(), local), nil, nil, &gpi)
|
2017-12-05 20:44:22 +00:00
|
|
|
return gpi.ToGlobalPinInfo(), err
|
|
|
|
}
|
|
|
|
|
|
|
|
// RecoverAll triggers Recover() operations on all tracked items. If local is
|
|
|
|
// true, the operation is limited to the current peer. Otherwise, it happens
|
|
|
|
// everywhere.
|
2018-08-30 23:14:06 +00:00
|
|
|
func (c *defaultClient) RecoverAll(local bool) ([]api.GlobalPinInfo, error) {
|
2017-12-05 20:44:22 +00:00
|
|
|
var gpis []api.GlobalPinInfoSerial
|
2018-08-06 20:49:28 +00:00
|
|
|
err := c.do("POST", fmt.Sprintf("/pins/recover?local=%t", local), nil, nil, &gpis)
|
2017-12-05 20:44:22 +00:00
|
|
|
result := make([]api.GlobalPinInfo, len(gpis))
|
|
|
|
for i, p := range gpis {
|
|
|
|
result[i] = p.ToGlobalPinInfo()
|
|
|
|
}
|
|
|
|
return result, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Version returns the ipfs-cluster peer's version.
|
2018-08-30 23:14:06 +00:00
|
|
|
func (c *defaultClient) Version() (api.Version, error) {
|
2017-12-05 20:44:22 +00:00
|
|
|
var ver api.Version
|
2018-08-06 20:49:28 +00:00
|
|
|
err := c.do("GET", "/version", nil, nil, &ver)
|
2017-12-05 20:44:22 +00:00
|
|
|
return ver, err
|
|
|
|
}
|
2017-10-13 21:12:46 +00:00
|
|
|
|
2018-01-18 02:49:35 +00:00
|
|
|
// GetConnectGraph returns an ipfs-cluster connection graph.
|
2017-10-13 21:12:46 +00:00
|
|
|
// The serialized version, strings instead of pids, is returned
|
2018-08-30 23:14:06 +00:00
|
|
|
func (c *defaultClient) GetConnectGraph() (api.ConnectGraphSerial, error) {
|
2017-10-13 21:12:46 +00:00
|
|
|
var graphS api.ConnectGraphSerial
|
2018-08-06 20:49:28 +00:00
|
|
|
err := c.do("GET", "/health/graph", nil, nil, &graphS)
|
2017-10-13 21:12:46 +00:00
|
|
|
return graphS, err
|
|
|
|
}
|
2018-03-14 08:49:49 +00:00
|
|
|
|
2018-10-23 18:21:27 +00:00
|
|
|
// Metrics returns a map with the latest valid metrics of the given name
|
2018-10-07 16:32:46 +00:00
|
|
|
// for the current cluster peers.
|
2018-10-21 06:34:50 +00:00
|
|
|
func (c *defaultClient) Metrics(name string) ([]api.Metric, error) {
|
2018-10-23 18:21:27 +00:00
|
|
|
if name == "" {
|
|
|
|
return nil, errors.New("bad metric name")
|
|
|
|
}
|
2018-10-07 16:32:46 +00:00
|
|
|
var metrics []api.Metric
|
2018-10-21 06:34:50 +00:00
|
|
|
err := c.do("GET", fmt.Sprintf("/monitor/metrics/%s", name), nil, nil, &metrics)
|
2018-10-07 16:32:46 +00:00
|
|
|
return metrics, err
|
|
|
|
}
|
|
|
|
|
2018-10-03 08:55:57 +00:00
|
|
|
// WaitFor is a utility function that allows for a caller to wait for a
|
|
|
|
// paticular status for a CID (as defined by StatusFilterParams).
|
|
|
|
// It returns the final status for that CID and an error, if there was.
|
|
|
|
//
|
|
|
|
// WaitFor works by calling Status() repeatedly and checking that all
|
|
|
|
// peers have transitioned to the target TrackerStatus or are Remote.
|
|
|
|
// If an error of some type happens, WaitFor returns immediately with an
|
|
|
|
// empty GlobalPinInfo.
|
2018-08-30 23:14:06 +00:00
|
|
|
func WaitFor(ctx context.Context, c Client, fp StatusFilterParams) (api.GlobalPinInfo, error) {
|
2018-03-14 08:49:49 +00:00
|
|
|
ctx, cancel := context.WithCancel(ctx)
|
|
|
|
defer cancel()
|
|
|
|
|
|
|
|
sf := newStatusFilter()
|
|
|
|
|
|
|
|
go sf.pollStatus(ctx, c, fp)
|
|
|
|
go sf.filter(ctx, fp)
|
|
|
|
|
2018-03-28 15:11:43 +00:00
|
|
|
var status api.GlobalPinInfo
|
|
|
|
|
2018-03-14 08:49:49 +00:00
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-ctx.Done():
|
|
|
|
return api.GlobalPinInfo{}, ctx.Err()
|
|
|
|
case err := <-sf.Err:
|
|
|
|
return api.GlobalPinInfo{}, err
|
2018-03-28 15:11:43 +00:00
|
|
|
case st, ok := <-sf.Out:
|
2018-03-29 08:33:09 +00:00
|
|
|
if !ok { // channel closed
|
2018-03-28 15:11:43 +00:00
|
|
|
return status, nil
|
2018-03-14 08:49:49 +00:00
|
|
|
}
|
2018-03-29 08:33:09 +00:00
|
|
|
status = st
|
2018-03-14 08:49:49 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// StatusFilterParams contains the parameters required
// to filter a stream of status results.
type StatusFilterParams struct {
	// Cid is the item whose status is polled.
	Cid cid.Cid
	// Local limits the Status() queries to the current peer when true.
	Local bool
	// Target is the TrackerStatus that all peers must reach (or be
	// Remote) for the wait to finish.
	Target api.TrackerStatus
	// CheckFreq is the interval between successive Status() polls.
	CheckFreq time.Duration
}
|
|
|
|
|
|
|
|
// statusFilter wires a status-polling goroutine (pollStatus) to a
// filtering goroutine (filter) via channels.
type statusFilter struct {
	// In carries raw statuses from pollStatus to filter; Out carries
	// them onward to the caller after filter has inspected them.
	In, Out chan api.GlobalPinInfo
	// Done is closed by filter to signal pollStatus to stop.
	Done chan struct{}
	// Err transports the first error hit by either goroutine.
	Err chan error
}
|
|
|
|
|
|
|
|
func newStatusFilter() *statusFilter {
|
|
|
|
return &statusFilter{
|
|
|
|
In: make(chan api.GlobalPinInfo),
|
|
|
|
Out: make(chan api.GlobalPinInfo),
|
|
|
|
Done: make(chan struct{}),
|
|
|
|
Err: make(chan error),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (sf *statusFilter) filter(ctx context.Context, fp StatusFilterParams) {
|
|
|
|
defer close(sf.Done)
|
|
|
|
defer close(sf.Out)
|
|
|
|
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-ctx.Done():
|
|
|
|
sf.Err <- ctx.Err()
|
|
|
|
return
|
|
|
|
case gblPinInfo, more := <-sf.In:
|
|
|
|
if !more {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
ok, err := statusReached(fp.Target, gblPinInfo)
|
|
|
|
if err != nil {
|
|
|
|
sf.Err <- err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-03-28 14:04:21 +00:00
|
|
|
sf.Out <- gblPinInfo
|
2018-03-14 08:49:49 +00:00
|
|
|
if !ok {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-08-30 23:14:06 +00:00
|
|
|
func (sf *statusFilter) pollStatus(ctx context.Context, c Client, fp StatusFilterParams) {
|
2018-03-14 08:49:49 +00:00
|
|
|
ticker := time.NewTicker(fp.CheckFreq)
|
|
|
|
defer ticker.Stop()
|
|
|
|
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-ctx.Done():
|
|
|
|
sf.Err <- ctx.Err()
|
|
|
|
return
|
|
|
|
case <-ticker.C:
|
2018-10-01 01:53:26 +00:00
|
|
|
gblPinInfo, err := c.Status(fp.Cid, fp.Local)
|
2018-03-14 08:49:49 +00:00
|
|
|
if err != nil {
|
|
|
|
sf.Err <- err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
logger.Debugf("pollStatus: status: %#v", gblPinInfo)
|
|
|
|
sf.In <- gblPinInfo
|
|
|
|
case <-sf.Done:
|
|
|
|
close(sf.In)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func statusReached(target api.TrackerStatus, gblPinInfo api.GlobalPinInfo) (bool, error) {
|
|
|
|
for _, pinInfo := range gblPinInfo.PeerMap {
|
|
|
|
switch pinInfo.Status {
|
|
|
|
case target:
|
|
|
|
continue
|
|
|
|
case api.TrackerStatusBug, api.TrackerStatusClusterError, api.TrackerStatusPinError, api.TrackerStatusUnpinError:
|
|
|
|
return false, fmt.Errorf("error has occurred while attempting to reach status: %s", target.String())
|
|
|
|
case api.TrackerStatusRemote:
|
|
|
|
if target == api.TrackerStatusPinned {
|
|
|
|
continue // to next pinInfo
|
|
|
|
}
|
|
|
|
return false, nil
|
|
|
|
default:
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true, nil
|
|
|
|
}
|
2018-01-29 22:11:23 +00:00
|
|
|
|
2018-08-19 17:39:08 +00:00
|
|
|
// logic drawn from go-ipfs-cmds/cli/parse.go: appendFile
|
2018-12-01 04:58:55 +00:00
|
|
|
func makeSerialFile(fpath string, params *api.AddParams) (files.Node, error) {
|
2018-08-19 17:39:08 +00:00
|
|
|
if fpath == "." {
|
|
|
|
cwd, err := os.Getwd()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
cwd, err = filepath.EvalSymlinks(cwd)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
fpath = cwd
|
|
|
|
}
|
|
|
|
|
|
|
|
fpath = filepath.ToSlash(filepath.Clean(fpath))
|
|
|
|
|
|
|
|
stat, err := os.Lstat(fpath)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
if stat.IsDir() {
|
|
|
|
if !params.Recursive {
|
2018-08-20 10:08:49 +00:00
|
|
|
return nil, fmt.Errorf("%s is a directory, but Recursive option is not set", fpath)
|
2018-08-19 17:39:08 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-12-01 04:58:55 +00:00
|
|
|
return files.NewSerialFile(fpath, params.Hidden, stat)
|
2018-08-19 17:39:08 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Add imports files to the cluster from the given paths. A path can
|
|
|
|
// either be a local filesystem location or an web url (http:// or https://).
|
|
|
|
// In the latter case, the destination will be downloaded with a GET request.
|
|
|
|
// The AddParams allow to control different options, like enabling the
|
|
|
|
// sharding the resulting DAG across the IPFS daemons of multiple cluster
|
|
|
|
// peers. The output channel will receive regular updates as the adding
|
|
|
|
// process progresses.
|
2018-08-30 23:14:06 +00:00
|
|
|
func (c *defaultClient) Add(
|
2018-08-19 17:39:08 +00:00
|
|
|
paths []string,
|
|
|
|
params *api.AddParams,
|
|
|
|
out chan<- *api.AddedOutput,
|
|
|
|
) error {
|
|
|
|
|
2018-12-01 04:58:55 +00:00
|
|
|
addFiles := make([]files.DirEntry, len(paths), len(paths))
|
|
|
|
for i, p := range paths {
|
|
|
|
u, err := url.Parse(p)
|
2018-08-19 17:39:08 +00:00
|
|
|
if err != nil {
|
2018-08-20 10:08:49 +00:00
|
|
|
close(out)
|
2018-08-19 17:39:08 +00:00
|
|
|
return fmt.Errorf("error parsing path: %s", err)
|
|
|
|
}
|
2018-12-01 04:58:55 +00:00
|
|
|
name := path.Base(p)
|
|
|
|
var addFile files.Node
|
2018-08-19 17:39:08 +00:00
|
|
|
if strings.HasPrefix(u.Scheme, "http") {
|
2018-08-21 00:12:36 +00:00
|
|
|
addFile = files.NewWebFile(u)
|
2018-12-01 04:58:55 +00:00
|
|
|
name = path.Base(u.Path)
|
2018-08-19 17:39:08 +00:00
|
|
|
} else {
|
2018-12-01 04:58:55 +00:00
|
|
|
addFile, err = makeSerialFile(p, params)
|
2018-08-19 17:39:08 +00:00
|
|
|
if err != nil {
|
2018-08-20 10:08:49 +00:00
|
|
|
close(out)
|
2018-08-19 17:39:08 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
2018-12-01 04:58:55 +00:00
|
|
|
addFiles[i] = files.FileEntry(name, addFile)
|
2018-08-19 17:39:08 +00:00
|
|
|
}
|
|
|
|
|
2018-12-17 12:44:06 +00:00
|
|
|
sliceFile := files.NewSliceDirectory(addFiles)
|
2018-08-19 17:39:08 +00:00
|
|
|
// If `form` is set to true, the multipart data will have
|
|
|
|
// a Content-Type of 'multipart/form-data', if `form` is false,
|
|
|
|
// the Content-Type will be 'multipart/mixed'.
|
2018-12-03 14:45:35 +00:00
|
|
|
return c.AddMultiFile(files.NewMultiFileReader(sliceFile, true), params, out)
|
2018-08-19 17:39:08 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// AddMultiFile imports new files from a MultiFileReader. See Add().
|
2018-08-30 23:14:06 +00:00
|
|
|
func (c *defaultClient) AddMultiFile(
|
2018-03-25 04:16:25 +00:00
|
|
|
multiFileR *files.MultiFileReader,
|
2018-08-06 10:44:44 +00:00
|
|
|
params *api.AddParams,
|
2018-08-06 20:49:28 +00:00
|
|
|
out chan<- *api.AddedOutput,
|
2018-07-23 09:14:43 +00:00
|
|
|
) error {
|
2018-08-06 20:49:28 +00:00
|
|
|
defer close(out)
|
|
|
|
|
2018-01-30 16:44:43 +00:00
|
|
|
headers := make(map[string]string)
|
|
|
|
headers["Content-Type"] = "multipart/form-data; boundary=" + multiFileR.Boundary()
|
2018-08-06 10:44:44 +00:00
|
|
|
queryStr := params.ToQueryString()
|
2018-08-06 20:49:28 +00:00
|
|
|
|
2018-10-03 21:03:30 +00:00
|
|
|
// our handler decodes an AddedOutput and puts it
|
|
|
|
// in the out channel.
|
2018-08-06 20:49:28 +00:00
|
|
|
handler := func(dec *json.Decoder) error {
|
|
|
|
if out == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
var obj api.AddedOutput
|
|
|
|
err := dec.Decode(&obj)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2018-10-03 21:03:30 +00:00
|
|
|
out <- &obj
|
2018-08-06 20:49:28 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-08-06 10:44:44 +00:00
|
|
|
err := c.doStream(
|
|
|
|
"POST",
|
2018-08-07 11:42:20 +00:00
|
|
|
"/add?"+queryStr,
|
2018-08-06 10:44:44 +00:00
|
|
|
headers,
|
2018-08-06 20:49:28 +00:00
|
|
|
multiFileR,
|
|
|
|
handler,
|
2018-08-06 10:44:44 +00:00
|
|
|
)
|
2018-07-23 09:14:43 +00:00
|
|
|
return err
|
2018-01-29 22:11:23 +00:00
|
|
|
}
|