2017-12-05 20:44:22 +00:00
|
|
|
package client
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
2018-03-14 08:49:49 +00:00
|
|
|
"context"
|
2017-12-05 20:44:22 +00:00
|
|
|
"encoding/json"
|
|
|
|
"fmt"
|
|
|
|
"net/url"
|
2018-03-14 08:49:49 +00:00
|
|
|
"time"
|
2017-12-05 20:44:22 +00:00
|
|
|
|
|
|
|
cid "github.com/ipfs/go-cid"
|
2018-01-29 22:11:23 +00:00
|
|
|
"github.com/ipfs/go-ipfs-cmdkit/files"
|
2017-12-05 20:44:22 +00:00
|
|
|
peer "github.com/libp2p/go-libp2p-peer"
|
|
|
|
ma "github.com/multiformats/go-multiaddr"
|
2017-12-08 14:39:30 +00:00
|
|
|
|
|
|
|
"github.com/ipfs/ipfs-cluster/api"
|
2017-12-05 20:44:22 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// ID returns information about the cluster Peer.
|
2017-12-08 14:39:30 +00:00
|
|
|
func (c *Client) ID() (api.ID, error) {
|
2017-12-05 20:44:22 +00:00
|
|
|
var id api.IDSerial
|
2018-08-06 20:49:28 +00:00
|
|
|
err := c.do("GET", "/id", nil, nil, &id)
|
2017-12-05 20:44:22 +00:00
|
|
|
return id.ToID(), err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Peers requests ID information for all cluster peers.
|
2017-12-08 14:39:30 +00:00
|
|
|
func (c *Client) Peers() ([]api.ID, error) {
|
2017-12-05 20:44:22 +00:00
|
|
|
var ids []api.IDSerial
|
2018-08-06 20:49:28 +00:00
|
|
|
err := c.do("GET", "/peers", nil, nil, &ids)
|
2017-12-05 20:44:22 +00:00
|
|
|
result := make([]api.ID, len(ids))
|
|
|
|
for i, id := range ids {
|
|
|
|
result[i] = id.ToID()
|
|
|
|
}
|
|
|
|
return result, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// peerAddBody is the JSON request body for the POST /peers endpoint.
// Addr carries the multiaddress of the peer to be added.
type peerAddBody struct {
	Addr string `json:"peer_multiaddress"`
}
|
|
|
|
|
|
|
|
// PeerAdd adds a new peer to the cluster.
|
2017-12-08 14:39:30 +00:00
|
|
|
func (c *Client) PeerAdd(addr ma.Multiaddr) (api.ID, error) {
|
2017-12-05 20:44:22 +00:00
|
|
|
addrStr := addr.String()
|
|
|
|
body := peerAddBody{addrStr}
|
|
|
|
|
|
|
|
var buf bytes.Buffer
|
|
|
|
enc := json.NewEncoder(&buf)
|
|
|
|
enc.Encode(body)
|
|
|
|
|
|
|
|
var id api.IDSerial
|
2018-08-06 20:49:28 +00:00
|
|
|
err := c.do("POST", "/peers", nil, &buf, &id)
|
2017-12-05 20:44:22 +00:00
|
|
|
return id.ToID(), err
|
|
|
|
}
|
|
|
|
|
|
|
|
// PeerRm removes a current peer from the cluster
|
2017-12-08 14:39:30 +00:00
|
|
|
func (c *Client) PeerRm(id peer.ID) error {
|
2018-08-06 20:49:28 +00:00
|
|
|
return c.do("DELETE", fmt.Sprintf("/peers/%s", id.Pretty()), nil, nil, nil)
|
2017-12-05 20:44:22 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Pin tracks a Cid with the given replication factor and a name for
|
|
|
|
// human-friendliness.
|
2018-01-15 18:31:51 +00:00
|
|
|
func (c *Client) Pin(ci *cid.Cid, replicationFactorMin, replicationFactorMax int, name string) error {
|
2017-12-05 20:44:22 +00:00
|
|
|
escName := url.QueryEscape(name)
|
|
|
|
err := c.do(
|
|
|
|
"POST",
|
2018-03-14 08:49:49 +00:00
|
|
|
fmt.Sprintf(
|
|
|
|
"/pins/%s?replication_factor_min=%d&replication_factor_max=%d&name=%s",
|
2017-12-05 20:44:22 +00:00
|
|
|
ci.String(),
|
2018-01-15 18:31:51 +00:00
|
|
|
replicationFactorMin,
|
|
|
|
replicationFactorMax,
|
2018-03-14 08:49:49 +00:00
|
|
|
escName,
|
|
|
|
),
|
|
|
|
nil,
|
|
|
|
nil,
|
2018-08-06 20:49:28 +00:00
|
|
|
nil,
|
2018-03-14 08:49:49 +00:00
|
|
|
)
|
2017-12-05 20:44:22 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Unpin untracks a Cid from cluster.
|
2017-12-08 14:39:30 +00:00
|
|
|
func (c *Client) Unpin(ci *cid.Cid) error {
|
2018-08-06 20:49:28 +00:00
|
|
|
return c.do("DELETE", fmt.Sprintf("/pins/%s", ci.String()), nil, nil, nil)
|
2017-12-05 20:44:22 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Allocations returns the consensus state listing all tracked items and
|
|
|
|
// the peers that should be pinning them.
|
2018-03-18 19:29:02 +00:00
|
|
|
func (c *Client) Allocations(pinType api.PinType) ([]api.Pin, error) {
|
2017-12-05 20:44:22 +00:00
|
|
|
var pins []api.PinSerial
|
2018-08-06 20:49:28 +00:00
|
|
|
err := c.do("GET", fmt.Sprintf("/allocations?pintype=%s", pinType.String()), nil, nil, &pins)
|
2017-12-05 20:44:22 +00:00
|
|
|
result := make([]api.Pin, len(pins))
|
|
|
|
for i, p := range pins {
|
|
|
|
result[i] = p.ToPin()
|
|
|
|
}
|
|
|
|
return result, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Allocation returns the current allocations for a given Cid.
|
2017-12-08 14:39:30 +00:00
|
|
|
func (c *Client) Allocation(ci *cid.Cid) (api.Pin, error) {
|
2017-12-05 20:44:22 +00:00
|
|
|
var pin api.PinSerial
|
2018-08-06 20:49:28 +00:00
|
|
|
err := c.do("GET", fmt.Sprintf("/allocations/%s", ci.String()), nil, nil, &pin)
|
2017-12-05 20:44:22 +00:00
|
|
|
return pin.ToPin(), err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Status returns the current ipfs state for a given Cid. If local is true,
|
|
|
|
// the information affects only the current peer, otherwise the information
|
|
|
|
// is fetched from all cluster peers.
|
2017-12-08 14:39:30 +00:00
|
|
|
func (c *Client) Status(ci *cid.Cid, local bool) (api.GlobalPinInfo, error) {
|
2017-12-05 20:44:22 +00:00
|
|
|
var gpi api.GlobalPinInfoSerial
|
2018-08-06 20:49:28 +00:00
|
|
|
err := c.do("GET", fmt.Sprintf("/pins/%s?local=%t", ci.String(), local), nil, nil, &gpi)
|
2017-12-05 20:44:22 +00:00
|
|
|
return gpi.ToGlobalPinInfo(), err
|
|
|
|
}
|
|
|
|
|
|
|
|
// StatusAll gathers Status() for all tracked items.
|
2017-12-08 14:39:30 +00:00
|
|
|
func (c *Client) StatusAll(local bool) ([]api.GlobalPinInfo, error) {
|
2017-12-05 20:44:22 +00:00
|
|
|
var gpis []api.GlobalPinInfoSerial
|
2018-08-06 20:49:28 +00:00
|
|
|
err := c.do("GET", fmt.Sprintf("/pins?local=%t", local), nil, nil, &gpis)
|
2017-12-05 20:44:22 +00:00
|
|
|
result := make([]api.GlobalPinInfo, len(gpis))
|
|
|
|
for i, p := range gpis {
|
|
|
|
result[i] = p.ToGlobalPinInfo()
|
|
|
|
}
|
|
|
|
return result, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Sync makes sure the state of a Cid corresponds to the state reported by
|
|
|
|
// the ipfs daemon, and returns it. If local is true, this operation only
|
|
|
|
// happens on the current peer, otherwise it happens on every cluster peer.
|
2017-12-08 14:39:30 +00:00
|
|
|
func (c *Client) Sync(ci *cid.Cid, local bool) (api.GlobalPinInfo, error) {
|
2017-12-05 20:44:22 +00:00
|
|
|
var gpi api.GlobalPinInfoSerial
|
2018-08-06 20:49:28 +00:00
|
|
|
err := c.do("POST", fmt.Sprintf("/pins/%s/sync?local=%t", ci.String(), local), nil, nil, &gpi)
|
2017-12-05 20:44:22 +00:00
|
|
|
return gpi.ToGlobalPinInfo(), err
|
|
|
|
}
|
|
|
|
|
|
|
|
// SyncAll triggers Sync() operations for all tracked items. It only returns
|
|
|
|
// informations for items that were de-synced or have an error state. If
|
|
|
|
// local is true, the operation is limited to the current peer. Otherwise
|
|
|
|
// it happens on every cluster peer.
|
2017-12-08 14:39:30 +00:00
|
|
|
func (c *Client) SyncAll(local bool) ([]api.GlobalPinInfo, error) {
|
2017-12-05 20:44:22 +00:00
|
|
|
var gpis []api.GlobalPinInfoSerial
|
2018-08-06 20:49:28 +00:00
|
|
|
err := c.do("POST", fmt.Sprintf("/pins/sync?local=%t", local), nil, nil, &gpis)
|
2017-12-05 20:44:22 +00:00
|
|
|
result := make([]api.GlobalPinInfo, len(gpis))
|
|
|
|
for i, p := range gpis {
|
|
|
|
result[i] = p.ToGlobalPinInfo()
|
|
|
|
}
|
|
|
|
return result, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Recover retriggers pin or unpin ipfs operations for a Cid in error state.
|
|
|
|
// If local is true, the operation is limited to the current peer, otherwise
|
|
|
|
// it happens on every cluster peer.
|
2017-12-08 14:39:30 +00:00
|
|
|
func (c *Client) Recover(ci *cid.Cid, local bool) (api.GlobalPinInfo, error) {
|
2017-12-05 20:44:22 +00:00
|
|
|
var gpi api.GlobalPinInfoSerial
|
2018-08-06 20:49:28 +00:00
|
|
|
err := c.do("POST", fmt.Sprintf("/pins/%s/recover?local=%t", ci.String(), local), nil, nil, &gpi)
|
2017-12-05 20:44:22 +00:00
|
|
|
return gpi.ToGlobalPinInfo(), err
|
|
|
|
}
|
|
|
|
|
|
|
|
// RecoverAll triggers Recover() operations on all tracked items. If local is
|
|
|
|
// true, the operation is limited to the current peer. Otherwise, it happens
|
|
|
|
// everywhere.
|
2017-12-08 14:39:30 +00:00
|
|
|
func (c *Client) RecoverAll(local bool) ([]api.GlobalPinInfo, error) {
|
2017-12-05 20:44:22 +00:00
|
|
|
var gpis []api.GlobalPinInfoSerial
|
2018-08-06 20:49:28 +00:00
|
|
|
err := c.do("POST", fmt.Sprintf("/pins/recover?local=%t", local), nil, nil, &gpis)
|
2017-12-05 20:44:22 +00:00
|
|
|
result := make([]api.GlobalPinInfo, len(gpis))
|
|
|
|
for i, p := range gpis {
|
|
|
|
result[i] = p.ToGlobalPinInfo()
|
|
|
|
}
|
|
|
|
return result, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Version returns the ipfs-cluster peer's version.
|
2017-12-08 14:39:30 +00:00
|
|
|
func (c *Client) Version() (api.Version, error) {
|
2017-12-05 20:44:22 +00:00
|
|
|
var ver api.Version
|
2018-08-06 20:49:28 +00:00
|
|
|
err := c.do("GET", "/version", nil, nil, &ver)
|
2017-12-05 20:44:22 +00:00
|
|
|
return ver, err
|
|
|
|
}
|
2017-10-13 21:12:46 +00:00
|
|
|
|
2018-01-18 02:49:35 +00:00
|
|
|
// GetConnectGraph returns an ipfs-cluster connection graph.
|
2017-10-13 21:12:46 +00:00
|
|
|
// The serialized version, strings instead of pids, is returned
|
|
|
|
func (c *Client) GetConnectGraph() (api.ConnectGraphSerial, error) {
|
|
|
|
var graphS api.ConnectGraphSerial
|
2018-08-06 20:49:28 +00:00
|
|
|
err := c.do("GET", "/health/graph", nil, nil, &graphS)
|
2017-10-13 21:12:46 +00:00
|
|
|
return graphS, err
|
|
|
|
}
|
2018-03-14 08:49:49 +00:00
|
|
|
|
|
|
|
// WaitFor is a utility function that allows a caller to wait until the
// status described by fp (fp.Target for fp.Cid) is reached. It blocks
// until the target status is observed on all peers, the context is
// canceled, or an error occurs, returning the last status seen.
func (c *Client) WaitFor(ctx context.Context, fp StatusFilterParams) (api.GlobalPinInfo, error) {
	// Derive a cancelable context so both helper goroutines below are
	// torn down when this function returns for any reason.
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	sf := newStatusFilter()

	// pollStatus feeds periodic Status() results into sf.In;
	// filter forwards them to sf.Out and closes Out once fp.Target
	// is reached (or stops on error via sf.Err).
	go sf.pollStatus(ctx, c, fp)
	go sf.filter(ctx, fp)

	// status holds the most recently received pin info; it is what we
	// return once filter closes sf.Out.
	var status api.GlobalPinInfo

	for {
		select {
		case <-ctx.Done():
			return api.GlobalPinInfo{}, ctx.Err()
		case err := <-sf.Err:
			return api.GlobalPinInfo{}, err
		case st, ok := <-sf.Out:
			if !ok { // channel closed
				return status, nil
			}
			status = st
		}
	}
}
|
|
|
|
|
|
|
|
// StatusFilterParams contains the parameters required
// to filter a stream of status results.
type StatusFilterParams struct {
	// Cid is the item whose status updates are being watched.
	Cid *cid.Cid
	// Local limits status requests to the current peer when true.
	Local bool
	// Target is the TrackerStatus to wait for.
	Target api.TrackerStatus
	// CheckFreq is the interval between status polls.
	CheckFreq time.Duration
}
|
|
|
|
|
|
|
|
// statusFilter wires together the polling and filtering goroutines used
// by WaitFor: raw status updates flow through In, filtered updates
// through Out, Done signals that filtering finished, and Err carries
// failures from either goroutine.
type statusFilter struct {
	In, Out chan api.GlobalPinInfo
	Done    chan struct{}
	Err     chan error
}
|
|
|
|
|
|
|
|
func newStatusFilter() *statusFilter {
|
|
|
|
return &statusFilter{
|
|
|
|
In: make(chan api.GlobalPinInfo),
|
|
|
|
Out: make(chan api.GlobalPinInfo),
|
|
|
|
Done: make(chan struct{}),
|
|
|
|
Err: make(chan error),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// filter relays status updates from sf.In to sf.Out until the target
// status (fp.Target) is reached, an error occurs, or the context is
// canceled. On return it closes sf.Done (telling pollStatus to stop)
// and sf.Out (telling WaitFor that no more updates will arrive).
func (sf *statusFilter) filter(ctx context.Context, fp StatusFilterParams) {
	defer close(sf.Done)
	defer close(sf.Out)

	for {
		select {
		case <-ctx.Done():
			sf.Err <- ctx.Err()
			return
		case gblPinInfo, more := <-sf.In:
			if !more {
				// pollStatus closed In; nothing left to filter.
				return
			}
			ok, err := statusReached(fp.Target, gblPinInfo)
			if err != nil {
				sf.Err <- err
				return
			}

			// Forward every update — including non-matching ones — so
			// the consumer can observe progress; stop only once the
			// target status has been reached.
			sf.Out <- gblPinInfo
			if !ok {
				continue
			}
			return
		}
	}
}
|
|
|
|
|
|
|
|
// pollStatus queries the cluster status of fp.Cid every fp.CheckFreq and
// pushes each result into sf.In. It stops when the context is canceled,
// when a Status call fails (reporting via sf.Err), or when sf.Done is
// closed by filter, in which case it closes sf.In before returning.
func (sf *statusFilter) pollStatus(ctx context.Context, c *Client, fp StatusFilterParams) {
	ticker := time.NewTicker(fp.CheckFreq)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			sf.Err <- ctx.Err()
			return
		case <-ticker.C:
			gblPinInfo, err := c.Status(fp.Cid, fp.Local)
			if err != nil {
				sf.Err <- err
				return
			}
			logger.Debugf("pollStatus: status: %#v", gblPinInfo)
			sf.In <- gblPinInfo
		case <-sf.Done:
			// filter has finished; close In so it is signaled that no
			// further updates will be produced.
			close(sf.In)
			return
		}
	}
}
|
|
|
|
|
|
|
|
func statusReached(target api.TrackerStatus, gblPinInfo api.GlobalPinInfo) (bool, error) {
|
|
|
|
for _, pinInfo := range gblPinInfo.PeerMap {
|
|
|
|
switch pinInfo.Status {
|
|
|
|
case target:
|
|
|
|
continue
|
|
|
|
case api.TrackerStatusBug, api.TrackerStatusClusterError, api.TrackerStatusPinError, api.TrackerStatusUnpinError:
|
|
|
|
return false, fmt.Errorf("error has occurred while attempting to reach status: %s", target.String())
|
|
|
|
case api.TrackerStatusRemote:
|
|
|
|
if target == api.TrackerStatusPinned {
|
|
|
|
continue // to next pinInfo
|
|
|
|
}
|
|
|
|
return false, nil
|
|
|
|
default:
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true, nil
|
|
|
|
}
|
2018-01-29 22:11:23 +00:00
|
|
|
|
2018-08-06 20:49:28 +00:00
|
|
|
// AddMultiFile adds new files to the cluster, importing and potentially
|
|
|
|
// sharding underlying dags across the ipfs daemons of multiple cluster peers.
|
|
|
|
// Progress can be tracked by passing a channel onto which deliver AddedOutput
|
|
|
|
// updates.
|
2018-03-25 04:16:25 +00:00
|
|
|
func (c *Client) AddMultiFile(
|
|
|
|
multiFileR *files.MultiFileReader,
|
2018-08-06 10:44:44 +00:00
|
|
|
params *api.AddParams,
|
2018-08-06 20:49:28 +00:00
|
|
|
out chan<- *api.AddedOutput,
|
2018-07-23 09:14:43 +00:00
|
|
|
) error {
|
2018-08-06 20:49:28 +00:00
|
|
|
defer close(out)
|
|
|
|
|
2018-01-30 16:44:43 +00:00
|
|
|
headers := make(map[string]string)
|
|
|
|
headers["Content-Type"] = "multipart/form-data; boundary=" + multiFileR.Boundary()
|
2018-08-06 10:44:44 +00:00
|
|
|
queryStr := params.ToQueryString()
|
2018-08-06 20:49:28 +00:00
|
|
|
|
|
|
|
handler := func(dec *json.Decoder) error {
|
|
|
|
if out == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
var obj api.AddedOutput
|
|
|
|
err := dec.Decode(&obj)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
select {
|
|
|
|
case out <- &obj:
|
|
|
|
//default:
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-08-06 10:44:44 +00:00
|
|
|
err := c.doStream(
|
|
|
|
"POST",
|
2018-08-07 11:42:20 +00:00
|
|
|
"/add?"+queryStr,
|
2018-08-06 10:44:44 +00:00
|
|
|
headers,
|
2018-08-06 20:49:28 +00:00
|
|
|
multiFileR,
|
|
|
|
handler,
|
2018-08-06 10:44:44 +00:00
|
|
|
)
|
2018-07-23 09:14:43 +00:00
|
|
|
return err
|
2018-01-29 22:11:23 +00:00
|
|
|
}
|
2018-02-06 00:57:14 +00:00
|
|
|
|
2018-07-23 09:14:43 +00:00
|
|
|
// TODO: Eventually an Add(io.Reader) method for adding raw readers as a multifile should be here.
|