Merge pull request #1607 from ipfs/feat/streaming-status

Pintracker: streaming methods

commit 2d94c42310

@@ -54,6 +54,9 @@ func init() {
    rand.Seed(time.Now().UnixNano())
}

// StreamChannelSize is used to define buffer sizes for channels.
const StreamChannelSize = 1024

// Common errors
var (
    // ErrNoEndpointEnabled is returned when the API is created but
@@ -583,19 +586,23 @@ func (api *API) SendResponse(
    w.WriteHeader(status)
}

// Iterator is a function that returns the next item.
type Iterator func() (interface{}, bool, error)
// StreamIterator is a function that returns the next item. It is used in
// StreamResponse.
type StreamIterator func() (interface{}, bool, error)

// StreamResponse reads from an iterator and sends the response.
func (api *API) StreamResponse(w http.ResponseWriter, next Iterator) {
func (api *API) StreamResponse(w http.ResponseWriter, next StreamIterator, errCh chan error) {
    api.SetHeaders(w)
    enc := json.NewEncoder(w)
    flusher, flush := w.(http.Flusher)
    w.Header().Set("Trailer", "X-Stream-Error")

    total := 0
    var err error
    var ok bool
    var item interface{}
    for {
        item, ok, err := next()
        item, ok, err = next()
        if total == 0 {
            if err != nil {
                st := http.StatusInternalServerError

@@ -612,16 +619,15 @@ func (api *API) StreamResponse(w http.ResponseWriter, next Iterator) {
                w.WriteHeader(http.StatusNoContent)
                return
            }
            w.WriteHeader(http.StatusOK)
        }
        if err != nil {
            w.Header().Set("X-Stream-Error", err.Error())
            // trailer error
            return
            break
        }

        // finish just fine
        if !ok {
            return
            break
        }

        // we have an item

@@ -635,9 +641,19 @@ func (api *API) StreamResponse(w http.ResponseWriter, next Iterator) {
            flusher.Flush()
        }
    }

    if err != nil {
        w.Header().Set("X-Stream-Error", err.Error())
    }
    // check for function errors
    for funcErr := range errCh {
        if funcErr != nil {
            w.Header().Add("X-Stream-Error", funcErr.Error())
        }
    }
}

// SetsHeaders sets all the headers that are common to all responses
// SetHeaders sets all the headers that are common to all responses
// from this API. Called automatically from SendResponse().
func (api *API) SetHeaders(w http.ResponseWriter) {
    for header, values := range api.config.Headers {
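The key constraint behind the change above is that StreamResponse can no longer report a failure through the status code once the first item has been written, so it declares an X-Stream-Error trailer up front and only fills it in at the end. Below is a minimal, self-contained sketch of that trailer pattern in plain net/http; the handler, port and fake items are illustrative and not part of this commit:

    package main

    import (
        "encoding/json"
        "log"
        "net/http"
    )

    // streamHandler declares the trailer before the first body write,
    // streams concatenated JSON values, and records any late error in
    // X-Stream-Error, which the client sees only after reading the body.
    func streamHandler(w http.ResponseWriter, r *http.Request) {
        w.Header().Set("Trailer", "X-Stream-Error") // must precede WriteHeader
        w.WriteHeader(http.StatusOK)

        enc := json.NewEncoder(w)
        flusher, canFlush := w.(http.Flusher)

        for i := 0; i < 3; i++ { // stands in for the StreamIterator/errCh plumbing
            if err := enc.Encode(map[string]int{"item": i}); err != nil {
                w.Header().Set("X-Stream-Error", err.Error()) // sent as a trailer
                return
            }
            if canFlush {
                flusher.Flush()
            }
        }
    }

    func main() {
        http.HandleFunc("/stream", streamHandler)
        log.Fatal(http.ListenAndServe(":8080", nil))
    }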
@@ -54,7 +54,7 @@

// ProcessStreamingResp decodes a streaming response into the given type
// and fails the test on error.
func ProcessStreamingResp(t *testing.T, httpResp *http.Response, err error, resp interface{}) {
func ProcessStreamingResp(t *testing.T, httpResp *http.Response, err error, resp interface{}, trailerError bool) {
    if err != nil {
        t.Fatal("error making streaming request: ", err)
    }

@@ -97,6 +97,13 @@
            }
        }
    }
    trailerMsg := httpResp.Trailer.Get("X-Stream-Error")
    if trailerError && trailerMsg == "" {
        t.Error("expected trailer error")
    }
    if !trailerError && trailerMsg != "" {
        t.Error("got trailer error: ", trailerMsg)
    }
}

// CheckHeaders checks that all the headers are set to what is expected.

@@ -246,19 +253,19 @@
    req.Header.Set("Content-Type", contentType)
    req.Header.Set("Origin", ClientOrigin)
    httpResp, err := c.Do(req)
    ProcessStreamingResp(t, httpResp, err, resp)
    ProcessStreamingResp(t, httpResp, err, resp, false)
    CheckHeaders(t, api.Headers(), url, httpResp.Header)
}

// MakeStreamingGet performs a GET request and uses ProcessStreamingResp
func MakeStreamingGet(t *testing.T, api API, url string, resp interface{}) {
func MakeStreamingGet(t *testing.T, api API, url string, resp interface{}, trailerError bool) {
    h := MakeHost(t, api)
    defer h.Close()
    c := HTTPClient(t, h, IsHTTPS(url))
    req, _ := http.NewRequest(http.MethodGet, url, nil)
    req.Header.Set("Origin", ClientOrigin)
    httpResp, err := c.Do(req)
    ProcessStreamingResp(t, httpResp, err, resp)
    ProcessStreamingResp(t, httpResp, err, resp, trailerError)
    CheckHeaders(t, api.Headers(), url, httpResp.Header)
}
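On the consuming side, Go exposes trailers through http.Response.Trailer, but that map is only populated once the body has been read to EOF; this ordering is exactly why the helper checks X-Stream-Error last. A hedged sketch of a standalone consumer (the URL is illustrative):

    package main

    import (
        "encoding/json"
        "fmt"
        "io"
        "log"
        "net/http"
    )

    func main() {
        // Illustrative endpoint; any server using the trailer pattern works.
        resp, err := http.Get("http://127.0.0.1:8080/stream")
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()

        // Drain the stream of concatenated JSON values first...
        dec := json.NewDecoder(resp.Body)
        for {
            var item map[string]interface{}
            if err := dec.Decode(&item); err == io.EOF {
                break
            } else if err != nil {
                log.Fatal(err)
            }
            fmt.Println(item)
        }

        // ...only now is the trailer map populated.
        if msg := resp.Trailer.Get("X-Stream-Error"); msg != "" {
            log.Fatal("stream ended with error: ", msg)
        }
    }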
@@ -386,10 +386,15 @@ func (proxy *Server) unpinHandler(w http.ResponseWriter, r *http.Request) {
func (proxy *Server) pinLsHandler(w http.ResponseWriter, r *http.Request) {
    proxy.setHeaders(w.Header(), r)

    pinLs := ipfsPinLsResp{}
    pinLs.Keys = make(map[string]ipfsPinType)

    arg := r.URL.Query().Get("arg")

    stream := false
    streamArg := r.URL.Query().Get("stream")
    streamArg2 := r.URL.Query().Get("s")
    if streamArg == "true" || streamArg2 == "true" {
        stream = true
    }

    if arg != "" {
        c, err := cid.Decode(arg)
        if err != nil {

@@ -409,8 +414,23 @@
            ipfsErrorResponder(w, fmt.Sprintf("Error: path '%s' is not pinned", arg), -1)
            return
        }
        pinLs.Keys[pin.Cid.String()] = ipfsPinType{
            Type: "recursive",
        if stream {
            ipinfo := api.IPFSPinInfo{
                Cid:  api.Cid(pin.Cid),
                Type: pin.Mode.ToIPFSPinStatus(),
            }
            resBytes, _ := json.Marshal(ipinfo)
            w.WriteHeader(http.StatusOK)
            w.Write(resBytes)
        } else {
            pinLs := ipfsPinLsResp{}
            pinLs.Keys = make(map[string]ipfsPinType)
            pinLs.Keys[pin.Cid.String()] = ipfsPinType{
                Type: "recursive",
            }
            resBytes, _ := json.Marshal(pinLs)
            w.WriteHeader(http.StatusOK)
            w.Write(resBytes)
        }
    } else {
        in := make(chan struct{})

@@ -432,22 +452,42 @@
            )
        }()

        for pin := range pins {
            pinLs.Keys[pin.Cid.String()] = ipfsPinType{
                Type: "recursive",
        if stream {
            w.Header().Set("Trailer", "X-Stream-Error")
            w.WriteHeader(http.StatusOK)
            for pin := range pins {
                ipinfo := api.IPFSPinInfo{
                    Cid:  api.Cid(pin.Cid),
                    Type: pin.Mode.ToIPFSPinStatus(),
                }
                resBytes, _ := json.Marshal(ipinfo)
                w.Write(resBytes)
            }
            }
            wg.Wait()
            if err != nil {
                w.Header().Add("X-Stream-Error", err.Error())
                return
            }
        } else {
            pinLs := ipfsPinLsResp{}
            pinLs.Keys = make(map[string]ipfsPinType)

            wg.Wait()
            if err != nil {
                ipfsErrorResponder(w, err.Error(), -1)
                return
            for pin := range pins {
                pinLs.Keys[pin.Cid.String()] = ipfsPinType{
                    Type: "recursive",
                }
            }

            wg.Wait()
            if err != nil {
                ipfsErrorResponder(w, err.Error(), -1)
                return
            }
            resBytes, _ := json.Marshal(pinLs)
            w.WriteHeader(http.StatusOK)
            w.Write(resBytes)
        }
    }

    resBytes, _ := json.Marshal(pinLs)
    w.WriteHeader(http.StatusOK)
    w.Write(resBytes)
}

func (proxy *Server) pinUpdateHandler(w http.ResponseWriter, r *http.Request) {
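With stream=true (or s=true), the proxy now answers pin ls the way IPFS itself does: a sequence of back-to-back JSON objects, each with a Cid and a Type, instead of a single Keys map. A sketch of a consumer for that shape follows; the address is illustrative (ipfs-cluster's proxy listens on port 9095 by default) and the struct only mirrors the two fields shown above:

    package main

    import (
        "encoding/json"
        "fmt"
        "io"
        "log"
        "net/http"
    )

    // pinInfo mirrors the streamed objects' JSON shape: {"Cid":...,"Type":...}.
    type pinInfo struct {
        Cid  string `json:"Cid"`
        Type string `json:"Type"`
    }

    func main() {
        // Illustrative proxy address; adjust to your deployment.
        resp, err := http.Post("http://127.0.0.1:9095/api/v0/pin/ls?stream=true", "", nil)
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()

        dec := json.NewDecoder(resp.Body) // consumes concatenated JSON values
        for {
            var p pinInfo
            if err := dec.Decode(&p); err == io.EOF {
                break
            } else if err != nil {
                log.Fatal(err)
            }
            fmt.Println(p.Cid, p.Type)
        }
    }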
@@ -346,20 +346,27 @@ func (api *API) listPins(w http.ResponseWriter, r *http.Request) {
            return
        }
    } else {
        var globalPinInfos []types.GlobalPinInfo
        err := api.rpcClient.CallContext(
            r.Context(),
            "",
            "Cluster",
            "StatusAll",
            tst,
            &globalPinInfos,
        )
        if err != nil {
            api.SendResponse(w, common.SetStatusAutomatically, err, nil)
            return
        }
        for i, gpi := range globalPinInfos {
        in := make(chan types.TrackerStatus, 1)
        in <- tst
        close(in)
        out := make(chan types.GlobalPinInfo, common.StreamChannelSize)
        errCh := make(chan error, 1)

        go func() {
            defer close(errCh)

            errCh <- api.rpcClient.Stream(
                r.Context(),
                "",
                "Cluster",
                "StatusAll",
                in,
                out,
            )
        }()

        i := 0
        for gpi := range out {
            st := globalPinInfoToSvcPinStatus(gpi.Cid.String(), gpi)
            if st.Status == pinsvc.StatusUndefined {
                // i.e things unpinning

@@ -380,10 +387,17 @@
                continue
            }
            pinList.Results = append(pinList.Results, st)
            if i+1 == opts.Limit {
            i++
            if i == opts.Limit {
                break
            }
        }

        err := <-errCh
        if err != nil {
            api.SendResponse(w, common.SetStatusAutomatically, err, nil)
            return
        }
    }

    pinList.Count = len(pinList.Results)
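The shape that replaces CallContext here recurs throughout the commit: a producer goroutine owns a buffered error channel, the streaming RPC call fills (and eventually closes) the output channel, and the consumer drains the output before asking how the stream ended. A self-contained sketch of just that choreography, where produce stands in for an rpcClient.Stream call:

    package main

    import (
        "errors"
        "fmt"
    )

    // produce stands in for a streaming RPC call: it fills out, closes
    // it when done, and returns one error describing how the stream ended.
    func produce(out chan<- int) error {
        defer close(out)
        for i := 0; i < 5; i++ {
            out <- i
        }
        return errors.New("illustrative late failure")
    }

    func main() {
        out := make(chan int, 16) // buffered, like common.StreamChannelSize
        errCh := make(chan error, 1)

        go func() {
            defer close(errCh) // the producer goroutine owns errCh
            errCh <- produce(out)
        }()

        for v := range out { // drain everything first
            fmt.Println(v)
        }
        if err := <-errCh; err != nil { // then learn how the stream ended
            fmt.Println("stream error:", err)
        }
    }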
@@ -85,9 +85,9 @@ type Client interface {
    // is fetched from all cluster peers.
    Status(ctx context.Context, ci cid.Cid, local bool) (api.GlobalPinInfo, error)
    // StatusCids returns status information for the requested CIDs.
    StatusCids(ctx context.Context, cids []cid.Cid, local bool) ([]api.GlobalPinInfo, error)
    StatusCids(ctx context.Context, cids []cid.Cid, local bool, out chan<- api.GlobalPinInfo) error
    // StatusAll gathers Status() for all tracked items.
    StatusAll(ctx context.Context, filter api.TrackerStatus, local bool) ([]api.GlobalPinInfo, error)
    StatusAll(ctx context.Context, filter api.TrackerStatus, local bool, out chan<- api.GlobalPinInfo) error

    // Recover retriggers pin or unpin ipfs operations for a Cid in error
    // state. If local is true, the operation is limited to the current

@@ -96,7 +96,7 @@ type Client interface {
    // RecoverAll triggers Recover() operations on all tracked items. If
    // local is true, the operation is limited to the current peer.
    // Otherwise, it happens everywhere.
    RecoverAll(ctx context.Context, local bool) ([]api.GlobalPinInfo, error)
    RecoverAll(ctx context.Context, local bool, out chan<- api.GlobalPinInfo) error

    // Alerts returns information about health events in the cluster (expired
    // metrics etc.).
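From a caller's perspective, the new signatures invert the data flow: the caller allocates the channel, the method closes it when it is done, and the error arrives through the return value. A hedged usage sketch, assuming a default client configuration:

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/ipfs/ipfs-cluster/api"
        "github.com/ipfs/ipfs-cluster/api/rest/client"
    )

    func main() {
        c, err := client.NewDefaultClient(&client.Config{})
        if err != nil {
            log.Fatal(err)
        }

        out := make(chan api.GlobalPinInfo, 1024)
        go func() {
            // StatusAll closes out when it finishes, so the range below ends.
            if err := c.StatusAll(context.Background(), api.TrackerStatusUndefined, false, out); err != nil {
                log.Println(err)
            }
        }()
        for gpi := range out {
            fmt.Println(gpi.Cid)
        }
    }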
@@ -253,16 +253,13 @@
// StatusCids returns Status() information for the given Cids. If local is
// true, the information affects only the current peer, otherwise the
// information is fetched from all cluster peers.
func (lc *loadBalancingClient) StatusCids(ctx context.Context, cids []cid.Cid, local bool) ([]api.GlobalPinInfo, error) {
    var pinInfos []api.GlobalPinInfo
func (lc *loadBalancingClient) StatusCids(ctx context.Context, cids []cid.Cid, local bool, out chan<- api.GlobalPinInfo) error {
    call := func(c Client) error {
        var err error
        pinInfos, err = c.StatusCids(ctx, cids, local)
        return err
        return c.StatusCids(ctx, cids, local, out)
    }

    err := lc.retry(0, call)
    return pinInfos, err
    return err
}

// StatusAll gathers Status() for all tracked items. If a filter is

@@ -270,16 +267,13 @@
// will be returned. A filter can be built by merging TrackerStatuses with
// a bitwise OR operation (st1 | st2 | ...). A "0" filter value (or
// api.TrackerStatusUndefined), means all.
func (lc *loadBalancingClient) StatusAll(ctx context.Context, filter api.TrackerStatus, local bool) ([]api.GlobalPinInfo, error) {
    var pinInfos []api.GlobalPinInfo
func (lc *loadBalancingClient) StatusAll(ctx context.Context, filter api.TrackerStatus, local bool, out chan<- api.GlobalPinInfo) error {
    call := func(c Client) error {
        var err error
        pinInfos, err = c.StatusAll(ctx, filter, local)
        return err
        return c.StatusAll(ctx, filter, local, out)
    }

    err := lc.retry(0, call)
    return pinInfos, err
    return err
}

// Recover retriggers pin or unpin ipfs operations for a Cid in error state.

@@ -300,16 +294,13 @@
// RecoverAll triggers Recover() operations on all tracked items. If local is
// true, the operation is limited to the current peer. Otherwise, it happens
// everywhere.
func (lc *loadBalancingClient) RecoverAll(ctx context.Context, local bool) ([]api.GlobalPinInfo, error) {
    var pinInfos []api.GlobalPinInfo
func (lc *loadBalancingClient) RecoverAll(ctx context.Context, local bool, out chan<- api.GlobalPinInfo) error {
    call := func(c Client) error {
        var err error
        pinInfos, err = c.RecoverAll(ctx, local)
        return err
        return c.RecoverAll(ctx, local, out)
    }

    err := lc.retry(0, call)
    return pinInfos, err
    return err
}

// Alerts returns things that are wrong with cluster.
@@ -156,11 +156,11 @@
// Allocations returns the consensus state listing all tracked items and
// the peers that should be pinning them.
func (c *defaultClient) Allocations(ctx context.Context, filter api.PinType, out chan<- api.Pin) error {
    defer close(out)

    ctx, span := trace.StartSpan(ctx, "client/Allocations")
    defer span.End()

    defer close(out)

    types := []api.PinType{
        api.DataType,
        api.MetaType,

@@ -191,14 +191,13 @@
    }

    f := url.QueryEscape(strings.Join(strFilter, ","))
    err := c.doStream(
    return c.doStream(
        ctx,
        "GET",
        fmt.Sprintf("/allocations?filter=%s", f),
        nil,
        nil,
        handler)
    return err
}

// Allocation returns the current allocations for a given Cid.

@@ -233,8 +232,8 @@
// StatusCids returns Status() information for the given Cids. If local is
// true, the information affects only the current peer, otherwise the
// information is fetched from all cluster peers.
func (c *defaultClient) StatusCids(ctx context.Context, cids []cid.Cid, local bool) ([]api.GlobalPinInfo, error) {
    return c.statusAllWithCids(ctx, api.TrackerStatusUndefined, cids, local)
func (c *defaultClient) StatusCids(ctx context.Context, cids []cid.Cid, local bool, out chan<- api.GlobalPinInfo) error {
    return c.statusAllWithCids(ctx, api.TrackerStatusUndefined, cids, local, out)
}

// StatusAll gathers Status() for all tracked items. If a filter is

@@ -242,21 +241,20 @@
// will be returned. A filter can be built by merging TrackerStatuses with
// a bitwise OR operation (st1 | st2 | ...). A "0" filter value (or
// api.TrackerStatusUndefined), means all.
func (c *defaultClient) StatusAll(ctx context.Context, filter api.TrackerStatus, local bool) ([]api.GlobalPinInfo, error) {
    return c.statusAllWithCids(ctx, filter, nil, local)
func (c *defaultClient) StatusAll(ctx context.Context, filter api.TrackerStatus, local bool, out chan<- api.GlobalPinInfo) error {
    return c.statusAllWithCids(ctx, filter, nil, local, out)
}

func (c *defaultClient) statusAllWithCids(ctx context.Context, filter api.TrackerStatus, cids []cid.Cid, local bool) ([]api.GlobalPinInfo, error) {
func (c *defaultClient) statusAllWithCids(ctx context.Context, filter api.TrackerStatus, cids []cid.Cid, local bool, out chan<- api.GlobalPinInfo) error {
    defer close(out)
    ctx, span := trace.StartSpan(ctx, "client/StatusAll")
    defer span.End()

    var gpis []api.GlobalPinInfo

    filterStr := ""
    if filter != api.TrackerStatusUndefined { // undefined filter means "all"
        filterStr = filter.String()
        if filterStr == "" {
            return nil, errors.New("invalid filter value")
            return errors.New("invalid filter value")
        }
    }

@@ -265,16 +263,25 @@
        cidsStr[i] = c.String()
    }

    err := c.do(
    handler := func(dec *json.Decoder) error {
        var obj api.GlobalPinInfo
        err := dec.Decode(&obj)
        if err != nil {
            return err
        }
        out <- obj
        return nil
    }

    return c.doStream(
        ctx,
        "GET",
        fmt.Sprintf("/pins?local=%t&filter=%s&cids=%s",
            local, url.QueryEscape(filterStr), strings.Join(cidsStr, ",")),
        nil,
        nil,
        &gpis,
        handler,
    )
    return gpis, err
}

// Recover retriggers pin or unpin ipfs operations for a Cid in error state.

@@ -292,13 +299,29 @@
// RecoverAll triggers Recover() operations on all tracked items. If local is
// true, the operation is limited to the current peer. Otherwise, it happens
// everywhere.
func (c *defaultClient) RecoverAll(ctx context.Context, local bool) ([]api.GlobalPinInfo, error) {
func (c *defaultClient) RecoverAll(ctx context.Context, local bool, out chan<- api.GlobalPinInfo) error {
    defer close(out)

    ctx, span := trace.StartSpan(ctx, "client/RecoverAll")
    defer span.End()

    var gpis []api.GlobalPinInfo
    err := c.do(ctx, "POST", fmt.Sprintf("/pins/recover?local=%t", local), nil, nil, &gpis)
    return gpis, err
    handler := func(dec *json.Decoder) error {
        var obj api.GlobalPinInfo
        err := dec.Decode(&obj)
        if err != nil {
            return err
        }
        out <- obj
        return nil
    }

    return c.doStream(
        ctx,
        "POST",
        fmt.Sprintf("/pins/recover?local=%t", local),
        nil,
        nil,
        handler)
}

// Alerts returns information about health events in the cluster (expired metrics
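The handler closures above lean on a json.Decoder property: a decoder over a response body consumes back-to-back JSON values, so each Decode call yields exactly one streamed object. A self-contained sketch of that decode-into-channel idea (the type and input are illustrative):

    package main

    import (
        "encoding/json"
        "fmt"
        "io"
        "strings"
    )

    // decodeStream shows the handler idea from statusAllWithCids: each
    // Decode call pulls the next value from a concatenated JSON stream,
    // and every decoded object is forwarded to out until EOF.
    func decodeStream(r io.Reader, out chan<- map[string]string) error {
        defer close(out)
        dec := json.NewDecoder(r)
        for {
            var obj map[string]string
            err := dec.Decode(&obj)
            if err == io.EOF {
                return nil
            }
            if err != nil {
                return err
            }
            out <- obj
        }
    }

    func main() {
        body := strings.NewReader(`{"cid":"a"}{"cid":"b"}{"cid":"c"}`)
        out := make(chan map[string]string, 4)
        go decodeStream(body, out) // error handling elided in this sketch
        for obj := range out {
            fmt.Println(obj["cid"])
        }
    }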
@@ -346,10 +346,16 @@ func TestStatusCids(t *testing.T) {
    defer shutdown(api)

    testF := func(t *testing.T, c Client) {
        pins, err := c.StatusCids(ctx, []cid.Cid{test.Cid1}, false)
        if err != nil {
            t.Fatal(err)
        }
        out := make(chan types.GlobalPinInfo)

        go func() {
            err := c.StatusCids(ctx, []cid.Cid{test.Cid1}, false, out)
            if err != nil {
                t.Error(err)
            }
        }()

        pins := collectGlobalPinInfos(t, out)
        if len(pins) != 1 {
            t.Fatal("wrong number of pins returned")
        }

@@ -361,48 +367,87 @@ func TestStatusCids(t *testing.T) {
    testClients(t, api, testF)
}

func collectGlobalPinInfos(t *testing.T, out <-chan types.GlobalPinInfo) []types.GlobalPinInfo {
    t.Helper()

    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    var gpis []types.GlobalPinInfo
    for {
        select {
        case <-ctx.Done():
            t.Error(ctx.Err())
            return gpis
        case gpi, ok := <-out:
            if !ok {
                return gpis
            }
            gpis = append(gpis, gpi)
        }
    }
}

func TestStatusAll(t *testing.T) {
    ctx := context.Background()
    api := testAPI(t)
    defer shutdown(api)

    testF := func(t *testing.T, c Client) {
        pins, err := c.StatusAll(ctx, 0, false)
        if err != nil {
            t.Fatal(err)
        }
        out := make(chan types.GlobalPinInfo)
        go func() {
            err := c.StatusAll(ctx, 0, false, out)
            if err != nil {
                t.Error(err)
            }
        }()
        pins := collectGlobalPinInfos(t, out)

        if len(pins) == 0 {
            t.Error("there should be some pins")
        }

        // With local true
        pins, err = c.StatusAll(ctx, 0, true)
        if err != nil {
            t.Fatal(err)
        }
        out2 := make(chan types.GlobalPinInfo)
        go func() {
            err := c.StatusAll(ctx, 0, true, out2)
            if err != nil {
                t.Error(err)
            }
        }()
        pins = collectGlobalPinInfos(t, out2)

        if len(pins) != 2 {
            t.Error("there should be two pins")
        }

        // With filter option
        pins, err = c.StatusAll(ctx, types.TrackerStatusPinning, false)
        if err != nil {
            t.Fatal(err)
        }
        out3 := make(chan types.GlobalPinInfo)
        go func() {
            err := c.StatusAll(ctx, types.TrackerStatusPinning, false, out3)
            if err != nil {
                t.Error(err)
            }
        }()
        pins = collectGlobalPinInfos(t, out3)

        if len(pins) != 1 {
            t.Error("there should be one pin")
        }

        pins, err = c.StatusAll(ctx, types.TrackerStatusPinned|types.TrackerStatusError, false)
        if err != nil {
            t.Fatal(err)
        }
        out4 := make(chan types.GlobalPinInfo)
        go func() {
            err := c.StatusAll(ctx, types.TrackerStatusPinned|types.TrackerStatusError, false, out4)
            if err != nil {
                t.Error(err)
            }
        }()
        pins = collectGlobalPinInfos(t, out4)

        if len(pins) != 2 {
            t.Error("there should be two pins")
        }

        _, err = c.StatusAll(ctx, 1<<25, false)
        out5 := make(chan types.GlobalPinInfo, 1)
        err := c.StatusAll(ctx, 1<<25, false, out5)
        if err == nil {
            t.Error("expected an error")
        }

@@ -435,12 +480,14 @@ func TestRecoverAll(t *testing.T) {
    defer shutdown(api)

    testF := func(t *testing.T, c Client) {
        _, err := c.RecoverAll(ctx, true)
        out := make(chan types.GlobalPinInfo, 10)
        err := c.RecoverAll(ctx, true, out)
        if err != nil {
            t.Fatal(err)
        }

        _, err = c.RecoverAll(ctx, false)
        out2 := make(chan types.GlobalPinInfo, 10)
        err = c.RecoverAll(ctx, false, out2)
        if err != nil {
            t.Fatal(err)
        }
@@ -21,7 +21,6 @@ import (
    "github.com/ipfs/ipfs-cluster/adder/adderutils"
    types "github.com/ipfs/ipfs-cluster/api"
    "github.com/ipfs/ipfs-cluster/api/common"
    "go.uber.org/multierr"

    logging "github.com/ipfs/go-log/v2"
    "github.com/libp2p/go-libp2p-core/host"

@@ -457,12 +456,15 @@ func (api *API) allocationsHandler(w http.ResponseWriter, r *http.Request) {
    close(in)

    pins := make(chan types.Pin)
    errCh := make(chan error, 1)

    ctx, cancel := context.WithCancel(r.Context())
    defer cancel()

    go func() {
        err := api.rpcClient.Stream(
        defer close(errCh)

        errCh <- api.rpcClient.Stream(
            r.Context(),
            "",
            "Cluster",

@@ -470,10 +472,6 @@
            in,
            pins,
        )
        if err != nil {
            logger.Error(err)
            cancel()
        }
    }()

    iter := func() (interface{}, bool, error) {

@@ -481,6 +479,7 @@
        var ok bool
    iterloop:
        for {

            select {
            case <-ctx.Done():
                break iterloop

@@ -498,7 +497,7 @@
        return p, ok, ctx.Err()
    }

    api.StreamResponse(w, iter)
    api.StreamResponse(w, iter, errCh)
}

func (api *API) allocationHandler(w http.ResponseWriter, r *http.Request) {

@@ -517,6 +516,9 @@
}

func (api *API) statusAllHandler(w http.ResponseWriter, r *http.Request) {
    ctx, cancel := context.WithCancel(r.Context())
    defer cancel()

    queryValues := r.URL.Query()
    if queryValues.Get("cids") != "" {
        api.statusCidsHandler(w, r)

@@ -525,8 +527,6 @@

    local := queryValues.Get("local")

    var globalPinInfos []types.GlobalPinInfo

    filterStr := queryValues.Get("filter")
    filter := types.TrackerStatusFromString(filterStr)
    // FIXME: This is a bit lazy, as "invalidxx,pinned" would result in a

@@ -536,42 +536,68 @@
        return
    }

    if local == "true" {
        var pinInfos []types.PinInfo
    var iter common.StreamIterator
    in := make(chan types.TrackerStatus, 1)
    in <- filter
    close(in)
    errCh := make(chan error, 1)

        err := api.rpcClient.CallContext(
            r.Context(),
            "",
            "Cluster",
            "StatusAllLocal",
            filter,
            &pinInfos,
        )
        if err != nil {
            api.SendResponse(w, common.SetStatusAutomatically, err, nil)
            return
    if local == "true" {
        out := make(chan types.PinInfo, common.StreamChannelSize)
        iter = func() (interface{}, bool, error) {
            select {
            case <-ctx.Done():
                return nil, false, ctx.Err()
            case p, ok := <-out:
                return p.ToGlobal(), ok, nil
            }
        }
        globalPinInfos = pinInfosToGlobal(pinInfos)

        go func() {
            defer close(errCh)

            errCh <- api.rpcClient.Stream(
                r.Context(),
                "",
                "Cluster",
                "StatusAllLocal",
                in,
                out,
            )
        }()

    } else {
        err := api.rpcClient.CallContext(
            r.Context(),
            "",
            "Cluster",
            "StatusAll",
            filter,
            &globalPinInfos,
        )
        if err != nil {
            api.SendResponse(w, common.SetStatusAutomatically, err, nil)
            return
        out := make(chan types.GlobalPinInfo, common.StreamChannelSize)
        iter = func() (interface{}, bool, error) {
            select {
            case <-ctx.Done():
                return nil, false, ctx.Err()
            case p, ok := <-out:
                return p, ok, nil
            }
        }
        go func() {
            defer close(errCh)

            errCh <- api.rpcClient.Stream(
                r.Context(),
                "",
                "Cluster",
                "StatusAll",
                in,
                out,
            )
        }()
    }

    api.SendResponse(w, common.SetStatusAutomatically, nil, globalPinInfos)
    api.StreamResponse(w, iter, errCh)
}
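Both branches above repeat the same closure: pull the next value from the RPC output channel, but let a cancelled request win the race. Written once as a generic helper it would look roughly like the sketch below; chanIterator is hypothetical, the commit simply inlines this pattern at each call site:

    import (
        "context"

        types "github.com/ipfs/ipfs-cluster/api"
        "github.com/ipfs/ipfs-cluster/api/common"
    )

    // chanIterator bridges an output channel to a common.StreamIterator:
    // StreamResponse calls the returned function once per item, and the
    // select keeps a cancelled request from blocking on an idle channel.
    func chanIterator(ctx context.Context, out <-chan types.GlobalPinInfo) common.StreamIterator {
        return func() (interface{}, bool, error) {
            select {
            case <-ctx.Done():
                return nil, false, ctx.Err()
            case p, ok := <-out:
                return p, ok, nil
            }
        }
    }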
// request statuses for multiple CIDs in parallel.
func (api *API) statusCidsHandler(w http.ResponseWriter, r *http.Request) {
    ctx, cancel := context.WithCancel(r.Context())
    defer cancel()

    queryValues := r.URL.Query()
    filterCidsStr := strings.Split(queryValues.Get("cids"), ",")
    var cids []cid.Cid

@@ -587,17 +613,15 @@

    local := queryValues.Get("local")

    type gpiResult struct {
        gpi types.GlobalPinInfo
        err error
    }
    gpiCh := make(chan gpiResult, len(cids))
    gpiCh := make(chan types.GlobalPinInfo, len(cids))
    errCh := make(chan error, len(cids))
    var wg sync.WaitGroup
    wg.Add(len(cids))

    // Close channel when done
    go func() {
        wg.Wait()
        close(errCh)
        close(gpiCh)
    }()

@@ -607,14 +631,18 @@
            defer wg.Done()
            var pinInfo types.PinInfo
            err := api.rpcClient.CallContext(
                r.Context(),
                ctx,
                "",
                "Cluster",
                "StatusLocal",
                c,
                &pinInfo,
            )
            gpiCh <- gpiResult{gpi: pinInfo.ToGlobal(), err: err}
            if err != nil {
                errCh <- err
                return
            }
            gpiCh <- pinInfo.ToGlobal()
        }(ci)
    }
} else {

@@ -623,25 +651,28 @@
            defer wg.Done()
            var pinInfo types.GlobalPinInfo
            err := api.rpcClient.CallContext(
                r.Context(),
                ctx,
                "",
                "Cluster",
                "Status",
                c,
                &pinInfo,
            )
            gpiCh <- gpiResult{gpi: pinInfo, err: err}
            if err != nil {
                errCh <- err
                return
            }
            gpiCh <- pinInfo
        }(ci)
    }
}

    var gpis []types.GlobalPinInfo
    var err error
    for gpiResult := range gpiCh {
        gpis = append(gpis, gpiResult.gpi)
        err = multierr.Append(err, gpiResult.err)
    iter := func() (interface{}, bool, error) {
        gpi, ok := <-gpiCh
        return gpi, ok, nil
    }
    api.SendResponse(w, common.SetStatusAutomatically, err, gpis)

    api.StreamResponse(w, iter, errCh)
}

func (api *API) statusHandler(w http.ResponseWriter, r *http.Request) {

@@ -676,31 +707,66 @@
}

func (api *API) recoverAllHandler(w http.ResponseWriter, r *http.Request) {
    ctx, cancel := context.WithCancel(r.Context())
    defer cancel()

    queryValues := r.URL.Query()
    local := queryValues.Get("local")

    var iter common.StreamIterator
    in := make(chan struct{})
    close(in)
    errCh := make(chan error, 1)

    if local == "true" {
        var pinInfos []types.PinInfo
        err := api.rpcClient.CallContext(
            r.Context(),
            "",
            "Cluster",
            "RecoverAllLocal",
            struct{}{},
            &pinInfos,
        )
        api.SendResponse(w, common.SetStatusAutomatically, err, pinInfosToGlobal(pinInfos))
        out := make(chan types.PinInfo, common.StreamChannelSize)
        iter = func() (interface{}, bool, error) {
            select {
            case <-ctx.Done():
                return nil, false, ctx.Err()
            case p, ok := <-out:
                return p.ToGlobal(), ok, nil
            }
        }

        go func() {
            defer close(errCh)

            errCh <- api.rpcClient.Stream(
                r.Context(),
                "",
                "Cluster",
                "RecoverAllLocal",
                in,
                out,
            )
        }()

    } else {
        var globalPinInfos []types.GlobalPinInfo
        err := api.rpcClient.CallContext(
            r.Context(),
            "",
            "Cluster",
            "RecoverAll",
            struct{}{},
            &globalPinInfos,
        )
        api.SendResponse(w, common.SetStatusAutomatically, err, globalPinInfos)
        out := make(chan types.GlobalPinInfo, common.StreamChannelSize)
        iter = func() (interface{}, bool, error) {
            select {
            case <-ctx.Done():
                return nil, false, ctx.Err()
            case p, ok := <-out:
                return p, ok, nil
            }
        }
        go func() {
            defer close(errCh)

            errCh <- api.rpcClient.Stream(
                r.Context(),
                "",
                "Cluster",
                "RecoverAll",
                in,
                out,
            )
        }()
    }

    api.StreamResponse(w, iter, errCh)
}

func (api *API) recoverHandler(w http.ResponseWriter, r *http.Request) {

@@ -772,12 +838,3 @@ func repoGCToGlobal(r types.RepoGC) types.GlobalRepoGC {
        },
    }
}

func pinInfosToGlobal(pInfos []types.PinInfo) []types.GlobalPinInfo {
    gPInfos := make([]types.GlobalPinInfo, len(pInfos))
    for i, p := range pInfos {
        gpi := p.ToGlobal()
        gPInfos[i] = gpi
    }
    return gPInfos
}
@@ -222,7 +222,7 @@ func TestAPIAddFileEndpointShard(t *testing.T) {
    defer closer.Close()
    mpContentType := "multipart/form-data; boundary=" + body.Boundary()
    resp := api.AddedOutput{}
    fmtStr1 := "/add?shard=true&repl_min=-1&repl_max=-1&stream-channels=true"
    fmtStr1 := "/add?shard=true&repl_min=-1&repl_max=-1&stream-channels=true&shard-size=1000000"
    shardURL := url(rest) + fmtStr1
    test.MakeStreamingPost(t, rest, shardURL, body, mpContentType, &resp)
}

@@ -507,14 +507,14 @@ func TestAPIAllocationsEndpoint(t *testing.T) {

    tf := func(t *testing.T, url test.URLFunc) {
        var resp []api.Pin
        test.MakeStreamingGet(t, rest, url(rest)+"/allocations?filter=pin,meta-pin", &resp)
        test.MakeStreamingGet(t, rest, url(rest)+"/allocations?filter=pin,meta-pin", &resp, false)
        if len(resp) != 3 ||
            !resp[0].Cid.Equals(clustertest.Cid1) || !resp[1].Cid.Equals(clustertest.Cid2) ||
            !resp[2].Cid.Equals(clustertest.Cid3) {
            t.Error("unexpected pin list: ", resp)
        }

        test.MakeStreamingGet(t, rest, url(rest)+"/allocations", &resp)
        test.MakeStreamingGet(t, rest, url(rest)+"/allocations", &resp, false)
        if len(resp) != 3 ||
            !resp[0].Cid.Equals(clustertest.Cid1) || !resp[1].Cid.Equals(clustertest.Cid2) ||
            !resp[2].Cid.Equals(clustertest.Cid3) {

@@ -522,7 +522,7 @@
        }

        errResp := api.Error{}
        test.MakeStreamingGet(t, rest, url(rest)+"/allocations?filter=invalid", &errResp)
        test.MakeStreamingGet(t, rest, url(rest)+"/allocations?filter=invalid", &errResp, false)
        if errResp.Code != http.StatusBadRequest {
            t.Error("an invalid filter value should 400")
        }

@@ -615,8 +615,9 @@ func TestAPIStatusAllEndpoint(t *testing.T) {
    defer rest.Shutdown(ctx)

    tf := func(t *testing.T, url test.URLFunc) {
        var resp []*api.GlobalPinInfo
        test.MakeGet(t, rest, url(rest)+"/pins", &resp)
        var resp []api.GlobalPinInfo

        test.MakeStreamingGet(t, rest, url(rest)+"/pins", &resp, false)

        // mockPinTracker returns 3 items for Cluster.StatusAll
        if len(resp) != 3 ||

@@ -626,8 +627,8 @@
        }

        // Test local=true
        var resp2 []*api.GlobalPinInfo
        test.MakeGet(t, rest, url(rest)+"/pins?local=true", &resp2)
        var resp2 []api.GlobalPinInfo
        test.MakeStreamingGet(t, rest, url(rest)+"/pins?local=true", &resp2, false)
        // mockPinTracker calls pintracker.StatusAll which returns 2
        // items.
        if len(resp2) != 2 {

@@ -635,38 +636,38 @@
        }

        // Test with filter
        var resp3 []*api.GlobalPinInfo
        test.MakeGet(t, rest, url(rest)+"/pins?filter=queued", &resp3)
        var resp3 []api.GlobalPinInfo
        test.MakeStreamingGet(t, rest, url(rest)+"/pins?filter=queued", &resp3, false)
        if len(resp3) != 0 {
            t.Errorf("unexpected statusAll+filter=queued resp:\n %+v", resp3)
        }

        var resp4 []*api.GlobalPinInfo
        test.MakeGet(t, rest, url(rest)+"/pins?filter=pinned", &resp4)
        var resp4 []api.GlobalPinInfo
        test.MakeStreamingGet(t, rest, url(rest)+"/pins?filter=pinned", &resp4, false)
        if len(resp4) != 1 {
            t.Errorf("unexpected statusAll+filter=pinned resp:\n %+v", resp4)
        }

        var resp5 []*api.GlobalPinInfo
        test.MakeGet(t, rest, url(rest)+"/pins?filter=pin_error", &resp5)
        var resp5 []api.GlobalPinInfo
        test.MakeStreamingGet(t, rest, url(rest)+"/pins?filter=pin_error", &resp5, false)
        if len(resp5) != 1 {
            t.Errorf("unexpected statusAll+filter=pin_error resp:\n %+v", resp5)
        }

        var resp6 []*api.GlobalPinInfo
        test.MakeGet(t, rest, url(rest)+"/pins?filter=error", &resp6)
        var resp6 []api.GlobalPinInfo
        test.MakeStreamingGet(t, rest, url(rest)+"/pins?filter=error", &resp6, false)
        if len(resp6) != 1 {
            t.Errorf("unexpected statusAll+filter=error resp:\n %+v", resp6)
        }

        var resp7 []*api.GlobalPinInfo
        test.MakeGet(t, rest, url(rest)+"/pins?filter=error,pinned", &resp7)
        var resp7 []api.GlobalPinInfo
        test.MakeStreamingGet(t, rest, url(rest)+"/pins?filter=error,pinned", &resp7, false)
        if len(resp7) != 2 {
            t.Errorf("unexpected statusAll+filter=error,pinned resp:\n %+v", resp7)
        }

        var errorResp api.Error
        test.MakeGet(t, rest, url(rest)+"/pins?filter=invalid", &errorResp)
        test.MakeStreamingGet(t, rest, url(rest)+"/pins?filter=invalid", &errorResp, false)
        if errorResp.Code != http.StatusBadRequest {
            t.Error("an invalid filter value should 400")
        }

@@ -681,32 +682,32 @@ func TestAPIStatusAllWithCidsEndpoint(t *testing.T) {
    defer rest.Shutdown(ctx)

    tf := func(t *testing.T, url test.URLFunc) {
        var resp []*api.GlobalPinInfo
        var resp []api.GlobalPinInfo
        cids := []string{
            clustertest.Cid1.String(),
            clustertest.Cid2.String(),
            clustertest.Cid3.String(),
            clustertest.Cid4.String(),
        }
        test.MakeGet(t, rest, url(rest)+"/pins/?cids="+strings.Join(cids, ","), &resp)
        test.MakeStreamingGet(t, rest, url(rest)+"/pins/?cids="+strings.Join(cids, ","), &resp, false)

        if len(resp) != 4 {
            t.Error("wrong number of responses")
        }

        // Test local=true
        var resp2 []*api.GlobalPinInfo
        test.MakeGet(t, rest, url(rest)+"/pins/?local=true&cids="+strings.Join(cids, ","), &resp2)
        var resp2 []api.GlobalPinInfo
        test.MakeStreamingGet(t, rest, url(rest)+"/pins/?local=true&cids="+strings.Join(cids, ","), &resp2, false)
        if len(resp2) != 4 {
            t.Error("wrong number of responses")
        }

        // Test with an error
        // Test with an error. This should produce a trailer error.
        cids = append(cids, clustertest.ErrorCid.String())
        var errorResp api.Error
        test.MakeGet(t, rest, url(rest)+"/pins/?local=true&cids="+strings.Join(cids, ","), &errorResp)
        if errorResp.Message != clustertest.ErrBadCid.Error() {
            t.Error("expected an error")
        var resp3 []api.GlobalPinInfo
        test.MakeStreamingGet(t, rest, url(rest)+"/pins/?local=true&cids="+strings.Join(cids, ","), &resp3, true)
        if len(resp3) != 4 {
            t.Error("wrong number of responses")
        }
    }

@@ -782,14 +783,14 @@ func TestAPIRecoverAllEndpoint(t *testing.T) {
    defer rest.Shutdown(ctx)

    tf := func(t *testing.T, url test.URLFunc) {
        var resp []*api.GlobalPinInfo
        test.MakePost(t, rest, url(rest)+"/pins/recover?local=true", []byte{}, &resp)
        var resp []api.GlobalPinInfo
        test.MakeStreamingPost(t, rest, url(rest)+"/pins/recover?local=true", nil, "", &resp)
        if len(resp) != 0 {
            t.Fatal("bad response length")
        }

        var resp1 []*api.GlobalPinInfo
        test.MakePost(t, rest, url(rest)+"/pins/recover", []byte{}, &resp1)
        var resp1 []api.GlobalPinInfo
        test.MakeStreamingPost(t, rest, url(rest)+"/pins/recover", nil, "", &resp1)
        if len(resp1) == 0 {
            t.Fatal("bad response length")
        }
api/types.go
@@ -217,6 +217,36 @@ func IPFSPinStatusFromString(t string) IPFSPinStatus {
    }
}

// String returns the string form of the status as written by IPFS.
func (ips IPFSPinStatus) String() string {
    switch ips {
    case IPFSPinStatusDirect:
        return "direct"
    case IPFSPinStatusRecursive:
        return "recursive"
    case IPFSPinStatusIndirect:
        return "indirect"
    default:
        return ""
    }
}

// UnmarshalJSON parses a status from JSON
func (ips *IPFSPinStatus) UnmarshalJSON(b []byte) error {
    var str string
    err := json.Unmarshal(b, &str)
    if err != nil {
        return err
    }
    *ips = IPFSPinStatusFromString(str)
    return nil
}

// MarshalJSON converts a status to JSON.
func (ips IPFSPinStatus) MarshalJSON() ([]byte, error) {
    return json.Marshal(ips.String())
}

// IsPinned returns true if the item is pinned as expected by the
// maxDepth parameter.
func (ips IPFSPinStatus) IsPinned(maxDepth PinDepth) bool {

@@ -247,6 +277,40 @@ var ipfsPinStatus2TrackerStatusMap = map[IPFSPinStatus]TrackerStatus{
    IPFSPinStatusError: TrackerStatusClusterError, //TODO(ajl): check suitability
}

// Cid is a CID with the MarshalJSON/UnmarshalJSON methods overwritten.
type Cid cid.Cid

func (c Cid) String() string {
    return cid.Cid(c).String()
}

// MarshalJSON marshals a CID as JSON as a normal CID string.
func (c Cid) MarshalJSON() ([]byte, error) {
    return json.Marshal(c.String())
}

// UnmarshalJSON reads a CID from its representation as JSON string.
func (c *Cid) UnmarshalJSON(b []byte) error {
    var cidStr string
    err := json.Unmarshal(b, &cidStr)
    if err != nil {
        return err
    }
    cc, err := cid.Decode(cidStr)
    if err != nil {
        return err
    }
    *c = Cid(cc)
    return nil
}

// IPFSPinInfo represents an IPFS Pin, which only has a CID and type.
// Its JSON form is what IPFS returns when querying a pinset.
type IPFSPinInfo struct {
    Cid  Cid           `json:"Cid" codec:"c"`
    Type IPFSPinStatus `json:"Type" codec:"t"`
}

// GlobalPinInfo contains cluster-wide status information about a tracked Cid,
// indexed by cluster peer.
type GlobalPinInfo struct {

@@ -320,6 +384,19 @@ type PinInfoShort struct {
    PriorityPin bool `json:"priority_pin" codec:"y,omitempty"`
}

// String provides a string representation of PinInfoShort.
func (pis PinInfoShort) String() string {
    var b strings.Builder
    fmt.Fprintf(&b, "status: %s\n", pis.Status)
    fmt.Fprintf(&b, "peername: %s\n", pis.PeerName)
    fmt.Fprintf(&b, "ipfs: %s\n", pis.IPFS)
    fmt.Fprintf(&b, "ipfsAddresses: %v\n", pis.IPFSAddresses)
    fmt.Fprintf(&b, "error: %s\n", pis.Error)
    fmt.Fprintf(&b, "attemptCount: %d\n", pis.AttemptCount)
    fmt.Fprintf(&b, "priority: %t\n", pis.PriorityPin)
    return b.String()
}

// PinInfo holds information about local pins. This is used by the Pin
// Trackers.
type PinInfo struct {

@@ -347,6 +424,17 @@ func (pi PinInfo) Defined() bool {
    return pi.Cid.Defined()
}

// String provides a string representation of PinInfo.
func (pi PinInfo) String() string {
    var b strings.Builder
    fmt.Fprintf(&b, "cid: %s\n", pi.Cid)
    fmt.Fprintf(&b, "name: %s\n", pi.Name)
    fmt.Fprintf(&b, "peer: %s\n", pi.Peer)
    fmt.Fprintf(&b, "allocations: %v\n", pi.Allocations)
    fmt.Fprintf(&b, "%s\n", pi.PinInfoShort)
    return b.String()
}

// Version holds version information
type Version struct {
    Version string `json:"version" codec:"v"`

@@ -571,6 +659,17 @@ func (pm PinMode) String() string {
    }
}

// ToIPFSPinStatus converts a PinMode to IPFSPinStatus.
func (pm PinMode) ToIPFSPinStatus() IPFSPinStatus {
    if pm == PinModeDirect {
        return IPFSPinStatusDirect
    }
    if pm == PinModeRecursive {
        return IPFSPinStatusRecursive
    }
    return IPFSPinStatusBug
}

// MarshalJSON converts the PinMode into a readable string in JSON.
func (pm PinMode) MarshalJSON() ([]byte, error) {
    return json.Marshal(pm.String())
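The point of these wrappers is the wire format: with MarshalJSON overridden, a CID and a pin status serialize as bare JSON strings, which is what makes IPFSPinInfo's JSON identical to the objects IPFS itself emits when listing pins. A toy illustration, using string stand-ins for the real types (the real Cid wraps the cid.Cid struct, where the override actually matters):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Cid stands in for api.Cid; marshalling yields a plain string.
    type Cid string

    func (c Cid) MarshalJSON() ([]byte, error) { return json.Marshal(string(c)) }

    // IPFSPinInfo mirrors the struct above with a string Type for brevity.
    type IPFSPinInfo struct {
        Cid  Cid    `json:"Cid"`
        Type string `json:"Type"`
    }

    func main() {
        b, _ := json.Marshal(IPFSPinInfo{Cid: "QmIllustrativeCid", Type: "recursive"})
        fmt.Println(string(b)) // {"Cid":"QmIllustrativeCid","Type":"recursive"}
    }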
cluster.go
@@ -271,7 +271,16 @@ func (c *Cluster) watchPinset() {
            stateSyncTimer.Reset(c.config.StateSyncInterval)
        case <-recoverTimer.C:
            logger.Debug("auto-triggering RecoverAllLocal()")
            c.RecoverAllLocal(ctx)

            out := make(chan api.PinInfo, 1024)
            go func() {
                for range out {
                }
            }()
            err := c.RecoverAllLocal(ctx, out)
            if err != nil {
                logger.Error(err)
            }
            recoverTimer.Reset(c.config.PinRecoverInterval)
        case <-c.ctx.Done():
            if !stateSyncTimer.Stop() {

@@ -436,6 +445,12 @@ func (c *Cluster) pushPingMetrics(ctx context.Context) {

    ticker := time.NewTicker(c.config.MonitorPingInterval)
    for {
        select {
        case <-ctx.Done():
            return
        default:
        }

        c.sendPingMetric(ctx)

        select {

@@ -507,11 +522,13 @@ func (c *Cluster) alertsHandler() {
                return
            }

            pinCh, err := cState.List(c.ctx)
            if err != nil {
                logger.Warn(err)
                return
            }
            pinCh := make(chan api.Pin, 1024)
            go func() {
                err = cState.List(c.ctx, pinCh)
                if err != nil {
                    logger.Warn(err)
                }
            }()

            for pin := range pinCh {
                if containsPeer(pin.Allocations, alrt.Peer) && distance.isClosest(pin.Cid) {

@@ -529,11 +546,17 @@ func (c *Cluster) watchPeers() {
    defer ticker.Stop()

    for {
        select {
        case <-c.ctx.Done():
            return
        default:
        }

        select {
        case <-c.ctx.Done():
            return
        case <-ticker.C:
            // logger.Debugf("%s watching peers", c.id)
            //logger.Debugf("%s watching peers", c.id)
            hasMe := false
            peers, err := c.consensus.Peers(c.ctx)
            if err != nil {

@@ -594,11 +617,14 @@ func (c *Cluster) vacatePeer(ctx context.Context, p peer.ID) {
        logger.Warn(err)
        return
    }
    pinCh, err := cState.List(ctx)
    if err != nil {
        logger.Warn(err)
        return
    }

    pinCh := make(chan api.Pin, 1024)
    go func() {
        err = cState.List(ctx, pinCh)
        if err != nil {
            logger.Warn(err)
        }
    }()

    for pin := range pinCh {
        if containsPeer(pin.Allocations, p) {

@@ -1070,7 +1096,13 @@ func (c *Cluster) Join(ctx context.Context, addr ma.Multiaddr) error {
    }

    // Start pinning items in the state that are not on IPFS yet.
    c.RecoverAllLocal(ctx)
    out := make(chan api.PinInfo, 1024)
    // discard outputs
    go func() {
        for range out {
        }
    }()
    go c.RecoverAllLocal(ctx, out)

    logger.Infof("%s: joined %s's cluster", c.id.Pretty(), pid.Pretty())
    return nil
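Several call sites above only want RecoverAllLocal's error, yet the new signature always streams results, so each site spawns a goroutine that discards the output: a streaming callee blocks as soon as an undrained buffer fills. A self-contained sketch of why the drain is mandatory (types and counts are illustrative):

    package main

    import (
        "context"
        "errors"
        "log"
    )

    type pinInfo struct{ cid string }

    // recoverAllLocal mimics the new streaming signature: it must be able
    // to send every result before it can return its error.
    func recoverAllLocal(ctx context.Context, out chan<- pinInfo) error {
        defer close(out)
        for i := 0; i < 2000; i++ { // more items than the buffer holds
            out <- pinInfo{cid: "illustrative"}
        }
        return errors.New("illustrative failure")
    }

    func main() {
        out := make(chan pinInfo, 1024)
        go func() {
            for range out { // discard: we only care about the error,
            } // but without this drain the sender would block at item 1025
        }()
        if err := recoverAllLocal(context.Background(), out); err != nil {
            log.Println(err)
        }
    }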
@ -1100,6 +1132,8 @@ func (c *Cluster) distances(ctx context.Context, exclude peer.ID) (*distanceChec
|
|||
func (c *Cluster) StateSync(ctx context.Context) error {
|
||||
_, span := trace.StartSpan(ctx, "cluster/StateSync")
|
||||
defer span.End()
|
||||
logger.Debug("StateSync")
|
||||
|
||||
ctx = trace.NewContext(c.ctx, span)
|
||||
|
||||
if c.config.FollowerMode {
|
||||
|
@ -1122,10 +1156,13 @@ func (c *Cluster) StateSync(ctx context.Context) error {
|
|||
return err // could not list peers
|
||||
}
|
||||
|
||||
clusterPins, err := cState.List(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
clusterPins := make(chan api.Pin, 1024)
|
||||
go func() {
|
||||
err = cState.List(ctx, clusterPins)
|
||||
if err != nil {
|
||||
logger.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Unpin expired items when we are the closest peer to them.
|
||||
for p := range clusterPins {
|
||||
|
@ -1140,24 +1177,29 @@ func (c *Cluster) StateSync(ctx context.Context) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// StatusAll returns the GlobalPinInfo for all tracked Cids in all peers.
|
||||
// If an error happens, the slice will contain as much information as
|
||||
// could be fetched from other peers.
|
||||
func (c *Cluster) StatusAll(ctx context.Context, filter api.TrackerStatus) ([]api.GlobalPinInfo, error) {
|
||||
// StatusAll returns the GlobalPinInfo for all tracked Cids in all peers on
|
||||
// the out channel. This is done by broacasting a StatusAll to all peers. If
|
||||
// an error happens, it is returned. This method blocks until it finishes. The
|
||||
// operation can be aborted by cancelling the context.
|
||||
func (c *Cluster) StatusAll(ctx context.Context, filter api.TrackerStatus, out chan<- api.GlobalPinInfo) error {
|
||||
_, span := trace.StartSpan(ctx, "cluster/StatusAll")
|
||||
defer span.End()
|
||||
ctx = trace.NewContext(c.ctx, span)
|
||||
|
||||
return c.globalPinInfoSlice(ctx, "PinTracker", "StatusAll", filter)
|
||||
in := make(chan api.TrackerStatus, 1)
|
||||
in <- filter
|
||||
close(in)
|
||||
return c.globalPinInfoStream(ctx, "PinTracker", "StatusAll", in, out)
|
||||
}
|
||||
|
||||
// StatusAllLocal returns the PinInfo for all the tracked Cids in this peer.
|
||||
func (c *Cluster) StatusAllLocal(ctx context.Context, filter api.TrackerStatus) []api.PinInfo {
|
||||
// StatusAllLocal returns the PinInfo for all the tracked Cids in this peer on
|
||||
// the out channel. It blocks until finished.
|
||||
func (c *Cluster) StatusAllLocal(ctx context.Context, filter api.TrackerStatus, out chan<- api.PinInfo) error {
|
||||
_, span := trace.StartSpan(ctx, "cluster/StatusAllLocal")
|
||||
defer span.End()
|
||||
ctx = trace.NewContext(c.ctx, span)
|
||||
|
||||
return c.tracker.StatusAll(ctx, filter)
|
||||
return c.tracker.StatusAll(ctx, filter, out)
|
||||
}
|
||||
|
||||
// Status returns the GlobalPinInfo for a given Cid as fetched from all
|
||||
|
@ -1206,13 +1248,15 @@ func (c *Cluster) localPinInfoOp(
|
|||
return pInfo, err
|
||||
}
|
||||
|
||||
// RecoverAll triggers a RecoverAllLocal operation on all peers.
|
||||
func (c *Cluster) RecoverAll(ctx context.Context) ([]api.GlobalPinInfo, error) {
|
||||
// RecoverAll triggers a RecoverAllLocal operation on all peers and returns
|
||||
// GlobalPinInfo objets for all recovered items. This method blocks until
|
||||
// finished. Operation can be aborted by cancelling the context.
|
||||
func (c *Cluster) RecoverAll(ctx context.Context, out chan<- api.GlobalPinInfo) error {
|
||||
_, span := trace.StartSpan(ctx, "cluster/RecoverAll")
|
||||
defer span.End()
|
||||
ctx = trace.NewContext(c.ctx, span)
|
||||
|
||||
return c.globalPinInfoSlice(ctx, "Cluster", "RecoverAllLocal", nil)
|
||||
return c.globalPinInfoStream(ctx, "Cluster", "RecoverAllLocal", nil, out)
|
||||
}
|
||||
|
||||
// RecoverAllLocal triggers a RecoverLocal operation for all Cids tracked
|
||||
|
@ -1222,15 +1266,16 @@ func (c *Cluster) RecoverAll(ctx context.Context) ([]api.GlobalPinInfo, error) {
|
|||
// is faster than calling Pin on the same CID as it avoids committing an
|
||||
// identical pin to the consensus layer.
|
||||
//
|
||||
// It returns the list of pins that were re-queued for pinning.
|
||||
// It returns the list of pins that were re-queued for pinning on the out
|
||||
// channel. It blocks until done.
|
||||
//
|
||||
// RecoverAllLocal is called automatically every PinRecoverInterval.
|
||||
func (c *Cluster) RecoverAllLocal(ctx context.Context) ([]api.PinInfo, error) {
|
||||
func (c *Cluster) RecoverAllLocal(ctx context.Context, out chan<- api.PinInfo) error {
|
||||
_, span := trace.StartSpan(ctx, "cluster/RecoverAllLocal")
|
||||
defer span.End()
|
||||
ctx = trace.NewContext(c.ctx, span)
|
||||
|
||||
return c.tracker.RecoverAll(ctx)
|
||||
return c.tracker.RecoverAll(ctx, out)
|
||||
}
|
||||
|
||||
// Recover triggers a recover operation for a given Cid in all
|
||||
|
@ -1261,48 +1306,45 @@ func (c *Cluster) RecoverLocal(ctx context.Context, h cid.Cid) (api.PinInfo, err
|
|||
return c.localPinInfoOp(ctx, h, c.tracker.Recover)
|
||||
}
|
||||
|
||||
// PinsChannel returns a channel from which to read all the pins in the
|
||||
// pinset, which are part of the current global state. This is the source of
|
||||
// truth as to which pins are managed and their allocation, but does not
|
||||
// indicate if the item is successfully pinned. For that, use the Status*()
|
||||
// methods.
|
||||
// Pins sends pins on the given out channel as it iterates the full
|
||||
// pinset (current global state). This is the source of truth as to which pins
|
||||
// are managed and their allocation, but does not indicate if the item is
|
||||
// successfully pinned. For that, use the Status*() methods.
|
||||
//
|
||||
// The channel can be aborted by cancelling the context.
|
||||
func (c *Cluster) PinsChannel(ctx context.Context) (<-chan api.Pin, error) {
|
||||
_, span := trace.StartSpan(ctx, "cluster/PinsChannel")
|
||||
// The operation can be aborted by cancelling the context. This methods blocks
|
||||
// until the operation has completed.
|
||||
func (c *Cluster) Pins(ctx context.Context, out chan<- api.Pin) error {
|
||||
_, span := trace.StartSpan(ctx, "cluster/Pins")
|
||||
defer span.End()
|
||||
ctx = trace.NewContext(c.ctx, span)
|
||||
|
||||
cState, err := c.consensus.State(ctx)
|
||||
if err != nil {
|
||||
logger.Error(err)
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
return cState.List(ctx)
|
||||
return cState.List(ctx, out)
|
||||
}
|
||||
|
||||
// Pins returns the list of Cids managed by Cluster and which are part
|
||||
// pinsSlice returns the list of Cids managed by Cluster and which are part
|
||||
// of the current global state. This is the source of truth as to which
|
||||
// pins are managed and their allocation, but does not indicate if
|
||||
// the item is successfully pinned. For that, use StatusAll().
|
||||
//
|
||||
// It is recommended to use PinsChannel(), as this method is equivalent to
|
||||
// loading the full pinset in memory!
|
||||
func (c *Cluster) Pins(ctx context.Context) ([]api.Pin, error) {
|
||||
_, span := trace.StartSpan(ctx, "cluster/Pins")
|
||||
defer span.End()
|
||||
ctx = trace.NewContext(c.ctx, span)
|
||||
|
||||
ch, err := c.PinsChannel(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
func (c *Cluster) pinsSlice(ctx context.Context) ([]api.Pin, error) {
|
||||
out := make(chan api.Pin, 1024)
|
||||
var err error
|
||||
go func() {
|
||||
err = c.Pins(ctx, out)
|
||||
}()
|
||||
|
||||
var pins []api.Pin
|
||||
for pin := range ch {
|
||||
for pin := range out {
|
||||
pins = append(pins, pin)
|
||||
}
|
||||
return pins, ctx.Err()
|
||||
return pins, err
|
||||
}
|
||||
|
||||
// PinGet returns information for a single Cid managed by Cluster.

@@ -1751,14 +1793,12 @@ func (c *Cluster) peersWithFilter(ctx context.Context, peers []peer.ID) []api.ID
if rpc.IsAuthorizationError(err) {
continue
}

ids[i] = api.ID{}
ids[i].ID = peers[i]
ids[i].Error = err.Error()
}

return ids

}

// getTrustedPeers gives listed of trusted peers except the current peer and

@@ -1935,15 +1975,18 @@ func (c *Cluster) globalPinInfoCid(ctx context.Context, comp, method string, h c
return gpin, nil
}

func (c *Cluster) globalPinInfoSlice(ctx context.Context, comp, method string, arg interface{}) ([]api.GlobalPinInfo, error) {
ctx, span := trace.StartSpan(ctx, "cluster/globalPinInfoSlice")
func (c *Cluster) globalPinInfoStream(ctx context.Context, comp, method string, inChan interface{}, out chan<- api.GlobalPinInfo) error {
defer close(out)

ctx, span := trace.StartSpan(ctx, "cluster/globalPinInfoStream")
defer span.End()

if arg == nil {
arg = struct{}{}
if inChan == nil {
emptyChan := make(chan struct{})
close(emptyChan)
inChan = emptyChan
}

infos := make([]api.GlobalPinInfo, 0)
fullMap := make(map[cid.Cid]api.GlobalPinInfo)

var members []peer.ID

@@ -1954,27 +1997,31 @@ func (c *Cluster) globalPinInfoSlice(ctx context.Context, comp, method string, a
members, err = c.consensus.Peers(ctx)
if err != nil {
logger.Error(err)
return nil, err
return err
}
}
lenMembers := len(members)

replies := make([][]api.PinInfo, lenMembers)
msOut := make(chan api.PinInfo)

// We don't have a good timeout proposal for this. Depending on the
// size of the state and the performance of IPFS and the network, this
// may take moderately long.
ctxs, cancels := rpcutil.CtxsWithCancel(ctx, lenMembers)
defer rpcutil.MultiCancel(cancels)
// If we did, this is the place to put it.
ctx, cancel := context.WithCancel(ctx)
defer cancel()

errs := c.rpcClient.MultiCall(
ctxs,
members,
comp,
method,
arg,
rpcutil.CopyPinInfoSliceToIfaces(replies),
)
errsCh := make(chan []error, 1)
go func() {
defer close(errsCh)
errsCh <- c.rpcClient.MultiStream(
ctx,
members,
comp,
method,
inChan,
msOut,
)
}()

setPinInfo := func(p api.PinInfo) {
if !p.Defined() {

@@ -1989,20 +2036,25 @@ func (c *Cluster) globalPinInfoSlice(ctx context.Context, comp, method string, a
fullMap[p.Cid] = info
}

// make the big collection.
for pin := range msOut {
setPinInfo(pin)
}

// This WAITs until MultiStream is DONE.
erroredPeers := make(map[peer.ID]string)
for i, r := range replies {
if e := errs[i]; e != nil { // This error must come from not being able to contact that cluster member
if rpc.IsAuthorizationError(e) {
logger.Debug("rpc auth error", e)
errs, ok := <-errsCh
if ok {
for i, err := range errs {
if err == nil {
continue
}
logger.Errorf("%s: error in broadcast response from %s: %s ", c.id, members[i], e)
erroredPeers[members[i]] = e.Error()
continue
}

for _, pin := range r {
setPinInfo(pin)
if rpc.IsAuthorizationError(err) {
logger.Debug("rpc auth error", err)
continue
}
logger.Errorf("%s: error in broadcast response from %s: %s ", c.id, members[i], err)
erroredPeers[members[i]] = err.Error()
}
}

@@ -2031,10 +2083,16 @@ func (c *Cluster) globalPinInfoSlice(ctx context.Context, comp, method string, a
}

for _, v := range fullMap {
infos = append(infos, v)
select {
case <-ctx.Done():
err := fmt.Errorf("%s.%s aborted: %w", comp, method, ctx.Err())
logger.Error(err)
return err
case out <- v:
}
}

return infos, nil
return nil
}
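globalPinInfoStream is one instance of the streaming idiom this PR adopts everywhere: a producer goroutine owns and closes the output channel, the consumer drains it fully, and the terminal error travels on a separate one-slot channel. A self-contained sketch of that idiom (illustrative names, not from this PR):

package main

import "fmt"

// produce owns out: it sends items, reports its terminal error (nil
// here) on errCh, and closes both channels on return.
func produce(out chan<- int, errCh chan<- error) {
defer close(errCh)
defer close(out)
for i := 0; i < 3; i++ {
out <- i
}
errCh <- nil
}

func main() {
out := make(chan int, 1024) // a generous buffer keeps the producer from blocking
errCh := make(chan error, 1)
go produce(out, errCh)

for v := range out { // drain fully before checking the error
fmt.Println(v)
}
if err := <-errCh; err != nil {
fmt.Println("stream failed:", err)
}
}
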

func (c *Cluster) getIDForPeer(ctx context.Context, pid peer.ID) (*api.ID, error) {

@@ -64,17 +64,17 @@ func (ipfs *mockConnector) Pin(ctx context.Context, pin api.Pin) error {
if pin.Cid == test.ErrorCid {
return errors.New("trying to pin ErrorCid")
}
ipfs.pins.Store(pin.Cid.String(), pin.MaxDepth)
ipfs.pins.Store(pin.Cid, pin.MaxDepth)
return nil
}

func (ipfs *mockConnector) Unpin(ctx context.Context, c cid.Cid) error {
ipfs.pins.Delete(c.String())
ipfs.pins.Delete(c)
return nil
}

func (ipfs *mockConnector) PinLsCid(ctx context.Context, pin api.Pin) (api.IPFSPinStatus, error) {
dI, ok := ipfs.pins.Load(pin.Cid.String())
dI, ok := ipfs.pins.Load(pin.Cid)
if !ok {
return api.IPFSPinStatusUnpinned, nil
}

@@ -85,8 +85,9 @@ func (ipfs *mockConnector) PinLsCid(ctx context.Context, pin api.Pin) (api.IPFSP
return api.IPFSPinStatusRecursive, nil
}

func (ipfs *mockConnector) PinLs(ctx context.Context, filter string) (map[string]api.IPFSPinStatus, error) {
m := make(map[string]api.IPFSPinStatus)
func (ipfs *mockConnector) PinLs(ctx context.Context, in []string, out chan<- api.IPFSPinInfo) error {
defer close(out)

var st api.IPFSPinStatus
ipfs.pins.Range(func(k, v interface{}) bool {
switch v.(api.PinDepth) {

@@ -95,12 +96,13 @@ func (ipfs *mockConnector) PinLs(ctx context.Context, filter string) (map[string
default:
st = api.IPFSPinStatusRecursive
}
c := k.(cid.Cid)

m[k.(string)] = st
out <- api.IPFSPinInfo{Cid: api.Cid(c), Type: st}
return true
})

return m, nil
return nil
}

func (ipfs *mockConnector) SwarmPeers(ctx context.Context) ([]peer.ID, error) {

@@ -795,7 +797,7 @@ func TestClusterPins(t *testing.T) {

pinDelay()

pins, err := cl.Pins(ctx)
pins, err := cl.pinsSlice(ctx)
if err != nil {
t.Fatal(err)
}

@@ -942,10 +944,16 @@ func TestClusterRecoverAllLocal(t *testing.T) {

pinDelay()

recov, err := cl.RecoverAllLocal(ctx)
if err != nil {
t.Error("did not expect an error")
}
out := make(chan api.PinInfo, 10)
go func() {
err := cl.RecoverAllLocal(ctx, out)
if err != nil {
t.Error("did not expect an error")
}
}()

recov := collectPinInfos(t, out)

if len(recov) != 1 {
t.Fatalf("there should be one pin recovered, got = %d", len(recov))
}

@@ -39,17 +39,23 @@ func jsonFormatObject(resp interface{}) {
}

func jsonFormatPrint(obj interface{}) {
print := func(o interface{}) {
j, err := json.MarshalIndent(o, "", " ")
checkErr("generating json output", err)
fmt.Printf("%s\n", j)
}

switch r := obj.(type) {
case chan api.Pin:
for o := range r {
j, err := json.MarshalIndent(o, "", " ")
checkErr("generating json output", err)
fmt.Printf("%s\n", j)
print(o)
}
case chan api.GlobalPinInfo:
for o := range r {
print(o)
}
default:
j, err := json.MarshalIndent(obj, "", " ")
checkErr("generating json output", err)
fmt.Printf("%s\n", j)
print(obj)
}

}

@@ -82,8 +88,8 @@ func textFormatObject(resp interface{}) {
for _, item := range r {
textFormatObject(item)
}
case []api.GlobalPinInfo:
for _, item := range r {
case chan api.GlobalPinInfo:
for item := range r {
textFormatObject(item)
}
case chan api.Pin:

@@ -888,21 +888,31 @@ separated list). The following are valid status values:
checkErr("parsing cid", err)
cids[i] = ci
}
if len(cids) == 1 {
resp, cerr := globalClient.Status(ctx, cids[0], c.Bool("local"))
formatResponse(c, resp, cerr)
} else if len(cids) > 1 {
resp, cerr := globalClient.StatusCids(ctx, cids, c.Bool("local"))
formatResponse(c, resp, cerr)
} else {
filterFlag := c.String("filter")
filter := api.TrackerStatusFromString(c.String("filter"))
if filter == api.TrackerStatusUndefined && filterFlag != "" {
checkErr("parsing filter flag", errors.New("invalid filter name"))
out := make(chan api.GlobalPinInfo, 1024)
chErr := make(chan error, 1)
go func() {
defer close(chErr)

if len(cids) == 1 {
resp, cerr := globalClient.Status(ctx, cids[0], c.Bool("local"))
out <- resp
chErr <- cerr
close(out)
} else if len(cids) > 1 {
chErr <- globalClient.StatusCids(ctx, cids, c.Bool("local"), out)
} else {
filterFlag := c.String("filter")
filter := api.TrackerStatusFromString(c.String("filter"))
if filter == api.TrackerStatusUndefined && filterFlag != "" {
checkErr("parsing filter flag", errors.New("invalid filter name"))
}
chErr <- globalClient.StatusAll(ctx, filter, c.Bool("local"), out)
}
resp, cerr := globalClient.StatusAll(ctx, filter, c.Bool("local"))
formatResponse(c, resp, cerr)
}
}()

formatResponse(c, out, nil)
err := <-chErr
formatResponse(c, nil, err)
return nil
},
},

@@ -932,8 +942,15 @@ operations on the contacted peer (as opposed to on every peer).
resp, cerr := globalClient.Recover(ctx, ci, c.Bool("local"))
formatResponse(c, resp, cerr)
} else {
resp, cerr := globalClient.RecoverAll(ctx, c.Bool("local"))
formatResponse(c, resp, cerr)
out := make(chan api.GlobalPinInfo, 1024)
errCh := make(chan error, 1)
go func() {
defer close(errCh)
errCh <- globalClient.RecoverAll(ctx, c.Bool("local"), out)
}()
formatResponse(c, out, nil)
err := <-errCh
formatResponse(c, nil, err)
}
return nil
},

@@ -493,14 +493,17 @@ func printStatusOnline(absPath, clusterName string) error {
if err != nil {
return cli.Exit(errors.Wrap(err, "error creating client"), 1)
}
gpis, err := client.StatusAll(ctx, 0, true)
if err != nil {
return err
}

// do not return errors after this.
out := make(chan api.GlobalPinInfo, 1024)
errCh := make(chan error, 1)

go func() {
defer close(errCh)
errCh <- client.StatusAll(ctx, 0, true, out)
}()

var pid string
for _, gpi := range gpis {
for gpi := range out {
if pid == "" { // do this once
// PeerMap will only have one key
for k := range gpi.PeerMap {

@@ -511,7 +514,8 @@ func printStatusOnline(absPath, clusterName string) error {
pinInfo := gpi.PeerMap[pid]
printPin(gpi.Cid, pinInfo.Status.String(), gpi.Name, pinInfo.Error)
}
return nil
err = <-errCh
return err
}

func printStatusOffline(cfgHelper *cmdutils.ConfigHelper) error {

@@ -528,14 +532,20 @@ func printStatusOffline(cfgHelper *cmdutils.ConfigHelper) error {
if err != nil {
return err
}
pins, err := st.List(context.Background())
if err != nil {
return err
}
for pin := range pins {

out := make(chan api.Pin, 1024)
errCh := make(chan error, 1)
go func() {
defer close(errCh)
errCh <- st.List(context.Background(), out)
}()

for pin := range out {
printPin(pin.Cid, "offline", pin.Name, "")
}
return nil

err = <-errCh
return err
}

func printPin(c cid.Cid, status, name, err string) {

@@ -222,16 +222,22 @@ func importState(r io.Reader, st state.State, opts api.PinOptions) error {

// ExportState saves a json representation of a state
func exportState(w io.Writer, st state.State) error {
pins, err := st.List(context.Background())
out := make(chan api.Pin, 10000)
errCh := make(chan error, 1)
go func() {
defer close(errCh)
errCh <- st.List(context.Background(), out)
}()
var err error
enc := json.NewEncoder(w)
for pin := range out {
if err == nil {
err = enc.Encode(pin)
}
}
if err != nil {
return err
}
enc := json.NewEncoder(w)
for pin := range pins {
err := enc.Encode(pin)
if err != nil {
return err
}
}
return nil
err = <-errCh
return err
}
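exportState writes one JSON object per pin to the stream. For symmetry, a reader of such output can decode object-by-object until io.EOF; a self-contained sketch with a toy pin type (not the repo's api.Pin or its actual importState):

package main

import (
"encoding/json"
"fmt"
"io"
"strings"
)

// pin is a stand-in for api.Pin, trimmed to two fields.
type pin struct {
Cid  string
Name string
}

func main() {
r := strings.NewReader(`{"Cid":"Qm1","Name":"a"}` + "\n" + `{"Cid":"Qm2","Name":"b"}`)
dec := json.NewDecoder(r)
for {
var p pin
if err := dec.Decode(&p); err == io.EOF {
break
} else if err != nil {
fmt.Println("decode error:", err)
return
}
fmt.Println(p.Cid, p.Name)
}
}
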

@@ -125,14 +125,14 @@ func TestConsensusPin(t *testing.T) {
t.Fatal("error getting state:", err)
}

ch, err := st.List(ctx)
out := make(chan api.Pin, 10)
err = st.List(ctx, out)
if err != nil {
t.Fatal(err)
}

var pins []api.Pin

for p := range ch {
for p := range out {
pins = append(pins, p)
}

@@ -186,14 +186,16 @@ func TestConsensusUpdate(t *testing.T) {
t.Fatal("error getting state:", err)
}

ch, err := st.List(ctx)
// Channel will not block sending because plenty of space
out := make(chan api.Pin, 100)
err = st.List(ctx, out)
if err != nil {
t.Fatal(err)
}

var pins []api.Pin

for p := range ch {
for p := range out {
pins = append(pins, p)
}

@@ -243,14 +245,15 @@ func TestConsensusAddRmPeer(t *testing.T) {
t.Fatal("error getting state:", err)
}

ch, err := st.List(ctx)
out := make(chan api.Pin, 100)
err = st.List(ctx, out)
if err != nil {
t.Fatal(err)
}

var pins []api.Pin

for p := range ch {
for p := range out {
pins = append(pins, p)
}

@@ -310,14 +313,15 @@ func TestConsensusDistrustPeer(t *testing.T) {
t.Fatal("error getting state:", err)
}

ch, err := st.List(ctx)
out := make(chan api.Pin, 10)
err = st.List(ctx, out)
if err != nil {
t.Fatal(err)
}

var pins []api.Pin

for p := range ch {
for p := range out {
pins = append(pins, p)
}

@@ -372,14 +376,15 @@ func TestOfflineState(t *testing.T) {
t.Fatal(err)
}

ch, err := offlineState.List(ctx)
out := make(chan api.Pin, 100)
err = offlineState.List(ctx, out)
if err != nil {
t.Fatal(err)
}

var pins []api.Pin

for p := range ch {
for p := range out {
pins = append(pins, p)
}

@@ -412,14 +417,15 @@ func TestBatching(t *testing.T) {

time.Sleep(250 * time.Millisecond)

ch, err := st.List(ctx)
out := make(chan api.Pin, 100)
err = st.List(ctx, out)
if err != nil {
t.Fatal(err)
}

var pins []api.Pin

for p := range ch {
for p := range out {
pins = append(pins, p)
}

@@ -430,14 +436,15 @@ func TestBatching(t *testing.T) {
// Trigger batch auto-commit by time
time.Sleep(time.Second)

ch, err = st.List(ctx)
out = make(chan api.Pin, 100)
err = st.List(ctx, out)
if err != nil {
t.Fatal(err)
}

pins = nil

for p := range ch {
for p := range out {
pins = append(pins, p)
}

@@ -456,13 +463,14 @@ func TestBatching(t *testing.T) {
// Give a chance for things to persist
time.Sleep(250 * time.Millisecond)

ch, err = st.List(ctx)
out = make(chan api.Pin, 100)
err = st.List(ctx, out)
if err != nil {
t.Fatal(err)
}

pins = nil
for p := range ch {
for p := range out {
pins = append(pins, p)
}

@@ -472,12 +480,14 @@ func TestBatching(t *testing.T) {

// wait for the last pin
time.Sleep(time.Second)
ch, err = st.List(ctx)

out = make(chan api.Pin, 100)
err = st.List(ctx, out)
if err != nil {
t.Fatal(err)
}
pins = nil
for p := range ch {
for p := range out {
pins = append(pins, p)
}

@@ -99,13 +99,14 @@ func TestConsensusPin(t *testing.T) {
t.Fatal("error getting state:", err)
}

ch, err := st.List(ctx)
out := make(chan api.Pin, 10)
err = st.List(ctx, out)
if err != nil {
t.Fatal(err)
}

var pins []api.Pin
for p := range ch {
for p := range out {
pins = append(pins, p)
}

@@ -154,13 +155,14 @@ func TestConsensusUpdate(t *testing.T) {
t.Fatal("error getting state:", err)
}

ch, err := st.List(ctx)
out := make(chan api.Pin, 10)
err = st.List(ctx, out)
if err != nil {
t.Fatal(err)
}

var pins []api.Pin
for p := range ch {
for p := range out {
pins = append(pins, p)
}

@@ -330,13 +332,15 @@ func TestRaftLatestSnapshot(t *testing.T) {
if err != nil {
t.Fatal("Snapshot bytes returned could not restore to state: ", err)
}
ch, err := snapState.List(ctx)

out := make(chan api.Pin, 100)
err = snapState.List(ctx, out)
if err != nil {
t.Fatal(err)
}

var pins []api.Pin
for p := range ch {
for p := range out {
pins = append(pins, p)
}

@@ -27,13 +27,14 @@ func TestApplyToPin(t *testing.T) {
}
op.ApplyTo(st)

ch, err := st.List(ctx)
out := make(chan api.Pin, 100)
err = st.List(ctx, out)
if err != nil {
t.Fatal(err)
}

var pins []api.Pin
for p := range ch {
for p := range out {
pins = append(pins, p)
}

@@ -59,11 +60,13 @@ func TestApplyToUnpin(t *testing.T) {
}
st.Add(ctx, testPin(test.Cid1))
op.ApplyTo(st)
pins, err := st.List(ctx)

out := make(chan api.Pin, 100)
err = st.List(ctx, out)
if err != nil {
t.Fatal(err)
}
if len(pins) != 0 {
if len(out) != 0 {
t.Error("the state was not modified correctly")
}
}

go.mod
@@ -18,7 +18,7 @@ require (
github.com/ipfs/go-cid v0.1.0
github.com/ipfs/go-datastore v0.5.1
github.com/ipfs/go-ds-badger v0.3.0
github.com/ipfs/go-ds-crdt v0.3.3
github.com/ipfs/go-ds-crdt v0.3.4
github.com/ipfs/go-ds-leveldb v0.5.0
github.com/ipfs/go-fs-lock v0.0.7
github.com/ipfs/go-ipfs-api v0.3.0

@@ -30,10 +30,10 @@ require (
github.com/ipfs/go-ipfs-pinner v0.2.1
github.com/ipfs/go-ipfs-posinfo v0.0.1
github.com/ipfs/go-ipld-cbor v0.0.6
github.com/ipfs/go-ipld-format v0.2.0
github.com/ipfs/go-ipld-format v0.3.0
github.com/ipfs/go-ipns v0.1.2
github.com/ipfs/go-log/v2 v2.5.0
github.com/ipfs/go-merkledag v0.5.1
github.com/ipfs/go-merkledag v0.6.0
github.com/ipfs/go-mfs v0.1.3-0.20210507195338-96fbfa122164
github.com/ipfs/go-path v0.2.2
github.com/ipfs/go-unixfs v0.3.1

@@ -45,7 +45,7 @@ require (
github.com/libp2p/go-libp2p-connmgr v0.3.1
github.com/libp2p/go-libp2p-consensus v0.0.1
github.com/libp2p/go-libp2p-core v0.13.0
github.com/libp2p/go-libp2p-gorpc v0.3.0
github.com/libp2p/go-libp2p-gorpc v0.3.1
github.com/libp2p/go-libp2p-gostream v0.3.1
github.com/libp2p/go-libp2p-http v0.2.1
github.com/libp2p/go-libp2p-kad-dht v0.15.0

@@ -119,14 +119,14 @@ require (
github.com/huin/goupnp v1.0.2 // indirect
github.com/ipfs/bbloom v0.0.4 // indirect
github.com/ipfs/go-bitfield v1.0.0 // indirect
github.com/ipfs/go-bitswap v0.5.1 // indirect
github.com/ipfs/go-blockservice v0.2.1 // indirect
github.com/ipfs/go-bitswap v0.6.0 // indirect
github.com/ipfs/go-blockservice v0.3.0 // indirect
github.com/ipfs/go-cidutil v0.0.2 // indirect
github.com/ipfs/go-fetcher v1.6.1 // indirect
github.com/ipfs/go-ipfs-blockstore v1.1.2 // indirect
github.com/ipfs/go-ipfs-blockstore v1.2.0 // indirect
github.com/ipfs/go-ipfs-delay v0.0.1 // indirect
github.com/ipfs/go-ipfs-exchange-interface v0.1.0 // indirect
github.com/ipfs/go-ipfs-exchange-offline v0.1.1 // indirect
github.com/ipfs/go-ipfs-exchange-offline v0.2.0 // indirect
github.com/ipfs/go-ipfs-pq v0.0.2 // indirect
github.com/ipfs/go-ipfs-provider v0.7.1 // indirect
github.com/ipfs/go-ipfs-util v0.0.2 // indirect

go.sum
@@ -424,8 +424,9 @@ github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiL
github.com/ipfs/go-bitswap v0.1.3/go.mod h1:YEQlFy0kkxops5Vy+OxWdRSEZIoS7I7KDIwoa5Chkps=
github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM=
github.com/ipfs/go-bitswap v0.3.4/go.mod h1:4T7fvNv/LmOys+21tnLzGKncMeeXUYUd1nUiJ2teMvI=
github.com/ipfs/go-bitswap v0.5.1 h1:721YAEDBnLIrvcIMkCHCdqp34hA8jwL9yKMkyJpSpco=
github.com/ipfs/go-bitswap v0.5.1/go.mod h1:P+ckC87ri1xFLvk74NlXdP0Kj9RmWAh4+H78sC6Qopo=
github.com/ipfs/go-bitswap v0.6.0 h1:f2rc6GZtoSFhEIzQmddgGiel9xntj02Dg0ZNf2hSC+w=
github.com/ipfs/go-bitswap v0.6.0/go.mod h1:Hj3ZXdOC5wBJvENtdqsixmzzRukqd8EHLxZLZc3mzRA=
github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc=
github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY=
github.com/ipfs/go-block-format v0.0.3 h1:r8t66QstRp/pd/or4dpnbVfXT5Gt7lOqRvC+/dDTpMc=

@@ -434,8 +435,9 @@ github.com/ipfs/go-blockservice v0.0.7/go.mod h1:EOfb9k/Y878ZTRY/CH0x5+ATtaipfbR
github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M=
github.com/ipfs/go-blockservice v0.1.1/go.mod h1:t+411r7psEUhLueM8C7aPA7cxCclv4O3VsUVxt9kz2I=
github.com/ipfs/go-blockservice v0.1.4/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU=
github.com/ipfs/go-blockservice v0.2.1 h1:NJ4j/cwEfIg60rzAWcCIxRtOwbf6ZPK49MewNxObCPQ=
github.com/ipfs/go-blockservice v0.2.1/go.mod h1:k6SiwmgyYgs4M/qt+ww6amPeUH9EISLRBnvUurKJhi8=
github.com/ipfs/go-blockservice v0.3.0 h1:cDgcZ+0P0Ih3sl8+qjFr2sVaMdysg/YZpLj5WJ8kiiw=
github.com/ipfs/go-blockservice v0.3.0/go.mod h1:P5ppi8IHDC7O+pA0AlGTF09jruB2h+oP3wVVaZl8sfk=
github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=

@@ -469,8 +471,8 @@ github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBR
github.com/ipfs/go-ds-badger v0.2.7/go.mod h1:02rnztVKA4aZwDuaRPTf8mpqcKmXP7mLl6JPxd14JHA=
github.com/ipfs/go-ds-badger v0.3.0 h1:xREL3V0EH9S219kFFueOYJJTcjgNSZ2HY1iSvN7U1Ro=
github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek=
github.com/ipfs/go-ds-crdt v0.3.3 h1:Q7fj+bm/gCfHte3axLQuCEzK1Uhsxgf065WLRvfeb0w=
github.com/ipfs/go-ds-crdt v0.3.3/go.mod h1:rcfJixHEd+hIWcu/8SecC/lVlNcAkhE6DNgRKPd1xgU=
github.com/ipfs/go-ds-crdt v0.3.4 h1:O/dFBkxxXxNO9cjfQwFQHTsoehfJtV1GNAhuRmLh2Dg=
github.com/ipfs/go-ds-crdt v0.3.4/go.mod h1:bFHBkP56kWufO55QxAKT7qZqz23thrh7FN5l+hYTHa4=
github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc=
github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8=
github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s=

@@ -488,8 +490,9 @@ github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma
github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw=
github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ=
github.com/ipfs/go-ipfs-blockstore v0.2.1/go.mod h1:jGesd8EtCM3/zPgx+qr0/feTXGUeRai6adgwC+Q+JvE=
github.com/ipfs/go-ipfs-blockstore v1.1.2 h1:WCXoZcMYnvOTmlpX+RSSnhVN0uCmbWTeepTGX5lgiXw=
github.com/ipfs/go-ipfs-blockstore v1.1.2/go.mod h1:w51tNR9y5+QXB0wkNcHt4O2aSZjTdqaEWaQdSxEyUOY=
github.com/ipfs/go-ipfs-blockstore v1.2.0 h1:n3WTeJ4LdICWs/0VSfjHrlqpPpl6MZ+ySd3j8qz0ykw=
github.com/ipfs/go-ipfs-blockstore v1.2.0/go.mod h1:eh8eTFLiINYNSNawfZOC7HOxNTxpB1PFuA5E1m/7exE=
github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ=
github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk=
github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw=

@@ -510,8 +513,9 @@ github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFq
github.com/ipfs/go-ipfs-exchange-interface v0.1.0 h1:TiMekCrOGQuWYtZO3mf4YJXDIdNgnKWZ9IE3fGlnWfo=
github.com/ipfs/go-ipfs-exchange-interface v0.1.0/go.mod h1:ych7WPlyHqFvCi/uQI48zLZuAWVP5iTQPXEfVaw5WEI=
github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0=
github.com/ipfs/go-ipfs-exchange-offline v0.1.1 h1:mEiXWdbMN6C7vtDG21Fphx8TGCbZPpQnz/496w/PL4g=
github.com/ipfs/go-ipfs-exchange-offline v0.1.1/go.mod h1:vTiBRIbzSwDD0OWm+i3xeT0mO7jG2cbJYatp3HPk5XY=
github.com/ipfs/go-ipfs-exchange-offline v0.2.0 h1:2PF4o4A7W656rC0RxuhUace997FTcDTcIQ6NoEtyjAI=
github.com/ipfs/go-ipfs-exchange-offline v0.2.0/go.mod h1:HjwBeW0dvZvfOMwDP0TSKXIHf2s+ksdP4E3MLDRtLKY=
github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4=
github.com/ipfs/go-ipfs-files v0.0.8/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs=
github.com/ipfs/go-ipfs-files v0.0.9/go.mod h1:aFv2uQ/qxWpL/6lidWvnSQmaVqCrf0TBGoUr+C1Fo84=

@@ -541,8 +545,9 @@ github.com/ipfs/go-ipld-cbor v0.0.6 h1:pYuWHyvSpIsOOLw4Jy7NbBkCyzLDcl64Bf/LZW7eB
github.com/ipfs/go-ipld-cbor v0.0.6/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA=
github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms=
github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k=
github.com/ipfs/go-ipld-format v0.2.0 h1:xGlJKkArkmBvowr+GMCX0FEZtkro71K1AwiKnL37mwA=
github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs=
github.com/ipfs/go-ipld-format v0.3.0 h1:Mwm2oRLzIuUwEPewWAWyMuuBQUsn3awfFEYVb8akMOQ=
github.com/ipfs/go-ipld-format v0.3.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM=
github.com/ipfs/go-ipld-legacy v0.1.0 h1:wxkkc4k8cnvIGIjPO0waJCe7SHEyFgl+yQdafdjGrpA=
github.com/ipfs/go-ipld-legacy v0.1.0/go.mod h1:86f5P/srAmh9GcIcWQR9lfFLZPrIyyXQeVlOWeeWEuI=
github.com/ipfs/go-ipns v0.1.2 h1:O/s/0ht+4Jl9+VoxoUo0zaHjnZUS+aBQIKTuzdZ/ucI=

@@ -565,8 +570,9 @@ github.com/ipfs/go-log/v2 v2.5.0/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOL
github.com/ipfs/go-merkledag v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto=
github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk=
github.com/ipfs/go-merkledag v0.3.2/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M=
github.com/ipfs/go-merkledag v0.5.1 h1:tr17GPP5XtPhvPPiWtu20tSGZiZDuTaJRXBLcr79Umk=
github.com/ipfs/go-merkledag v0.5.1/go.mod h1:cLMZXx8J08idkp5+id62iVftUQV+HlYJ3PIhDfZsjA4=
github.com/ipfs/go-merkledag v0.6.0 h1:oV5WT2321tS4YQVOPgIrWHvJ0lJobRTerU+i9nmUCuA=
github.com/ipfs/go-merkledag v0.6.0/go.mod h1:9HSEwRd5sV+lbykiYP+2NC/3o6MZbKNaa4hfNcH5iH0=
github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg=
github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY=
github.com/ipfs/go-mfs v0.1.3-0.20210507195338-96fbfa122164 h1:0ATu9s5KktHhm8aYRSe1ysOJPik3dRwU/uag1Bcz+tg=

@@ -774,8 +780,8 @@ github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKB
github.com/libp2p/go-libp2p-discovery v0.6.0 h1:1XdPmhMJr8Tmj/yUfkJMIi8mgwWrLUsCB3bMxdT+DSo=
github.com/libp2p/go-libp2p-discovery v0.6.0/go.mod h1:/u1voHt0tKIe5oIA1RHBKQLVCWPna2dXmPNHc2zR9S8=
github.com/libp2p/go-libp2p-gorpc v0.1.0/go.mod h1:DrswTLnu7qjLgbqe4fekX4ISoPiHUqtA45thTsJdE1w=
github.com/libp2p/go-libp2p-gorpc v0.3.0 h1:1ww39zPEclHh8p1Exk882Xhy3CK2gW+JZYd+6NZp+q0=
github.com/libp2p/go-libp2p-gorpc v0.3.0/go.mod h1:sRz9ybP9rlOkJB1v65SMLr+NUEPB/ioLZn26MWIV4DU=
github.com/libp2p/go-libp2p-gorpc v0.3.1 h1:ZmqQIgHccgh/Ff1kS3ZlwATZRLvtuRUd633/MLWAx20=
github.com/libp2p/go-libp2p-gorpc v0.3.1/go.mod h1:sRz9ybP9rlOkJB1v65SMLr+NUEPB/ioLZn26MWIV4DU=
github.com/libp2p/go-libp2p-gostream v0.3.0/go.mod h1:pLBQu8db7vBMNINGsAwLL/ZCE8wng5V1FThoaE5rNjc=
github.com/libp2p/go-libp2p-gostream v0.3.1 h1:XlwohsPn6uopGluEWs1Csv1QCEjrTXf2ZQagzZ5paAg=
github.com/libp2p/go-libp2p-gostream v0.3.1/go.mod h1:1V3b+u4Zhaq407UUY9JLCpboaeufAeVQbnvAt12LRsI=

@@ -5,6 +5,7 @@ package numpin
import (
"context"
"fmt"
"sync"

"github.com/ipfs/ipfs-cluster/api"

@@ -19,7 +20,9 @@ var MetricName = "numpin"
// Informer is a simple object to implement the ipfscluster.Informer
// and Component interfaces
type Informer struct {
config *Config
config *Config

mu sync.Mutex
rpcClient *rpc.Client
}

@@ -38,7 +41,9 @@ func NewInformer(cfg *Config) (*Informer, error) {
// SetClient provides us with an rpc.Client which allows
// contacting other components in the cluster.
func (npi *Informer) SetClient(c *rpc.Client) {
npi.mu.Lock()
npi.rpcClient = c
npi.mu.Unlock()
}

// Shutdown is called on cluster shutdown. We just invalidate

@@ -47,7 +52,9 @@ func (npi *Informer) Shutdown(ctx context.Context) error {
_, span := trace.StartSpan(ctx, "informer/numpin/Shutdown")
defer span.End()

npi.mu.Lock()
npi.rpcClient = nil
npi.mu.Unlock()
return nil
}

@@ -63,7 +70,11 @@ func (npi *Informer) GetMetrics(ctx context.Context) []api.Metric {
ctx, span := trace.StartSpan(ctx, "informer/numpin/GetMetric")
defer span.End()

if npi.rpcClient == nil {
npi.mu.Lock()
rpcClient := npi.rpcClient
npi.mu.Unlock()

if rpcClient == nil {
return []api.Metric{
{
Valid: false,

@@ -71,24 +82,39 @@ func (npi *Informer) GetMetrics(ctx context.Context) []api.Metric {
}
}

pinMap := make(map[string]api.IPFSPinStatus)

// make use of the RPC API to obtain information
// about the number of pins in IPFS. See RPCAPI docs.
err := npi.rpcClient.CallContext(
ctx,
"", // Local call
"IPFSConnector", // Service name
"PinLs", // Method name
"recursive", // in arg
&pinMap, // out arg
)
in := make(chan []string, 1)
in <- []string{"recursive", "direct"}
close(in)
out := make(chan api.IPFSPinInfo, 1024)

errCh := make(chan error, 1)
go func() {
defer close(errCh)
err := rpcClient.Stream(
ctx,
"", // Local call
"IPFSConnector", // Service name
"PinLs", // Method name
in,
out,
)
errCh <- err
}()

n := 0
for range out {
n++
}

err := <-errCh

valid := err == nil

m := api.Metric{
Name: MetricName,
Value: fmt.Sprintf("%d", len(pinMap)),
Value: fmt.Sprintf("%d", n),
Valid: valid,
Partitionable: false,
}

@@ -5,6 +5,7 @@ import (
"testing"

"github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/test"

rpc "github.com/libp2p/go-libp2p-gorpc"
)

@@ -21,11 +22,10 @@ func mockRPCClient(t *testing.T) *rpc.Client {
return c
}

func (mock *mockService) PinLs(ctx context.Context, in string, out *map[string]api.IPFSPinStatus) error {
*out = map[string]api.IPFSPinStatus{
"QmPGDFvBkgWhvzEK9qaTWrWurSwqXNmhnK3hgELPdZZNPa": api.IPFSPinStatusRecursive,
"QmUZ13osndQ5uL4tPWHXe3iBgBgq9gfewcBMSCAuMBsDJ6": api.IPFSPinStatusRecursive,
}
func (mock *mockService) PinLs(ctx context.Context, in <-chan []string, out chan<- api.IPFSPinInfo) error {
out <- api.IPFSPinInfo{Cid: api.Cid(test.Cid1), Type: api.IPFSPinStatusRecursive}
out <- api.IPFSPinInfo{Cid: api.Cid(test.Cid2), Type: api.IPFSPinStatusRecursive}
close(out)
return nil
}

@@ -78,7 +78,8 @@ type IPFSConnector interface {
Pin(context.Context, api.Pin) error
Unpin(context.Context, cid.Cid) error
PinLsCid(context.Context, api.Pin) (api.IPFSPinStatus, error)
PinLs(ctx context.Context, typeFilter string) (map[string]api.IPFSPinStatus, error)
// PinLs returns pins in the pinset of the given types (recursive, direct...)
PinLs(ctx context.Context, typeFilters []string, out chan<- api.IPFSPinInfo) error
// ConnectSwarms make sure this peer's IPFS daemon is connected to
// other peers IPFS daemons.
ConnectSwarms(context.Context) error

@@ -121,12 +122,11 @@ type PinTracker interface {
Untrack(context.Context, cid.Cid) error
// StatusAll returns the list of pins with their local status. Takes a
// filter to specify which statuses to report.
StatusAll(context.Context, api.TrackerStatus) []api.PinInfo
StatusAll(context.Context, api.TrackerStatus, chan<- api.PinInfo) error
// Status returns the local status of a given Cid.
Status(context.Context, cid.Cid) api.PinInfo
// RecoverAll calls Recover() for all pins tracked. Returns only
// informations for retriggered pins.
RecoverAll(context.Context) ([]api.PinInfo, error)
// RecoverAll calls Recover() for all pins tracked.
RecoverAll(context.Context, chan<- api.PinInfo) error
// Recover retriggers a Pin/Unpin operation in a Cids with error status.
Recover(context.Context, cid.Cid) (api.PinInfo, error)
}

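The revised PinTracker contract pushes results instead of returning slices: StatusAll and RecoverAll write to a caller-supplied channel and must close it when finished. A toy implementation showing the expected shape (stand-in types; the real ones are api.TrackerStatus and api.PinInfo):

package main

import "context"

type trackerStatus uint32

// match mimics api.TrackerStatus.Match: zero means "undefined",
// which matches everything.
func (s trackerStatus) match(f trackerStatus) bool { return f == 0 || s&f > 0 }

type pinInfo struct{ status trackerStatus }

type toyTracker struct{ pins []pinInfo }

// StatusAll mirrors the streaming contract: the implementation owns
// out, closes it when done, and aborts early if ctx is cancelled.
func (t *toyTracker) StatusAll(ctx context.Context, filter trackerStatus, out chan<- pinInfo) error {
defer close(out)
for _, pi := range t.pins {
if !pi.status.match(filter) {
continue
}
select {
case <-ctx.Done():
return ctx.Err()
case out <- pi:
}
}
return nil
}

func main() {
t := &toyTracker{pins: []pinInfo{{1}, {2}}}
out := make(chan pinInfo, 2)
go t.StatusAll(context.Background(), 0, out)
for range out {
}
}
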
@@ -430,6 +430,48 @@ func shutdownCluster(t *testing.T, c *Cluster, m *test.IpfsMock) {
m.Close()
}

func collectGlobalPinInfos(t *testing.T, out <-chan api.GlobalPinInfo, timeout time.Duration) []api.GlobalPinInfo {
t.Helper()

ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()

var gpis []api.GlobalPinInfo
for {
select {
case <-ctx.Done():
t.Error(ctx.Err())
return gpis
case gpi, ok := <-out:
if !ok {
return gpis
}
gpis = append(gpis, gpi)
}
}
}

func collectPinInfos(t *testing.T, out <-chan api.PinInfo) []api.PinInfo {
t.Helper()

ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()

var pis []api.PinInfo
for {
select {
case <-ctx.Done():
t.Error(ctx.Err())
return pis
case pi, ok := <-out:
if !ok {
return pis
}
pis = append(pis, pi)
}
}
}

func runF(t *testing.T, clusters []*Cluster, f func(*testing.T, *Cluster)) {
t.Helper()
var wg sync.WaitGroup

@@ -654,12 +696,22 @@ func TestClustersPin(t *testing.T) {
}
switch consensus {
case "crdt":
time.Sleep(20 * time.Second)
time.Sleep(10 * time.Second)
default:
delay()
}
fpinned := func(t *testing.T, c *Cluster) {
status := c.tracker.StatusAll(ctx, api.TrackerStatusUndefined)
out := make(chan api.PinInfo, 10)

go func() {
err := c.tracker.StatusAll(ctx, api.TrackerStatusUndefined, out)
if err != nil {
t.Error(err)
}
}()

status := collectPinInfos(t, out)

for _, v := range status {
if v.Status != api.TrackerStatusPinned {
t.Errorf("%s should have been pinned but it is %s", v.Cid, v.Status)

@@ -672,7 +724,7 @@ func TestClustersPin(t *testing.T) {
runF(t, clusters, fpinned)

// Unpin everything
pinList, err := clusters[0].Pins(ctx)
pinList, err := clusters[0].pinsSlice(ctx)
if err != nil {
t.Fatal(err)
}

@@ -692,7 +744,7 @@ func TestClustersPin(t *testing.T) {

switch consensus {
case "crdt":
time.Sleep(20 * time.Second)
time.Sleep(10 * time.Second)
default:
delay()
}

@@ -708,7 +760,15 @@ func TestClustersPin(t *testing.T) {
delay()

funpinned := func(t *testing.T, c *Cluster) {
status := c.tracker.StatusAll(ctx, api.TrackerStatusUndefined)
out := make(chan api.PinInfo)
go func() {
err := c.tracker.StatusAll(ctx, api.TrackerStatusUndefined, out)
if err != nil {
t.Error(err)
}
}()

status := collectPinInfos(t, out)
for _, v := range status {
t.Errorf("%s should have been unpinned but it is %s", v.Cid, v.Status)
}

@@ -852,10 +912,15 @@ func TestClustersStatusAll(t *testing.T) {
pinDelay()
// Global status
f := func(t *testing.T, c *Cluster) {
statuses, err := c.StatusAll(ctx, api.TrackerStatusUndefined)
if err != nil {
t.Error(err)
}
out := make(chan api.GlobalPinInfo, 10)
go func() {
err := c.StatusAll(ctx, api.TrackerStatusUndefined, out)
if err != nil {
t.Error(err)
}
}()

statuses := collectGlobalPinInfos(t, out, 5*time.Second)
if len(statuses) != 1 {
t.Fatal("bad status. Expected one item")
}

@@ -920,10 +985,16 @@ func TestClustersStatusAllWithErrors(t *testing.T) {
return
}

statuses, err := c.StatusAll(ctx, api.TrackerStatusUndefined)
if err != nil {
t.Error(err)
}
out := make(chan api.GlobalPinInfo, 10)
go func() {
err := c.StatusAll(ctx, api.TrackerStatusUndefined, out)
if err != nil {
t.Error(err)
}
}()

statuses := collectGlobalPinInfos(t, out, 5*time.Second)

if len(statuses) != 1 {
t.Fatal("bad status. Expected one item")
}

@@ -1124,11 +1195,15 @@ func TestClustersRecoverAll(t *testing.T) {

pinDelay()

gInfos, err := clusters[rand.Intn(nClusters)].RecoverAll(ctx)
if err != nil {
t.Fatal(err)
}
delay()
out := make(chan api.GlobalPinInfo)
go func() {
err := clusters[rand.Intn(nClusters)].RecoverAll(ctx, out)
if err != nil {
t.Error(err)
}
}()

gInfos := collectGlobalPinInfos(t, out, 5*time.Second)

if len(gInfos) != 1 {
t.Error("expected one items")

@@ -1219,7 +1294,15 @@ func TestClustersReplicationOverall(t *testing.T) {

f := func(t *testing.T, c *Cluster) {
// confirm that the pintracker state matches the current global state
pinfos := c.tracker.StatusAll(ctx, api.TrackerStatusUndefined)
out := make(chan api.PinInfo, 100)

go func() {
err := c.tracker.StatusAll(ctx, api.TrackerStatusUndefined, out)
if err != nil {
t.Error(err)
}
}()
pinfos := collectPinInfos(t, out)
if len(pinfos) != nClusters {
t.Error("Pinfos does not have the expected pins")
}

@@ -1243,11 +1326,14 @@ func TestClustersReplicationOverall(t *testing.T) {
t.Errorf("%s: Expected 1 remote pin but got %d", c.id.String(), numRemote)
}

pins, err := c.Pins(ctx)
if err != nil {
t.Fatal(err)
}
for _, pin := range pins {
outPins := make(chan api.Pin)
go func() {
err := c.Pins(ctx, outPins)
if err != nil {
t.Error(err)
}
}()
for pin := range outPins {
allocs := pin.Allocations
if len(allocs) != nClusters-1 {
t.Errorf("Allocations are [%s]", allocs)

@@ -1623,7 +1709,7 @@ func TestClustersReplicationRealloc(t *testing.T) {
// Let the pin arrive
pinDelay()

pinList, err := clusters[j].Pins(ctx)
pinList, err := clusters[j].pinsSlice(ctx)
if err != nil {
t.Fatal(err)
}

@@ -1641,7 +1727,7 @@ func TestClustersReplicationRealloc(t *testing.T) {

pinDelay()

pinList2, err := clusters[j].Pins(ctx)
pinList2, err := clusters[j].pinsSlice(ctx)
if err != nil {
t.Fatal(err)
}

@@ -2131,7 +2217,7 @@ func TestClusterPinsWithExpiration(t *testing.T) {

pinDelay()

pins, err := cl.Pins(ctx)
pins, err := cl.pinsSlice(ctx)
if err != nil {
t.Fatal(err)
}

@@ -2154,7 +2240,7 @@ func TestClusterPinsWithExpiration(t *testing.T) {
pinDelay()

// state sync should have unpinned expired pin
pins, err = cl.Pins(ctx)
pins, err = cl.pinsSlice(ctx)
if err != nil {
t.Fatal(err)
}

@@ -73,19 +73,25 @@ type ipfsError struct {

func (ie ipfsError) Error() string {
return fmt.Sprintf(
"IPFS request unsuccessful (%s). Code: %d. Message: %s",
"IPFS error (%s). Code: %d. Message: %s",
ie.path,
ie.code,
ie.Message,
)
}

type ipfsPinType struct {
Type string
type ipfsUnpinnedError ipfsError

func (unpinned ipfsUnpinnedError) Is(target error) bool {
ierr, ok := target.(ipfsError)
if !ok {
return false
}
return strings.HasSuffix(ierr.Message, "not pinned")
}

type ipfsPinLsResp struct {
Keys map[string]ipfsPinType
func (unpinned ipfsUnpinnedError) Error() string {
return ipfsError(unpinned).Error()
}

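ipfsUnpinnedError enables the errors.Is(ipfsUnpinnedError{}, err) check used further down in PinLsCid: errors.Is also consults the Is method of its first argument, so a zero-value ipfsUnpinnedError can classify any ipfsError whose message ends in "not pinned". A self-contained sketch of the mechanics (fields trimmed down; the message string is illustrative):

package main

import (
"errors"
"fmt"
"strings"
)

// Simplified to the Message field; the real ipfsError also carries
// path and code.
type ipfsError struct{ Message string }

func (ie ipfsError) Error() string { return ie.Message }

type ipfsUnpinnedError ipfsError

func (unpinned ipfsUnpinnedError) Error() string { return ipfsError(unpinned).Error() }

// Is matches any ipfsError whose message ends in "not pinned".
func (unpinned ipfsUnpinnedError) Is(target error) bool {
ierr, ok := target.(ipfsError)
return ok && strings.HasSuffix(ierr.Message, "not pinned")
}

func main() {
err := ipfsError{Message: "path is not pinned"}
// Note the inverted argument order: the zero value sits in the err
// position so that errors.Is calls its Is(target) method.
fmt.Println(errors.Is(ipfsUnpinnedError{}, err)) // true
}
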
type ipfsIDResp struct {

@@ -493,33 +499,62 @@ func (ipfs *Connector) Unpin(ctx context.Context, hash cid.Cid) error {
}

// PinLs performs a "pin ls --type typeFilter" request against the configured
// IPFS daemon and returns a map of cid strings and their status.
func (ipfs *Connector) PinLs(ctx context.Context, typeFilter string) (map[string]api.IPFSPinStatus, error) {
// IPFS daemon and sends the results on the given channel. Returns when done.
func (ipfs *Connector) PinLs(ctx context.Context, typeFilters []string, out chan<- api.IPFSPinInfo) error {
defer close(out)
bodies := make([]io.ReadCloser, len(typeFilters))

ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/PinLs")
defer span.End()

ctx, cancel := context.WithTimeout(ctx, ipfs.config.IPFSRequestTimeout)
defer cancel()
body, err := ipfs.postCtx(ctx, "pin/ls?type="+typeFilter, "", nil)

// Some error talking to the daemon
if err != nil {
return nil, err
var err error

nextFilter:
for i, typeFilter := range typeFilters {
// Post and read streaming response
path := "pin/ls?stream=true&type=" + typeFilter
bodies[i], err = ipfs.postCtxStreamResponse(ctx, path, "", nil)
if err != nil {
logger.Errorf("error querying pinset: %s", err)
return err
}
defer bodies[i].Close()

dec := json.NewDecoder(bodies[i])

for {
select {
case <-ctx.Done():
err = fmt.Errorf("aborting pin/ls operation: %w", ctx.Err())
logger.Error(err)
return err
default:
}

var ipfsPin api.IPFSPinInfo
err = dec.Decode(&ipfsPin)
if err == io.EOF {
break nextFilter
}
if err != nil {
err = fmt.Errorf("error decoding ipfs pin: %w", err)
return err
}

select {
case <-ctx.Done():
err = fmt.Errorf("aborting pin/ls operation: %w", ctx.Err())
logger.Error(err)
return err
case out <- ipfsPin:
}
}
}

var res ipfsPinLsResp
err = json.Unmarshal(body, &res)
if err != nil {
logger.Error("parsing pin/ls response")
logger.Error(string(body))
return nil, err
}

statusMap := make(map[string]api.IPFSPinStatus)
for k, v := range res.Keys {
statusMap[k] = api.IPFSPinStatusFromString(v.Type)
}
return statusMap, nil
return nil
}

// PinLsCid performs a "pin ls <hash>" request. It will use "type=recursive" or

@@ -532,35 +567,31 @@ func (ipfs *Connector) PinLsCid(ctx context.Context, pin api.Pin) (api.IPFSPinSt
ctx, cancel := context.WithTimeout(ctx, ipfs.config.IPFSRequestTimeout)
defer cancel()

if !pin.Defined() {
return api.IPFSPinStatusBug, errors.New("calling PinLsCid without a defined CID")
}

pinType := pin.MaxDepth.ToPinMode().String()
lsPath := fmt.Sprintf("pin/ls?arg=%s&type=%s", pin.Cid, pinType)
body, err := ipfs.postCtx(ctx, lsPath, "", nil)
if body == nil && err != nil { // Network error, daemon down
return api.IPFSPinStatusError, err
}

if err != nil { // we could not find the pin
return api.IPFSPinStatusUnpinned, nil
}

var res ipfsPinLsResp
err = json.Unmarshal(body, &res)
lsPath := fmt.Sprintf("pin/ls?stream=true&arg=%s&type=%s", pin.Cid, pinType)
body, err := ipfs.postCtxStreamResponse(ctx, lsPath, "", nil)
if err != nil {
logger.Error("error parsing pin/ls?arg=cid response:")
logger.Error(string(body))
if errors.Is(ipfsUnpinnedError{}, err) {
return api.IPFSPinStatusUnpinned, nil
}
return api.IPFSPinStatusError, err
}
defer body.Close()

var res api.IPFSPinInfo
dec := json.NewDecoder(body)

err = dec.Decode(&res)
if err != nil {
logger.Error("error parsing pin/ls?arg=cid response")
return api.IPFSPinStatusError, err
}

// We do not know what string format the returned key has so
// we parse as CID. There should only be one returned key.
for k, pinObj := range res.Keys {
c, err := cid.Decode(k)
if err != nil || !c.Equals(pin.Cid) {
continue
}
return api.IPFSPinStatusFromString(pinObj.Type), nil
}
return api.IPFSPinStatusError, errors.New("expected to find the pin in the response")
return res.Type, nil
}

func (ipfs *Connector) doPostCtx(ctx context.Context, client *http.Client, apiURL, path string, contentType string, postBody io.Reader) (*http.Response, error) {

@@ -601,7 +632,7 @@ func checkResponse(path string, res *http.Response) ([]byte, error) {

// No error response with useful message from ipfs
return nil, fmt.Errorf(
"IPFS request unsuccessful (%s). Code %d. Body: %s",
"IPFS request failed (is it running?) (%s). Code %d: %s",
path,
res.StatusCode,
string(body))

@@ -611,18 +642,13 @@ func checkResponse(path string, res *http.Response) ([]byte, error) {
// the ipfs daemon, reads the full body of the response and
// returns it after checking for errors.
func (ipfs *Connector) postCtx(ctx context.Context, path string, contentType string, postBody io.Reader) ([]byte, error) {
res, err := ipfs.doPostCtx(ctx, ipfs.client, ipfs.apiURL(), path, contentType, postBody)
rdr, err := ipfs.postCtxStreamResponse(ctx, path, contentType, postBody)
if err != nil {
return nil, err
}
defer res.Body.Close()
defer rdr.Close()

errBody, err := checkResponse(path, res)
if err != nil {
return errBody, err
}

body, err := ioutil.ReadAll(res.Body)
body, err := ioutil.ReadAll(rdr)
if err != nil {
logger.Errorf("error reading response body: %s", err)
return nil, err

@@ -630,6 +656,21 @@ func (ipfs *Connector) postCtx(ctx context.Context, path string, contentType str
return body, nil
}

// postCtxStreamResponse makes a POST request against the ipfs daemon, and
// returns the body reader after checking the request for errors.
func (ipfs *Connector) postCtxStreamResponse(ctx context.Context, path string, contentType string, postBody io.Reader) (io.ReadCloser, error) {
res, err := ipfs.doPostCtx(ctx, ipfs.client, ipfs.apiURL(), path, contentType, postBody)
if err != nil {
return nil, err
}

_, err = checkResponse(path, res)
if err != nil {
return nil, err
}
return res.Body, nil
}

// apiURL is a short-hand for building the url of the IPFS
// daemon API.
func (ipfs *Connector) apiURL() string {

@@ -219,6 +219,27 @@ func TestIPFSPinLsCid_DifferentEncoding(t *testing.T) {
}
}

func collectPins(t *testing.T, pch <-chan api.IPFSPinInfo) []api.IPFSPinInfo {
t.Helper()

var pins []api.IPFSPinInfo
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()

for {
select {
case <-ctx.Done():
t.Fatal(ctx.Err())
return nil
case p, ok := <-pch:
if !ok {
return pins
}
pins = append(pins, p)
}
}
}

func TestIPFSPinLs(t *testing.T) {
ctx := context.Background()
ipfs, mock := testIPFSConnector(t)

@@ -229,16 +250,21 @@ func TestIPFSPinLs(t *testing.T) {

ipfs.Pin(ctx, api.PinCid(c))
ipfs.Pin(ctx, api.PinCid(c2))
ipsMap, err := ipfs.PinLs(ctx, "")
if err != nil {
t.Error("should not error")
pinCh := make(chan api.IPFSPinInfo, 10)
go func() {
err := ipfs.PinLs(ctx, []string{""}, pinCh)
if err != nil {
t.Error("should not error")
}
}()

pins := collectPins(t, pinCh)

if len(pins) != 2 {
t.Fatal("the pin list does not contain the expected number of keys")
}

if len(ipsMap) != 2 {
t.Fatal("the map does not contain expected keys")
}

if !ipsMap[test.Cid1.String()].IsPinned(-1) || !ipsMap[test.Cid2.String()].IsPinned(-1) {
if !pins[0].Type.IsPinned(-1) || !pins[1].Type.IsPinned(-1) {
t.Error("c1 and c2 should appear pinned")
}
}

@@ -114,7 +114,7 @@ func TestClustersPeerAdd(t *testing.T) {
}

// Check that they are part of the consensus
pins, err := c.Pins(ctx)
pins, err := c.pinsSlice(ctx)
if err != nil {
t.Fatal(err)
}

@@ -463,7 +463,7 @@ func TestClustersPeerRemoveReallocsPins(t *testing.T) {
// Find out which pins are associated to the chosen peer.
interestingCids := []cid.Cid{}

pins, err := chosen.Pins(ctx)
pins, err := chosen.pinsSlice(ctx)
if err != nil {
t.Fatal(err)
}

@@ -537,7 +537,7 @@ func TestClustersPeerJoin(t *testing.T) {
if len(peers) != nClusters {
t.Error("all peers should be connected")
}
pins, err := c.Pins(ctx)
pins, err := c.pinsSlice(ctx)
if err != nil {
t.Fatal(err)
}

@@ -575,7 +575,7 @@ func TestClustersPeerJoinAllAtOnce(t *testing.T) {
if len(peers) != nClusters {
t.Error("all peers should be connected")
}
pins, err := c.Pins(ctx)
pins, err := c.pinsSlice(ctx)
if err != nil {
t.Fatal(err)
}

@@ -140,7 +140,7 @@ func (opt *OperationTracker) SetError(ctx context.Context, c cid.Cid, err error)
}
}

func (opt *OperationTracker) unsafePinInfo(ctx context.Context, op *Operation) api.PinInfo {
func (opt *OperationTracker) unsafePinInfo(ctx context.Context, op *Operation, ipfs api.IPFSID) api.PinInfo {
if op == nil {
return api.PinInfo{
Cid: cid.Undef,

@@ -162,26 +162,27 @@ func (opt *OperationTracker) unsafePinInfo(ctx context.Context, op *Operation) a
Peer: opt.pid,
Name: op.Pin().Name,
PinInfoShort: api.PinInfoShort{
PeerName: opt.peerName,
IPFS: "",
Status: op.ToTrackerStatus(),
TS: op.Timestamp(),
AttemptCount: op.AttemptCount(),
PriorityPin: op.PriorityPin(),
Error: op.Error(),
PeerName: opt.peerName,
IPFS: ipfs.ID,
IPFSAddresses: ipfs.Addresses,
Status: op.ToTrackerStatus(),
TS: op.Timestamp(),
AttemptCount: op.AttemptCount(),
PriorityPin: op.PriorityPin(),
Error: op.Error(),
},
}
}

// Get returns a PinInfo object for Cid.
func (opt *OperationTracker) Get(ctx context.Context, c cid.Cid) api.PinInfo {
func (opt *OperationTracker) Get(ctx context.Context, c cid.Cid, ipfs api.IPFSID) api.PinInfo {
ctx, span := trace.StartSpan(ctx, "optracker/GetAll")
defer span.End()

opt.mu.RLock()
defer opt.mu.RUnlock()
op := opt.operations[c]
pInfo := opt.unsafePinInfo(ctx, op)
pInfo := opt.unsafePinInfo(ctx, op, ipfs)
if pInfo.Cid == cid.Undef {
pInfo.Cid = c
}

@@ -190,7 +191,7 @@ func (opt *OperationTracker) Get(ctx context.Context, c cid.Cid) api.PinInfo {

// GetExists returns a PinInfo object for a Cid only if there exists
// an associated Operation.
func (opt *OperationTracker) GetExists(ctx context.Context, c cid.Cid) (api.PinInfo, bool) {
func (opt *OperationTracker) GetExists(ctx context.Context, c cid.Cid, ipfs api.IPFSID) (api.PinInfo, bool) {
ctx, span := trace.StartSpan(ctx, "optracker/GetExists")
defer span.End()

@@ -200,25 +201,51 @@ func (opt *OperationTracker) GetExists(ctx context.Context, c cid.Cid) (api.PinI
if !ok {
return api.PinInfo{}, false
}
pInfo := opt.unsafePinInfo(ctx, op)
pInfo := opt.unsafePinInfo(ctx, op, ipfs)
return pInfo, true
}

// GetAll returns PinInfo objects for all known operations.
func (opt *OperationTracker) GetAll(ctx context.Context) []api.PinInfo {
func (opt *OperationTracker) GetAll(ctx context.Context, ipfs api.IPFSID) []api.PinInfo {
ctx, span := trace.StartSpan(ctx, "optracker/GetAll")
defer span.End()

ch := make(chan api.PinInfo, 1024)
var pinfos []api.PinInfo
opt.mu.RLock()
defer opt.mu.RUnlock()
for _, op := range opt.operations {
pinfo := opt.unsafePinInfo(ctx, op)
go opt.GetAllChannel(ctx, api.TrackerStatusUndefined, ipfs, ch)
for pinfo := range ch {
pinfos = append(pinfos, pinfo)
}
return pinfos
}

// GetAllChannel returns all known operations that match the filter on the
// provided channel. Blocks until done.
func (opt *OperationTracker) GetAllChannel(ctx context.Context, filter api.TrackerStatus, ipfs api.IPFSID, out chan<- api.PinInfo) error {
defer close(out)

opt.mu.RLock()
defer opt.mu.RUnlock()

for _, op := range opt.operations {
pinfo := opt.unsafePinInfo(ctx, op, ipfs)
if pinfo.Status.Match(filter) {
select {
case <-ctx.Done():
return fmt.Errorf("listing operations aborted: %w", ctx.Err())
default:
}

select {
case <-ctx.Done():
return fmt.Errorf("listing operations aborted: %w", ctx.Err())
case out <- pinfo:
}
}
}
return nil
}

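GetAll is now a convenience wrapper over GetAllChannel; callers that want filtering without buffering everything can drive the channel form directly. An illustrative fragment (assumes opt, ctx, an api.IPFSID named ipfsID, and logger in scope):

out := make(chan api.PinInfo, 1024)
errCh := make(chan error, 1)
go func() {
defer close(errCh)
// GetAllChannel closes out when it returns.
errCh <- opt.GetAllChannel(ctx, api.TrackerStatusPinError, ipfsID, out)
}()
for pi := range out {
_ = pi // handle each matching operation's PinInfo as it arrives
}
if err := <-errCh; err != nil {
logger.Error(err)
}
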
// CleanAllDone deletes any operation from the tracker that is in PhaseDone.
func (opt *OperationTracker) CleanAllDone(ctx context.Context) {
	opt.mu.Lock()

@ -245,13 +272,13 @@ func (opt *OperationTracker) OpContext(ctx context.Context, c cid.Cid) context.C

// Operations that matched the provided filter. Note, only supports
// filters of type OperationType or Phase, any other type
// will result in a nil slice being returned.
func (opt *OperationTracker) Filter(ctx context.Context, filters ...interface{}) []api.PinInfo {
func (opt *OperationTracker) Filter(ctx context.Context, ipfs api.IPFSID, filters ...interface{}) []api.PinInfo {
	var pinfos []api.PinInfo
	opt.mu.RLock()
	defer opt.mu.RUnlock()
	ops := filterOpsMap(ctx, opt.operations, filters)
	for _, op := range ops {
		pinfo := opt.unsafePinInfo(ctx, op)
		pinfo := opt.unsafePinInfo(ctx, op, ipfs)
		pinfos = append(pinfos, pinfo)
	}
	return pinfos
@ -126,7 +126,7 @@ func TestOperationTracker_SetError(t *testing.T) {
	opt := testOperationTracker(t)
	opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationPin, PhaseDone)
	opt.SetError(ctx, test.Cid1, errors.New("fake error"))
	pinfo := opt.Get(ctx, test.Cid1)
	pinfo := opt.Get(ctx, test.Cid1, api.IPFSID{})
	if pinfo.Status != api.TrackerStatusPinError {
		t.Error("should have updated the status")
	}

@ -148,7 +148,7 @@ func TestOperationTracker_Get(t *testing.T) {
	opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationPin, PhaseDone)

	t.Run("Get with existing item", func(t *testing.T) {
		pinfo := opt.Get(ctx, test.Cid1)
		pinfo := opt.Get(ctx, test.Cid1, api.IPFSID{})
		if pinfo.Status != api.TrackerStatusPinned {
			t.Error("bad status")
		}

@ -163,7 +163,7 @@ func TestOperationTracker_Get(t *testing.T) {
	})

	t.Run("Get with unexisting item", func(t *testing.T) {
		pinfo := opt.Get(ctx, test.Cid2)
		pinfo := opt.Get(ctx, test.Cid2, api.IPFSID{})
		if pinfo.Status != api.TrackerStatusUnpinned {
			t.Error("bad status")
		}

@ -181,7 +181,7 @@ func TestOperationTracker_GetAll(t *testing.T) {
	ctx := context.Background()
	opt := testOperationTracker(t)
	opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationPin, PhaseInProgress)
	pinfos := opt.GetAll(ctx)
	pinfos := opt.GetAll(ctx, api.IPFSID{})
	if len(pinfos) != 1 {
		t.Fatal("expected 1 item")
	}
@ -165,6 +165,28 @@ func TestPinTracker_Untrack(t *testing.T) {
	}
}

func collectPinInfos(t *testing.T, out chan api.PinInfo) []api.PinInfo {
	t.Helper()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	var pis []api.PinInfo
	for {
		select {
		case <-ctx.Done():
			t.Error("took too long")
			return nil
		case pi, ok := <-out:
			if !ok {
				return pis
			}
			pis = append(pis, pi)
		}
	}
}

func TestPinTracker_StatusAll(t *testing.T) {
	type args struct {
		c api.Pin

@ -216,7 +238,16 @@ func TestPinTracker_StatusAll(t *testing.T) {
				t.Errorf("PinTracker.Track() error = %v", err)
			}
			time.Sleep(200 * time.Millisecond)
			got := tt.args.tracker.StatusAll(context.Background(), api.TrackerStatusUndefined)
			infos := make(chan api.PinInfo)
			go func() {
				err := tt.args.tracker.StatusAll(context.Background(), api.TrackerStatusUndefined, infos)
				if err != nil {
					t.Error(err)
				}
			}()

			got := collectPinInfos(t, infos)

			if len(got) != len(tt.want) {
				for _, pi := range got {
					t.Logf("pinfo: %v", pi)

@ -240,31 +271,6 @@ func TestPinTracker_StatusAll(t *testing.T) {
	}
}

func BenchmarkPinTracker_StatusAll(b *testing.B) {
	type args struct {
		tracker ipfscluster.PinTracker
	}
	tests := []struct {
		name string
		args args
	}{
		{
			"basic stateless track",
			args{
				testStatelessPinTracker(b),
			},
		},
	}
	for _, tt := range tests {
		b.Run(tt.name, func(b *testing.B) {
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				tt.args.tracker.StatusAll(context.Background(), api.TrackerStatusUndefined)
			}
		})
	}
}

func TestPinTracker_Status(t *testing.T) {
	type args struct {
		c cid.Cid

@ -350,11 +356,16 @@ func TestPinTracker_RecoverAll(t *testing.T) {
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := tt.args.tracker.RecoverAll(context.Background())
			if (err != nil) != tt.wantErr {
				t.Errorf("PinTracker.RecoverAll() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			infos := make(chan api.PinInfo)
			go func() {
				err := tt.args.tracker.RecoverAll(context.Background(), infos)
				if (err != nil) != tt.wantErr {
					t.Errorf("PinTracker.RecoverAll() error = %v, wantErr %v", err, tt.wantErr)
					return
				}
			}()

			got := collectPinInfos(t, infos)

			if len(got) != len(tt.want) {
				for _, pi := range got {
@ -6,6 +6,7 @@ package stateless

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"

@ -23,6 +24,8 @@ import (

var logger = logging.Logger("pintracker")

const pinsChannelSize = 1024

var (
	// ErrFullQueue is the error used when pin or unpin operation channel is full.
	ErrFullQueue = errors.New("pin/unpin operation queue is full. Try increasing max_pin_queue_size")
@ -321,36 +324,134 @@ func (spt *Tracker) Untrack(ctx context.Context, c cid.Cid) error {
}

// StatusAll returns information for all Cids pinned to the local IPFS node.
func (spt *Tracker) StatusAll(ctx context.Context, filter api.TrackerStatus) []api.PinInfo {
func (spt *Tracker) StatusAll(ctx context.Context, filter api.TrackerStatus, out chan<- api.PinInfo) error {
	ctx, span := trace.StartSpan(ctx, "tracker/stateless/StatusAll")
	defer span.End()

	pininfos, err := spt.localStatus(ctx, true, filter)
	if err != nil {
		return nil
	}

	// get all inflight operations from optracker and put them into the
	// map, deduplicating any existing items with their inflight operation.
	//
	// we cannot filter in GetAll, because we are meant to replace items in
	// pininfos and set the correct status, as otherwise they will remain in
	// PinError.
	ipfsid := spt.getIPFSID(ctx)
	for _, infop := range spt.optracker.GetAll(ctx) {
		infop.IPFS = ipfsid.ID
		infop.IPFSAddresses = ipfsid.Addresses
		pininfos[infop.Cid] = infop

	// Any other states are just operation-tracker states, so we just give
	// those and return.
	if !filter.Match(
		api.TrackerStatusPinned | api.TrackerStatusUnexpectedlyUnpinned |
			api.TrackerStatusSharded | api.TrackerStatusRemote) {
		return spt.optracker.GetAllChannel(ctx, filter, ipfsid, out)
	}

	var pis []api.PinInfo
	for _, pi := range pininfos {
		// Last filter.
		if pi.Status.Match(filter) {
			pis = append(pis, pi)
	defer close(out)

	// get global state - cluster pinset
	st, err := spt.getState(ctx)
	if err != nil {
		logger.Error(err)
		return err
	}

	var ipfsRecursivePins map[api.Cid]api.IPFSPinStatus
	// Only query IPFS if we want to status for pinned items
	if filter.Match(api.TrackerStatusPinned | api.TrackerStatusUnexpectedlyUnpinned) {
		ipfsRecursivePins = make(map[api.Cid]api.IPFSPinStatus)
		// At some point we need a full map of what we have and what
		// we don't. The IPFS pinset is the smallest thing we can keep
		// on memory.
		ipfsPinsCh, err := spt.ipfsPins(ctx)
		if err != nil {
			logger.Error(err)
			return err
		}
		for ipfsPinInfo := range ipfsPinsCh {
			ipfsRecursivePins[ipfsPinInfo.Cid] = ipfsPinInfo.Type
		}
	}
	return pis

	// Prepare pinset streaming
	statePins := make(chan api.Pin, pinsChannelSize)
	err = st.List(ctx, statePins)
	if err != nil {
		logger.Error(err)
		return err
	}

	// a shorthand for this select.
	trySend := func(info api.PinInfo) bool {
		select {
		case <-ctx.Done():
			return false
		case out <- info:
			return true
		}
	}

	// For every item in the state.
	for p := range statePins {
		select {
		case <-ctx.Done():
		default:
		}

		// if there is an operation, issue that and move on
		info, ok := spt.optracker.GetExists(ctx, p.Cid, ipfsid)
		if ok && filter.Match(info.Status) {
			if !trySend(info) {
				return fmt.Errorf("error issuing PinInfo: %w", ctx.Err())
			}
			continue // next pin
		}

		// Preliminary PinInfo for this Pin.
		info = api.PinInfo{
			Cid:         p.Cid,
			Name:        p.Name,
			Peer:        spt.peerID,
			Allocations: p.Allocations,
			Origins:     p.Origins,
			Created:     p.Timestamp,
			Metadata:    p.Metadata,

			PinInfoShort: api.PinInfoShort{
				PeerName:      spt.peerName,
				IPFS:          ipfsid.ID,
				IPFSAddresses: ipfsid.Addresses,
				Status:        api.TrackerStatusUndefined, // TBD
				TS:            p.Timestamp,
				Error:         "",
				AttemptCount:  0,
				PriorityPin:   false,
			},
		}

		ipfsStatus, pinnedInIpfs := ipfsRecursivePins[api.Cid(p.Cid)]

		switch {
		case p.Type == api.MetaType:
			info.Status = api.TrackerStatusSharded
		case p.IsRemotePin(spt.peerID):
			info.Status = api.TrackerStatusRemote
		case pinnedInIpfs:
			// No need to filter. pinnedInIpfs is false
			// unless the filter is Pinned |
			// UnexpectedlyUnpinned. We filter at the end.
			info.Status = ipfsStatus.ToTrackerStatus()
		default:
			// Not on an operation
			// Not a meta pin
			// Not a remote pin
			// Not a pin on ipfs

			// We understand that this is something that
			// should be pinned on IPFS and it is not.
			info.Status = api.TrackerStatusUnexpectedlyUnpinned
			info.Error = errUnexpectedlyUnpinned.Error()
		}
		if !filter.Match(info.Status) {
			continue
		}

		if !trySend(info) {
			return fmt.Errorf("error issuing PinInfo: %w", ctx.Err())
		}
	}
	return nil
}
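Two consumption styles fit this signature: a goroutine can drain the output channel while StatusAll runs (as TestPinTracker_StatusAll does further below via collectPinInfos), or, when the result set is known to fit, the caller can pass a sufficiently buffered channel and read after the synchronous call returns, which is what RecoverAll below and the TestStatusAll fixture do.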
// Status returns information for a Cid pinned to the local IPFS node.

@ -361,10 +462,7 @@ func (spt *Tracker) Status(ctx context.Context, c cid.Cid) api.PinInfo {
	ipfsid := spt.getIPFSID(ctx)

	// check if c has an inflight operation or errored operation in optracker
	if oppi, ok := spt.optracker.GetExists(ctx, c); ok {
		// if it does return the status of the operation
		oppi.IPFS = ipfsid.ID
		oppi.IPFSAddresses = ipfsid.Addresses
	if oppi, ok := spt.optracker.GetExists(ctx, c, ipfsid); ok {
		return oppi
	}

@ -452,31 +550,46 @@ func (spt *Tracker) Status(ctx context.Context, c cid.Cid) api.PinInfo {
}

// RecoverAll attempts to recover all items tracked by this peer. It returns
// items that have been re-queued.
func (spt *Tracker) RecoverAll(ctx context.Context) ([]api.PinInfo, error) {
// any errors or when it is done re-tracking.
func (spt *Tracker) RecoverAll(ctx context.Context, out chan<- api.PinInfo) error {
	defer close(out)

	ctx, span := trace.StartSpan(ctx, "tracker/stateless/RecoverAll")
	defer span.End()

	// FIXME: make sure this returns a channel.
	statuses := spt.StatusAll(ctx, api.TrackerStatusUndefined)
	resp := make([]api.PinInfo, 0)
	for _, st := range statuses {
	statusesCh := make(chan api.PinInfo, 1024)
	err := spt.StatusAll(ctx, api.TrackerStatusUndefined, statusesCh)
	if err != nil {
		return err
	}

	for st := range statusesCh {
		// Break out if we shutdown. We might be going through
		// a very long list of statuses.
		select {
		case <-spt.ctx.Done():
			return nil, spt.ctx.Err()
			err = fmt.Errorf("RecoverAll aborted: %w", ctx.Err())
			logger.Error(err)
			return err
		default:
			r, err := spt.recoverWithPinInfo(ctx, st)
			p, err := spt.recoverWithPinInfo(ctx, st)
			if err != nil {
				return resp, err
				err = fmt.Errorf("RecoverAll error: %w", err)
				logger.Error(err)
				return err
			}
			if r.Defined() {
				resp = append(resp, r)
			if p.Defined() {
				select {
				case <-ctx.Done():
					err = fmt.Errorf("RecoverAll aborted: %w", ctx.Err())
					logger.Error(err)
					return err
				case out <- p:
				}
			}
		}
	}
	return resp, nil
	return nil
}
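One caveat worth noting: as written, RecoverAll consumes StatusAll synchronously and so appears to rely on the 1024-slot buffer holding the entire status listing. A sketch of a buffer-independent alternative (illustrative only, not what this commit does):

	statusesCh := make(chan api.PinInfo, 1024)
	errCh := make(chan error, 1)
	go func() {
		// StatusAll closes statusesCh when it returns.
		errCh <- spt.StatusAll(ctx, api.TrackerStatusUndefined, statusesCh)
	}()
	for st := range statusesCh {
		_ = st // recover each item as in the loop above
	}
	if err := <-errCh; err != nil {
		return err
	}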
// Recover will trigger pinning or unpinning for items in

@ -485,13 +598,7 @@ func (spt *Tracker) Recover(ctx context.Context, c cid.Cid) (api.PinInfo, error)
	ctx, span := trace.StartSpan(ctx, "tracker/stateless/Recover")
	defer span.End()

	// Check if we have a status in the operation tracker and use that
	// pininfo. Otherwise, get a status by checking against IPFS and use
	// that.
	pi, ok := spt.optracker.GetExists(ctx, c)
	if !ok {
		pi = spt.Status(ctx, c)
	}
	pi := spt.Status(ctx, c)

	recPi, err := spt.recoverWithPinInfo(ctx, pi)
	// if it was not enqueued, no updated pin-info is returned.

@ -524,158 +631,29 @@ func (spt *Tracker) recoverWithPinInfo(ctx context.Context, pi api.PinInfo) (api
	return spt.Status(ctx, pi.Cid), nil
}

func (spt *Tracker) ipfsStatusAll(ctx context.Context) (map[cid.Cid]api.PinInfo, error) {
func (spt *Tracker) ipfsPins(ctx context.Context) (<-chan api.IPFSPinInfo, error) {
	ctx, span := trace.StartSpan(ctx, "tracker/stateless/ipfsStatusAll")
	defer span.End()

	var ipsMap map[string]api.IPFSPinStatus
	err := spt.rpcClient.CallContext(
		ctx,
		"",
		"IPFSConnector",
		"PinLs",
		"recursive",
		&ipsMap,
	)
	if err != nil {
		logger.Error(err)
		return nil, err
	}
	ipfsid := spt.getIPFSID(ctx)
	pins := make(map[cid.Cid]api.PinInfo, len(ipsMap))
	for cidstr, ips := range ipsMap {
		c, err := cid.Decode(cidstr)
	in := make(chan []string, 1) // type filter.
	in <- []string{"recursive", "direct"}
	close(in)
	out := make(chan api.IPFSPinInfo, pinsChannelSize)

	go func() {
		err := spt.rpcClient.Stream(
			ctx,
			"",
			"IPFSConnector",
			"PinLs",
			in,
			out,
		)
		if err != nil {
			logger.Error(err)
			continue
		}
		p := api.PinInfo{
			Cid:         c,
			Name:        "",  // to be filled later
			Allocations: nil, // to be filled later
			Origins:     nil, // to be filled later
			//Created:     nil, // to be filled later
			Metadata: nil, // to be filled later
			Peer:     spt.peerID,
			PinInfoShort: api.PinInfoShort{
				PeerName:      spt.peerName,
				IPFS:          ipfsid.ID,
				IPFSAddresses: ipfsid.Addresses,
				Status:        ips.ToTrackerStatus(),
				TS:            time.Now(), // to be set later
				AttemptCount:  0,
				PriorityPin:   false,
			},
		}
		pins[c] = p
	}
	return pins, nil
}

// localStatus returns a joint set of consensusState and ipfsStatus marking
// pins which should be meta or remote and leaving any ipfs pins that aren't
// in the consensusState out. If incExtra is true, Remote and Sharded pins
// will be added to the status slice. If a filter is provided, only statuses
// matching the filter will be returned.
func (spt *Tracker) localStatus(ctx context.Context, incExtra bool, filter api.TrackerStatus) (map[cid.Cid]api.PinInfo, error) {
	ctx, span := trace.StartSpan(ctx, "tracker/stateless/localStatus")
	defer span.End()

	// get shared state
	st, err := spt.getState(ctx)
	if err != nil {
		logger.Error(err)
		return nil, err
	}

	// Only list the full pinset if we are interested in pin types that
	// require it. Otherwise said, this whole method is mostly a no-op
	// when filtering for queued/error items which are all in the operation
	// tracker.
	var statePins <-chan api.Pin
	if filter.Match(
		api.TrackerStatusPinned | api.TrackerStatusUnexpectedlyUnpinned |
			api.TrackerStatusSharded | api.TrackerStatusRemote) {
		statePins, err = st.List(ctx)
		if err != nil {
			logger.Error(err)
			return nil, err
		}
	} else {
		// no state pins
		ch := make(chan api.Pin)
		close(ch)
		statePins = ch
	}

	var localpis map[cid.Cid]api.PinInfo
	// Only query IPFS if we want to status for pinned items
	if filter.Match(api.TrackerStatusPinned | api.TrackerStatusUnexpectedlyUnpinned) {
		localpis, err = spt.ipfsStatusAll(ctx)
		if err != nil {
			logger.Error(err)
			return nil, err
		}
	}

	pininfos := make(map[cid.Cid]api.PinInfo, len(statePins))
	ipfsid := spt.getIPFSID(ctx)
	for p := range statePins {
		ipfsInfo, pinnedInIpfs := localpis[p.Cid]
		// base pinInfo object - status to be filled.
		pinInfo := api.PinInfo{
			Cid:         p.Cid,
			Name:        p.Name,
			Peer:        spt.peerID,
			Allocations: p.Allocations,
			Origins:     p.Origins,
			Created:     p.Timestamp,
			Metadata:    p.Metadata,
			PinInfoShort: api.PinInfoShort{
				PeerName:      spt.peerName,
				IPFS:          ipfsid.ID,
				IPFSAddresses: ipfsid.Addresses,
				TS:            p.Timestamp,
				AttemptCount:  0,
				PriorityPin:   false,
			},
		}

		switch {
		case p.Type == api.MetaType:
			if !incExtra || !filter.Match(api.TrackerStatusSharded) {
				continue
			}
			pinInfo.Status = api.TrackerStatusSharded
			pininfos[p.Cid] = pinInfo
		case p.IsRemotePin(spt.peerID):
			if !incExtra || !filter.Match(api.TrackerStatusRemote) {
				continue
			}
			pinInfo.Status = api.TrackerStatusRemote
			pininfos[p.Cid] = pinInfo
		case pinnedInIpfs: // always false unless filter matches TrackerStatusPinnned
			ipfsInfo.Name = p.Name
			ipfsInfo.TS = p.Timestamp
			ipfsInfo.Allocations = p.Allocations
			ipfsInfo.Origins = p.Origins
			ipfsInfo.Created = p.Timestamp
			ipfsInfo.Metadata = p.Metadata
			pininfos[p.Cid] = ipfsInfo
		default:
			// report as UNEXPECTEDLY_UNPINNED for this peer.
			// this will be overwritten if the operation tracker
			// has more info for this (an ongoing pinning
			// operation). Otherwise, it means something should be
			// pinned and it is not known by IPFS. Should be
			// handled to "recover".

			pinInfo.Status = api.TrackerStatusUnexpectedlyUnpinned
			pinInfo.Error = errUnexpectedlyUnpinned.Error()
			pininfos[p.Cid] = pinInfo
		}
	}
	return pininfos, nil
	}()
	return out, nil
}
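The rpcClient.Stream call above follows go-libp2p-gorpc's streaming convention: request arguments are fed on an in channel and replies arrive on an out channel, which is closed when the stream ends. A sketch of the same pattern against the PinTracker.StatusAll endpoint defined later in this diff (the function name is illustrative; rpc is the go-libp2p-gorpc package):

	func statusAllViaRPC(ctx context.Context, client *rpc.Client, filter api.TrackerStatus) ([]api.PinInfo, error) {
		in := make(chan api.TrackerStatus, 1)
		in <- filter // the handler reads exactly one value
		close(in)
		out := make(chan api.PinInfo, pinsChannelSize)
		errCh := make(chan error, 1)
		go func() {
			errCh <- client.Stream(ctx, "", "PinTracker", "StatusAll", in, out)
		}()
		var infos []api.PinInfo
		for pi := range out { // out is closed when the stream finishes
			infos = append(infos, pi)
		}
		return infos, <-errCh
	}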
// func (spt *Tracker) getErrorsAll(ctx context.Context) []api.PinInfo {

@ -64,13 +64,17 @@ func (mock *mockIPFS) Unpin(ctx context.Context, in api.Pin, out *struct{}) erro
	return nil
}

func (mock *mockIPFS) PinLs(ctx context.Context, in string, out *map[string]api.IPFSPinStatus) error {
	// Must be consistent with PinLsCid
	m := map[string]api.IPFSPinStatus{
		test.Cid1.String(): api.IPFSPinStatusRecursive,
		test.Cid2.String(): api.IPFSPinStatusRecursive,
func (mock *mockIPFS) PinLs(ctx context.Context, in <-chan []string, out chan<- api.IPFSPinInfo) error {
	out <- api.IPFSPinInfo{
		Cid:  api.Cid(test.Cid1),
		Type: api.IPFSPinStatusRecursive,
	}
	*out = m

	out <- api.IPFSPinInfo{
		Cid:  api.Cid(test.Cid2),
		Type: api.IPFSPinStatusRecursive,
	}
	close(out)
	return nil
}

@ -207,7 +211,7 @@ func TestTrackUntrackWithCancel(t *testing.T) {

	time.Sleep(100 * time.Millisecond) // let pinning start

	pInfo := spt.optracker.Get(ctx, slowPin.Cid)
	pInfo := spt.optracker.Get(ctx, slowPin.Cid, api.IPFSID{})
	if pInfo.Status == api.TrackerStatusUnpinned {
		t.Fatal("slowPin should be tracked")
	}

@ -264,7 +268,7 @@ func TestTrackUntrackWithNoCancel(t *testing.T) {
	}

	// fastPin should be queued because slow pin is pinning
	fastPInfo := spt.optracker.Get(ctx, fastPin.Cid)
	fastPInfo := spt.optracker.Get(ctx, fastPin.Cid, api.IPFSID{})
	if fastPInfo.Status == api.TrackerStatusUnpinned {
		t.Fatal("fastPin should be tracked")
	}

@ -281,7 +285,7 @@ func TestTrackUntrackWithNoCancel(t *testing.T) {
		t.Errorf("fastPin should be queued to pin but is %s", fastPInfo.Status)
	}

	pi := spt.optracker.Get(ctx, fastPin.Cid)
	pi := spt.optracker.Get(ctx, fastPin.Cid, api.IPFSID{})
	if pi.Cid == cid.Undef {
		t.Error("fastPin should have been removed from tracker")
	}

@ -313,7 +317,7 @@ func TestUntrackTrackWithCancel(t *testing.T) {

	time.Sleep(100 * time.Millisecond)

	pi := spt.optracker.Get(ctx, slowPin.Cid)
	pi := spt.optracker.Get(ctx, slowPin.Cid, api.IPFSID{})
	if pi.Cid == cid.Undef {
		t.Fatal("expected slowPin to be tracked")
	}

@ -374,7 +378,7 @@ func TestUntrackTrackWithNoCancel(t *testing.T) {
		t.Fatal(err)
	}

	pi := spt.optracker.Get(ctx, fastPin.Cid)
	pi := spt.optracker.Get(ctx, fastPin.Cid, api.IPFSID{})
	if pi.Cid == cid.Undef {
		t.Fatal("c untrack operation should be tracked")
	}

@ -405,11 +409,10 @@ func TestStatusAll(t *testing.T) {
	// - Build a state with two pins (Cid1, Cid4)
	// - The IPFS Mock reports Cid1 and Cid2
	// - Track a SlowCid additionally

	spt := testStatelessPinTracker(t, normalPin, normalPin2)
	slowPin := api.PinWithOpts(test.SlowCid1, pinOpts)
	spt := testStatelessPinTracker(t, normalPin, normalPin2, slowPin)
	defer spt.Shutdown(ctx)

	slowPin := api.PinWithOpts(test.SlowCid1, pinOpts)
	err := spt.Track(ctx, slowPin)
	if err != nil {
		t.Fatal(err)

@ -421,20 +424,23 @@ func TestStatusAll(t *testing.T) {
	// * A slow CID pinning
	// * Cid1 is pinned
	// * Cid4 should be in PinError (it's in the state but not on IPFS)
	stAll := spt.StatusAll(ctx, api.TrackerStatusUndefined)
	if len(stAll) != 3 {
		t.Errorf("wrong status length. Expected 3, got: %d", len(stAll))
	stAll := make(chan api.PinInfo, 10)
	err = spt.StatusAll(ctx, api.TrackerStatusUndefined, stAll)
	if err != nil {
		t.Fatal(err)
	}

	for _, pi := range stAll {
	n := 0
	for pi := range stAll {
		n++
		switch pi.Cid {
		case test.Cid1:
			if pi.Status != api.TrackerStatusPinned {
				t.Error("cid1 should be pinned")
				t.Error(test.Cid1, " should be pinned")
			}
		case test.Cid4:
			if pi.Status != api.TrackerStatusUnexpectedlyUnpinned {
				t.Error("cid2 should be in unexpectedly_unpinned status")
				t.Error(test.Cid2, " should be in unexpectedly_unpinned status")
			}
		case test.SlowCid1:
			if pi.Status != api.TrackerStatusPinning {

@ -447,6 +453,9 @@ func TestStatusAll(t *testing.T) {
			t.Error("IPFS field should be set")
		}
	}
	if n != 3 {
		t.Errorf("wrong status length. Expected 3, got: %d", n)
	}
}

// TestStatus checks that the Status calls correctly reports tracked

@ -565,12 +574,3 @@ func TestAttemptCountAndPriority(t *testing.T) {
		t.Errorf("errPin should have 2 attempt counts to unpin: %+v", st)
	}
}

func BenchmarkTracker_localStatus(b *testing.B) {
	tracker := testStatelessPinTracker(b)
	ctx := context.Background()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		tracker.localStatus(ctx, true, api.TrackerStatusUndefined)
	}
}
rpc_api.go

@ -2,6 +2,7 @@ package ipfscluster

import (
	"context"
	"errors"

	"github.com/ipfs/ipfs-cluster/api"
	"github.com/ipfs/ipfs-cluster/state"

@ -32,6 +33,8 @@ const (

// RPCEndpointType controls how access is granted to an RPC endpoint
type RPCEndpointType int

const rpcStreamBufferSize = 1024

// A trick to find where something is used (i.e. Cluster.Pin):
// grep -R -B 3 '"Pin"' | grep -C 1 '"Cluster"'.
// This does not cover globalPinInfo*(...) broadcasts nor redirects to leader

@ -63,6 +66,7 @@ func newRPCServer(c *Cluster) (*rpc.Server, error) {
			version.RPCProtocol,
			rpc.WithServerStatsHandler(&ocgorpc.ServerHandler{}),
			rpc.WithAuthorizeFunc(authF),
			rpc.WithStreamBufferSize(rpcStreamBufferSize),
		)
	} else {
		s = rpc.NewServer(c.host, version.RPCProtocol, rpc.WithAuthorizeFunc(authF))

@ -201,17 +205,7 @@ func (rpcapi *ClusterRPCAPI) UnpinPath(ctx context.Context, in api.PinPath, out

// Pins runs Cluster.Pins().
func (rpcapi *ClusterRPCAPI) Pins(ctx context.Context, in <-chan struct{}, out chan<- api.Pin) error {
	pinCh, err := rpcapi.c.PinsChannel(ctx)
	if err != nil {
		return err
	}

	for pin := range pinCh {
		out <- pin
	}

	close(out)
	return ctx.Err()
	return rpcapi.c.Pins(ctx, out)
}

// PinGet runs Cluster.PinGet().

@ -275,20 +269,15 @@ func (rpcapi *ClusterRPCAPI) Join(ctx context.Context, in api.Multiaddr, out *st
}

// StatusAll runs Cluster.StatusAll().
func (rpcapi *ClusterRPCAPI) StatusAll(ctx context.Context, in api.TrackerStatus, out *[]api.GlobalPinInfo) error {
	pinfos, err := rpcapi.c.StatusAll(ctx, in)
	if err != nil {
		return err
	}
	*out = pinfos
	return nil
func (rpcapi *ClusterRPCAPI) StatusAll(ctx context.Context, in <-chan api.TrackerStatus, out chan<- api.GlobalPinInfo) error {
	filter := <-in
	return rpcapi.c.StatusAll(ctx, filter, out)
}

// StatusAllLocal runs Cluster.StatusAllLocal().
func (rpcapi *ClusterRPCAPI) StatusAllLocal(ctx context.Context, in api.TrackerStatus, out *[]api.PinInfo) error {
	pinfos := rpcapi.c.StatusAllLocal(ctx, in)
	*out = pinfos
	return nil
func (rpcapi *ClusterRPCAPI) StatusAllLocal(ctx context.Context, in <-chan api.TrackerStatus, out chan<- api.PinInfo) error {
	filter := <-in
	return rpcapi.c.StatusAllLocal(ctx, filter, out)
}
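Note the convention these adapters follow: an endpoint that logically takes a single argument still receives it over the in channel, so handlers begin with filter := <-in and then delegate to the channel-based Cluster method, which takes over responsibility for closing out.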
// Status runs Cluster.Status().

@ -309,23 +298,13 @@ func (rpcapi *ClusterRPCAPI) StatusLocal(ctx context.Context, in cid.Cid, out *a
}

// RecoverAll runs Cluster.RecoverAll().
func (rpcapi *ClusterRPCAPI) RecoverAll(ctx context.Context, in struct{}, out *[]api.GlobalPinInfo) error {
	pinfos, err := rpcapi.c.RecoverAll(ctx)
	if err != nil {
		return err
	}
	*out = pinfos
	return nil
func (rpcapi *ClusterRPCAPI) RecoverAll(ctx context.Context, in <-chan struct{}, out chan<- api.GlobalPinInfo) error {
	return rpcapi.c.RecoverAll(ctx, out)
}

// RecoverAllLocal runs Cluster.RecoverAllLocal().
func (rpcapi *ClusterRPCAPI) RecoverAllLocal(ctx context.Context, in struct{}, out *[]api.PinInfo) error {
	pinfos, err := rpcapi.c.RecoverAllLocal(ctx)
	if err != nil {
		return err
	}
	*out = pinfos
	return nil
func (rpcapi *ClusterRPCAPI) RecoverAllLocal(ctx context.Context, in <-chan struct{}, out chan<- api.PinInfo) error {
	return rpcapi.c.RecoverAllLocal(ctx, out)
}

// Recover runs Cluster.Recover().

@ -469,11 +448,17 @@ func (rpcapi *PinTrackerRPCAPI) Untrack(ctx context.Context, in api.Pin, out *st
}

// StatusAll runs PinTracker.StatusAll().
func (rpcapi *PinTrackerRPCAPI) StatusAll(ctx context.Context, in api.TrackerStatus, out *[]api.PinInfo) error {
func (rpcapi *PinTrackerRPCAPI) StatusAll(ctx context.Context, in <-chan api.TrackerStatus, out chan<- api.PinInfo) error {
	ctx, span := trace.StartSpan(ctx, "rpc/tracker/StatusAll")
	defer span.End()
	*out = rpcapi.tracker.StatusAll(ctx, in)
	return nil

	select {
	case <-ctx.Done():
		close(out)
		return ctx.Err()
	case filter := <-in:
		return rpcapi.tracker.StatusAll(ctx, filter, out)
	}
}

// Status runs PinTracker.Status().

@ -486,15 +471,10 @@ func (rpcapi *PinTrackerRPCAPI) Status(ctx context.Context, in cid.Cid, out *api
}

// RecoverAll runs PinTracker.RecoverAll().
func (rpcapi *PinTrackerRPCAPI) RecoverAll(ctx context.Context, in struct{}, out *[]api.PinInfo) error {
func (rpcapi *PinTrackerRPCAPI) RecoverAll(ctx context.Context, in <-chan struct{}, out chan<- api.PinInfo) error {
	ctx, span := trace.StartSpan(ctx, "rpc/tracker/RecoverAll")
	defer span.End()
	pinfos, err := rpcapi.tracker.RecoverAll(ctx)
	if err != nil {
		return err
	}
	*out = pinfos
	return nil
	return rpcapi.tracker.RecoverAll(ctx, out)
}

// Recover runs PinTracker.Recover().

@ -533,13 +513,18 @@ func (rpcapi *IPFSConnectorRPCAPI) PinLsCid(ctx context.Context, in api.Pin, out
}

// PinLs runs IPFSConnector.PinLs().
func (rpcapi *IPFSConnectorRPCAPI) PinLs(ctx context.Context, in string, out *map[string]api.IPFSPinStatus) error {
	m, err := rpcapi.ipfs.PinLs(ctx, in)
	if err != nil {
		return err
func (rpcapi *IPFSConnectorRPCAPI) PinLs(ctx context.Context, in <-chan []string, out chan<- api.IPFSPinInfo) error {
	select {
	case <-ctx.Done():
		close(out)
		return ctx.Err()
	case pinTypes, ok := <-in:
		if !ok {
			close(out)
			return errors.New("no pinType provided for pin/ls")
		}
		return rpcapi.ipfs.PinLs(ctx, pinTypes, out)
	}
	*out = m
	return nil
}

// ConfigKey runs IPFSConnector.ConfigKey().
@ -12,17 +12,17 @@ test_expect_success IPFS,CLUSTER "state cleanup refreshes state on restart (crdt
    ipfs-cluster-ctl pin add "$cid" && sleep 5 &&
    ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid" &&
    ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED" &&
    [ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ] &&
    [ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq -n "[inputs] | length")" ] &&
    cluster_kill && sleep 5 &&
    ipfs-cluster-service --config "test-config" state cleanup -f &&
    cluster_start && sleep 5 &&
    [ 0 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ]
    [ 0 -eq "$(ipfs-cluster-ctl --enc=json status | jq -n "[inputs] | length")" ]
'
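The jq change follows from the streaming output: ipfs-cluster-ctl --enc=json status now prints one JSON document per pin rather than a single JSON array, so jq ". | length" (the length of one array) is replaced by jq -n "[inputs] | length", which collects every streamed document into an array before counting.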
test_expect_success IPFS,CLUSTER "export + cleanup + import == noop (crdt)" '
    cid=`docker exec ipfs sh -c "echo test_54 | ipfs add -q"` &&
    ipfs-cluster-ctl pin add "$cid" && sleep 5 &&
    [ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ] &&
    [ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq -n "[inputs] | length")" ] &&
    cluster_kill && sleep 5 &&
    ipfs-cluster-service --config "test-config" state export -f import.json &&
    ipfs-cluster-service --config "test-config" state cleanup -f &&

@ -30,7 +30,7 @@ test_expect_success IPFS,CLUSTER "export + cleanup + import == noop (crdt)" '
    cluster_start && sleep 5 &&
    ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid" &&
    ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED" &&
    [ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ]
    [ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq -n "[inputs] | length")" ]
'

cluster_kill

@ -42,17 +42,17 @@ test_expect_success IPFS,CLUSTER "state cleanup refreshes state on restart (raft
    ipfs-cluster-ctl pin add "$cid" && sleep 5 &&
    ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid" &&
    ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED" &&
    [ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ] &&
    [ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq -n "[inputs] | length")" ] &&
    cluster_kill && sleep 5 &&
    ipfs-cluster-service --config "test-config" state cleanup -f &&
    cluster_start && sleep 5 &&
    [ 0 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ]
    [ 0 -eq "$(ipfs-cluster-ctl --enc=json status | jq -n "[inputs] | length")" ]
'

test_expect_success IPFS,CLUSTER "export + cleanup + import == noop (raft)" '
    cid=`docker exec ipfs sh -c "echo test_54 | ipfs add -q"` &&
    ipfs-cluster-ctl pin add "$cid" && sleep 5 &&
    [ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ] &&
    [ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq -n "[inputs] | length")" ] &&
    cluster_kill && sleep 5 &&
    ipfs-cluster-service --config "test-config" state export -f import.json &&
    ipfs-cluster-service --config "test-config" state cleanup -f &&

@ -60,7 +60,7 @@ test_expect_success IPFS,CLUSTER "export + cleanup + import == noop (raft)" '
    cluster_start && sleep 5 &&
    ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid" &&
    ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED" &&
    [ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ]
    [ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq -n "[inputs] | length")" ]
'

@ -4,6 +4,7 @@ package dsstate

import (
	"context"
	"fmt"
	"io"

	"github.com/ipfs/ipfs-cluster/api"

@ -122,9 +123,11 @@ func (st *State) Has(ctx context.Context, c cid.Cid) (bool, error) {
	return ok, nil
}

// List returns the unsorted list of all Pins that have been added to the
// datastore.
func (st *State) List(ctx context.Context) (<-chan api.Pin, error) {
// List sends all the pins on the pinset on the given channel.
// Returns and closes channel when done.
func (st *State) List(ctx context.Context, out chan<- api.Pin) error {
	defer close(out)

	_, span := trace.StartSpan(ctx, "state/dsstate/List")
	defer span.End()

@ -134,52 +137,49 @@ func (st *State) List(ctx context.Context) (<-chan api.Pin, error) {

	results, err := st.dsRead.Query(ctx, q)
	if err != nil {
		return nil, err
		return err
	}
	pinsCh := make(chan api.Pin, 1024)
	go func() {
		defer close(pinsCh)
		defer results.Close()

	defer results.Close()

		total := 0
		for r := range results.Next() {
			// Abort if we shutdown.
			select {
			case <-ctx.Done():
				logger.Warningf("Full pinset listing aborted: %s", ctx.Err())
				return
			default:
			}
			if r.Error != nil {
				logger.Errorf("error in query result: %s", r.Error)
				return
			}
			k := ds.NewKey(r.Key)
			ci, err := st.unkey(k)
			if err != nil {
				logger.Warn("bad key (ignoring). key: ", k, "error: ", err)
				continue
			}

			p, err := st.deserializePin(ci, r.Value)
			if err != nil {
				logger.Errorf("error deserializing pin (%s): %s", r.Key, err)
				continue
			}
			pinsCh <- p

			if total > 0 && total%500000 == 0 {
				logger.Infof("Full pinset listing in progress: %d pins so far", total)
			}
			total++
	total := 0
	for r := range results.Next() {
		// Abort if we shutdown.
		select {
		case <-ctx.Done():
			err = fmt.Errorf("full pinset listing aborted: %w", ctx.Err())
			logger.Warning(err)
			return err
		default:
		}
		if total >= 500000 {
			logger.Infof("Full pinset listing finished: %d pins", total)
		if r.Error != nil {
			err := fmt.Errorf("error in query result: %w", r.Error)
			logger.Error(err)
			return err
		}
		k := ds.NewKey(r.Key)
		ci, err := st.unkey(k)
		if err != nil {
			logger.Warn("bad key (ignoring). key: ", k, "error: ", err)
			continue
		}
	}()

	return pinsCh, nil
		p, err := st.deserializePin(ci, r.Value)
		if err != nil {
			logger.Errorf("error deserializing pin (%s): %s", r.Key, err)
			continue
		}
		out <- p

		if total > 0 && total%500000 == 0 {
			logger.Infof("Full pinset listing in progress: %d pins so far", total)
		}
		total++
	}
	if total >= 500000 {
		logger.Infof("Full pinset listing finished: %d pins", total)
	}

	return nil
}
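The channel ownership is inverted compared to the old API: rather than allocating a channel internally and feeding it from a goroutine, List now writes to a caller-supplied channel, closes it on return (the defer close(out) above), and can report failures through an ordinary synchronous error instead of logging and silently closing.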
// Migrate migrates an older state version to the current one.

@ -93,10 +93,13 @@ func TestList(t *testing.T) {
	}()
	st := newState(t)
	st.Add(ctx, c)
	pinCh, err := st.List(ctx)
	if err != nil {
		t.Fatal(err)
	}
	out := make(chan api.Pin)
	go func() {
		err := st.List(ctx, out)
		if err != nil {
			t.Error(err)
		}
	}()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

@ -104,7 +107,7 @@ func TestList(t *testing.T) {
	var list0 api.Pin
	for {
		select {
		case p, ok := <-pinCh:
		case p, ok := <-out:
			if !ok && !list0.Cid.Defined() {
				t.Fatal("should have read list0 first")
			}

@ -10,10 +10,9 @@ import (

type empty struct{}

func (e *empty) List(ctx context.Context) (<-chan api.Pin, error) {
	ch := make(chan api.Pin)
	close(ch)
	return ch, nil
func (e *empty) List(ctx context.Context, out chan<- api.Pin) error {
	close(out)
	return nil
}
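Even this no-op implementation must close the channel it receives: consumers range over the channel until it is closed, so an implementation that returned without closing would leave them blocked forever.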
func (e *empty) Has(ctx context.Context, c cid.Cid) (bool, error) {

@ -34,7 +34,7 @@ type State interface {

// ReadOnly represents the read side of a State.
type ReadOnly interface {
	// List lists all the pins in the state.
	List(context.Context) (<-chan api.Pin, error)
	List(context.Context, chan<- api.Pin) error
	// Has returns true if the state is holding information for a Cid.
	Has(context.Context, cid.Cid) (bool, error)
	// Get returns the information attached to this pin, if any. If the
@ -58,7 +58,7 @@ type mockPinType struct {
	Type string
}

type mockPinLsResp struct {
type mockPinLsAllResp struct {
	Keys map[string]mockPinType
}

@ -268,19 +268,35 @@ func (m *IpfsMock) handler(w http.ResponseWriter, r *http.Request) {
		j, _ := json.Marshal(resp)
		w.Write(j)
	case "pin/ls":
		query := r.URL.Query()
		stream := query.Get("stream") == "true"

		arg, ok := extractCid(r.URL)
		if !ok {
			rMap := make(map[string]mockPinType)
			pins, err := m.pinMap.List(ctx)
			if err != nil {
				goto ERROR
			pins := make(chan api.Pin, 10)

			go func() {
				m.pinMap.List(ctx, pins)
			}()

			if stream {
				for p := range pins {
					j, _ := json.Marshal(api.IPFSPinInfo{
						Cid:  api.Cid(p.Cid),
						Type: p.Mode.ToIPFSPinStatus(),
					})
					w.Write(j)
				}
				break
			} else {
				rMap := make(map[string]mockPinType)
				for p := range pins {
					rMap[p.Cid.String()] = mockPinType{p.Mode.String()}
				}
				j, _ := json.Marshal(mockPinLsAllResp{rMap})
				w.Write(j)
				break
			}
			for p := range pins {
				rMap[p.Cid.String()] = mockPinType{p.Mode.String()}
			}
			j, _ := json.Marshal(mockPinLsResp{rMap})
			w.Write(j)
			break
		}

		cidStr := arg

@ -301,16 +317,28 @@ func (m *IpfsMock) handler(w http.ResponseWriter, r *http.Request) {
			return
		}

		if c.Equals(Cid4) {
			// this is a v1 cid. Do not return default-base32 but base58btc encoding of it
			w.Write([]byte(`{ "Keys": { "zCT5htkdztJi3x4zBNHo8TRvGHPLTdHUdCLKgTGMgQcRKSLoWxK1": { "Type": "recursive" }}}`))
			return
		if stream {
			if c.Equals(Cid4) {
				// this is a v1 cid. Do not return default-base32 but base58btc encoding of it
				w.Write([]byte(`{ "Cid": "zCT5htkdztJi3x4zBNHo8TRvGHPLTdHUdCLKgTGMgQcRKSLoWxK1", "Type": "recursive" }`))
				break
			}
			j, _ := json.Marshal(api.IPFSPinInfo{
				Cid:  api.Cid(pinObj.Cid),
				Type: pinObj.Mode.ToIPFSPinStatus(),
			})
			w.Write(j)
		} else {
			if c.Equals(Cid4) {
				// this is a v1 cid. Do not return default-base32 but base58btc encoding of it
				w.Write([]byte(`{ "Keys": { "zCT5htkdztJi3x4zBNHo8TRvGHPLTdHUdCLKgTGMgQcRKSLoWxK1": { "Type": "recursive" }}}`))
				break
			}
			rMap := make(map[string]mockPinType)
			rMap[cidStr] = mockPinType{pinObj.Mode.String()}
			j, _ := json.Marshal(mockPinLsAllResp{rMap})
			w.Write(j)
		}
		rMap := make(map[string]mockPinType)
		rMap[cidStr] = mockPinType{pinObj.Mode.String()}
		j, _ := json.Marshal(mockPinLsResp{rMap})
		w.Write(j)

	case "swarm/connect":
		arg, ok := extractCid(r.URL)
		if !ok {

@ -424,10 +452,10 @@ func (m *IpfsMock) handler(w http.ResponseWriter, r *http.Request) {

	case "repo/stat":
		sizeOnly := r.URL.Query().Get("size-only")
		pinsCh, err := m.pinMap.List(ctx)
		if err != nil {
			goto ERROR
		}
		pinsCh := make(chan api.Pin, 10)
		go func() {
			m.pinMap.List(ctx, pinsCh)
		}()

		var pins []api.Pin
		for p := range pinsCh {
@ -34,8 +34,8 @@ func NewMockRPCClient(t testing.TB) *rpc.Client {

// NewMockRPCClientWithHost returns a mock ipfs-cluster RPC server
// initialized with a given host.
func NewMockRPCClientWithHost(t testing.TB, h host.Host) *rpc.Client {
	s := rpc.NewServer(h, "mock")
	c := rpc.NewClientWithServer(h, "mock", s)
	s := rpc.NewServer(h, "mock", rpc.WithStreamBufferSize(1024))
	c := rpc.NewClientWithServer(h, "mock", s, rpc.WithMultiStreamBufferSize(1024))
	err := s.RegisterName("Cluster", &mockCluster{})
	if err != nil {
		t.Fatal(err)

@ -230,7 +230,10 @@ func (mock *mockCluster) ConnectGraph(ctx context.Context, in struct{}, out *api
	return nil
}

func (mock *mockCluster) StatusAll(ctx context.Context, in api.TrackerStatus, out *[]api.GlobalPinInfo) error {
func (mock *mockCluster) StatusAll(ctx context.Context, in <-chan api.TrackerStatus, out chan<- api.GlobalPinInfo) error {
	defer close(out)
	filter := <-in

	pid := peer.Encode(PeerID1)
	gPinInfos := []api.GlobalPinInfo{
		{

@ -272,23 +275,21 @@ func (mock *mockCluster) StatusAll(ctx context.Context, in api.TrackerStatus, ou
	// a single peer, we will not have an entry for the cid at all.
	for _, gpi := range gPinInfos {
		for id, pi := range gpi.PeerMap {
			if !in.Match(pi.Status) {
			if !filter.Match(pi.Status) {
				delete(gpi.PeerMap, id)
			}
		}
	}
	filtered := make([]api.GlobalPinInfo, 0, len(gPinInfos))
	for _, gpi := range gPinInfos {
		if len(gpi.PeerMap) > 0 {
			filtered = append(filtered, gpi)
			out <- gpi
		}
	}
	*out = filtered

	return nil
}

func (mock *mockCluster) StatusAllLocal(ctx context.Context, in api.TrackerStatus, out *[]api.PinInfo) error {
func (mock *mockCluster) StatusAllLocal(ctx context.Context, in <-chan api.TrackerStatus, out chan<- api.PinInfo) error {
	return (&mockPinTracker{}).StatusAll(ctx, in, out)
}

@ -324,11 +325,14 @@ func (mock *mockCluster) StatusLocal(ctx context.Context, in cid.Cid, out *api.P
	return (&mockPinTracker{}).Status(ctx, in, out)
}

func (mock *mockCluster) RecoverAll(ctx context.Context, in struct{}, out *[]api.GlobalPinInfo) error {
	return mock.StatusAll(ctx, api.TrackerStatusUndefined, out)
func (mock *mockCluster) RecoverAll(ctx context.Context, in <-chan struct{}, out chan<- api.GlobalPinInfo) error {
	f := make(chan api.TrackerStatus, 1)
	f <- api.TrackerStatusUndefined
	close(f)
	return mock.StatusAll(ctx, f, out)
}

func (mock *mockCluster) RecoverAllLocal(ctx context.Context, in struct{}, out *[]api.PinInfo) error {
func (mock *mockCluster) RecoverAllLocal(ctx context.Context, in <-chan struct{}, out chan<- api.PinInfo) error {
	return (&mockPinTracker{}).RecoverAll(ctx, in, out)
}

@ -421,7 +425,10 @@ func (mock *mockPinTracker) Untrack(ctx context.Context, in api.Pin, out *struct
	return nil
}

func (mock *mockPinTracker) StatusAll(ctx context.Context, in api.TrackerStatus, out *[]api.PinInfo) error {
func (mock *mockPinTracker) StatusAll(ctx context.Context, in <-chan api.TrackerStatus, out chan<- api.PinInfo) error {
	defer close(out)
	filter := <-in

	pinInfos := []api.PinInfo{
		{
			Cid: Cid1,

@ -440,14 +447,11 @@ func (mock *mockPinTracker) StatusAll(ctx context.Context, in api.TrackerStatus,
		},
	}
	filtered := make([]api.PinInfo, 0, len(pinInfos))
	for _, pi := range pinInfos {
		if in.Match(pi.Status) {
			filtered = append(filtered, pi)
		if filter.Match(pi.Status) {
			out <- pi
		}
	}

	*out = filtered
	return nil
}

@ -467,8 +471,8 @@ func (mock *mockPinTracker) Status(ctx context.Context, in cid.Cid, out *api.Pin
	return nil
}

func (mock *mockPinTracker) RecoverAll(ctx context.Context, in struct{}, out *[]api.PinInfo) error {
	*out = make([]api.PinInfo, 0)
func (mock *mockPinTracker) RecoverAll(ctx context.Context, in <-chan struct{}, out chan<- api.PinInfo) error {
	close(out)
	return nil
}

@ -534,12 +538,10 @@ func (mock *mockIPFSConnector) PinLsCid(ctx context.Context, in api.Pin, out *ap
	return nil
}

func (mock *mockIPFSConnector) PinLs(ctx context.Context, in string, out *map[string]api.IPFSPinStatus) error {
	m := map[string]api.IPFSPinStatus{
		Cid1.String(): api.IPFSPinStatusRecursive,
		Cid3.String(): api.IPFSPinStatusRecursive,
	}
	*out = m
func (mock *mockIPFSConnector) PinLs(ctx context.Context, in <-chan []string, out chan<- api.IPFSPinInfo) error {
	out <- api.IPFSPinInfo{Cid: api.Cid(Cid1), Type: api.IPFSPinStatusRecursive}
	out <- api.IPFSPinInfo{Cid: api.Cid(Cid3), Type: api.IPFSPinStatusRecursive}
	close(out)
	return nil
}
@ -300,7 +300,7 @@ func (d *MockDAGService) Get(ctx context.Context, cid cid.Cid) (format.Node, err
	if n, ok := d.Nodes[cid]; ok {
		return n, nil
	}
	return nil, format.ErrNotFound
	return nil, format.ErrNotFound{Cid: cid}
}

// GetMany reads many nodes.

@ -312,7 +312,7 @@ func (d *MockDAGService) GetMany(ctx context.Context, cids []cid.Cid) <-chan *fo
		if n, ok := d.Nodes[c]; ok {
			out <- &format.NodeOption{Node: n}
		} else {
			out <- &format.NodeOption{Err: format.ErrNotFound}
			out <- &format.NodeOption{Err: format.ErrNotFound{Cid: c}}
		}
	}
	close(out)