Merge pull request #1607 from ipfs/feat/streaming-status

Pintracker: streaming methods
Hector Sanjuan 2022-03-22 15:52:34 +01:00 committed by GitHub
commit 2d94c42310
43 changed files with 1606 additions and 971 deletions

View File

@@ -54,6 +54,9 @@ func init() {
 	rand.Seed(time.Now().UnixNano())
 }
 
+// StreamChannelSize is used to define buffer sizes for channels.
+const StreamChannelSize = 1024
+
 // Common errors
 var (
 	// ErrNoEndpointEnabled is returned when the API is created but
@@ -583,19 +586,23 @@ func (api *API) SendResponse(
 	w.WriteHeader(status)
 }
 
-// Iterator is a function that returns the next item.
-type Iterator func() (interface{}, bool, error)
+// StreamIterator is a function that returns the next item. It is used in
+// StreamResponse.
+type StreamIterator func() (interface{}, bool, error)
 
 // StreamResponse reads from an iterator and sends the response.
-func (api *API) StreamResponse(w http.ResponseWriter, next Iterator) {
+func (api *API) StreamResponse(w http.ResponseWriter, next StreamIterator, errCh chan error) {
 	api.SetHeaders(w)
 	enc := json.NewEncoder(w)
 	flusher, flush := w.(http.Flusher)
 	w.Header().Set("Trailer", "X-Stream-Error")
 	total := 0
+	var err error
+	var ok bool
+	var item interface{}
 	for {
-		item, ok, err := next()
+		item, ok, err = next()
 		if total == 0 {
 			if err != nil {
 				st := http.StatusInternalServerError
@@ -612,16 +619,15 @@ func (api *API) StreamResponse(w http.ResponseWriter, next Iterator) {
 				w.WriteHeader(http.StatusNoContent)
 				return
 			}
+			w.WriteHeader(http.StatusOK)
 		}
 		if err != nil {
-			w.Header().Set("X-Stream-Error", err.Error())
-			// trailer error
-			return
+			break
 		}
 		// finish just fine
 		if !ok {
-			return
+			break
 		}
 
 		// we have an item
@@ -635,9 +641,19 @@ func (api *API) StreamResponse(w http.ResponseWriter, next Iterator) {
 			flusher.Flush()
 		}
 	}
+
+	if err != nil {
+		w.Header().Set("X-Stream-Error", err.Error())
+	}
+	// check for function errors
+	for funcErr := range errCh {
+		if funcErr != nil {
+			w.Header().Add("X-Stream-Error", funcErr.Error())
+		}
+	}
 }
 
-// SetsHeaders sets all the headers that are common to all responses
+// SetHeaders sets all the headers that are common to all responses
 // from this API. Called automatically from SendResponse().
 func (api *API) SetHeaders(w http.ResponseWriter) {
 	for header, values := range api.config.Headers {
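
Note on consuming these streams: StreamResponse writes one JSON object after another and announces the X-Stream-Error trailer up front, so errors found mid-stream or reported by the producing goroutine on errCh only surface as trailers after the body. A minimal client-side sketch follows; the endpoint and port are illustrative, not part of this commit:

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Any of the streaming endpoints touched by this commit would do here.
	resp, err := http.Get("http://127.0.0.1:9094/pins")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	dec := json.NewDecoder(resp.Body)
	for {
		// Decode into the concrete type (e.g. api.GlobalPinInfo) in real code.
		var item map[string]interface{}
		if err := dec.Decode(&item); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Println(item)
	}

	// Trailers are only populated once the body has been read to EOF.
	if msg := resp.Trailer.Get("X-Stream-Error"); msg != "" {
		fmt.Println("stream failed mid-way:", msg)
	}
}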

View File

@@ -54,7 +54,7 @@ func ProcessResp(t *testing.T, httpResp *http.Response, err error, resp interfac
 
 // ProcessStreamingResp decodes a streaming response into the given type
 // and fails the test on error.
-func ProcessStreamingResp(t *testing.T, httpResp *http.Response, err error, resp interface{}) {
+func ProcessStreamingResp(t *testing.T, httpResp *http.Response, err error, resp interface{}, trailerError bool) {
 	if err != nil {
 		t.Fatal("error making streaming request: ", err)
 	}
@@ -97,6 +97,13 @@ func ProcessStreamingResp(t *testing.T, httpResp *http.Response, err error, resp
 			}
 		}
 	}
+
+	trailerMsg := httpResp.Trailer.Get("X-Stream-Error")
+	if trailerError && trailerMsg == "" {
+		t.Error("expected trailer error")
+	}
+	if !trailerError && trailerMsg != "" {
+		t.Error("got trailer error: ", trailerMsg)
+	}
 }
 
 // CheckHeaders checks that all the headers are set to what is expected.
@@ -246,19 +253,19 @@ func MakeStreamingPost(t *testing.T, api API, url string, body io.Reader, conten
 	req.Header.Set("Content-Type", contentType)
 	req.Header.Set("Origin", ClientOrigin)
 	httpResp, err := c.Do(req)
-	ProcessStreamingResp(t, httpResp, err, resp)
+	ProcessStreamingResp(t, httpResp, err, resp, false)
 	CheckHeaders(t, api.Headers(), url, httpResp.Header)
 }
 
 // MakeStreamingGet performs a GET request and uses ProcessStreamingResp
-func MakeStreamingGet(t *testing.T, api API, url string, resp interface{}) {
+func MakeStreamingGet(t *testing.T, api API, url string, resp interface{}, trailerError bool) {
 	h := MakeHost(t, api)
 	defer h.Close()
 	c := HTTPClient(t, h, IsHTTPS(url))
 	req, _ := http.NewRequest(http.MethodGet, url, nil)
 	req.Header.Set("Origin", ClientOrigin)
 	httpResp, err := c.Do(req)
-	ProcessStreamingResp(t, httpResp, err, resp)
+	ProcessStreamingResp(t, httpResp, err, resp, trailerError)
 	CheckHeaders(t, api.Headers(), url, httpResp.Header)
 }

View File

@@ -386,10 +386,15 @@ func (proxy *Server) unpinHandler(w http.ResponseWriter, r *http.Request) {
 func (proxy *Server) pinLsHandler(w http.ResponseWriter, r *http.Request) {
 	proxy.setHeaders(w.Header(), r)
 
-	pinLs := ipfsPinLsResp{}
-	pinLs.Keys = make(map[string]ipfsPinType)
-
 	arg := r.URL.Query().Get("arg")
+
+	stream := false
+	streamArg := r.URL.Query().Get("stream")
+	streamArg2 := r.URL.Query().Get("s")
+	if streamArg == "true" || streamArg2 == "true" {
+		stream = true
+	}
+
 	if arg != "" {
 		c, err := cid.Decode(arg)
 		if err != nil {
@@ -409,8 +414,23 @@ func (proxy *Server) pinLsHandler(w http.ResponseWriter, r *http.Request) {
 			ipfsErrorResponder(w, fmt.Sprintf("Error: path '%s' is not pinned", arg), -1)
 			return
 		}
-		pinLs.Keys[pin.Cid.String()] = ipfsPinType{
-			Type: "recursive",
+		if stream {
+			ipinfo := api.IPFSPinInfo{
+				Cid:  api.Cid(pin.Cid),
+				Type: pin.Mode.ToIPFSPinStatus(),
+			}
+			resBytes, _ := json.Marshal(ipinfo)
+			w.WriteHeader(http.StatusOK)
+			w.Write(resBytes)
+		} else {
+			pinLs := ipfsPinLsResp{}
+			pinLs.Keys = make(map[string]ipfsPinType)
+			pinLs.Keys[pin.Cid.String()] = ipfsPinType{
+				Type: "recursive",
+			}
+			resBytes, _ := json.Marshal(pinLs)
+			w.WriteHeader(http.StatusOK)
+			w.Write(resBytes)
 		}
 	} else {
 		in := make(chan struct{})
@@ -432,22 +452,42 @@ func (proxy *Server) pinLsHandler(w http.ResponseWriter, r *http.Request) {
 			)
 		}()
 
-		for pin := range pins {
-			pinLs.Keys[pin.Cid.String()] = ipfsPinType{
-				Type: "recursive",
+		if stream {
+			w.Header().Set("Trailer", "X-Stream-Error")
+			w.WriteHeader(http.StatusOK)
+			for pin := range pins {
+				ipinfo := api.IPFSPinInfo{
+					Cid:  api.Cid(pin.Cid),
+					Type: pin.Mode.ToIPFSPinStatus(),
+				}
+				resBytes, _ := json.Marshal(ipinfo)
+				w.Write(resBytes)
+			}
+			wg.Wait()
+			if err != nil {
+				w.Header().Add("X-Stream-Error", err.Error())
+				return
+			}
+		} else {
+			pinLs := ipfsPinLsResp{}
+			pinLs.Keys = make(map[string]ipfsPinType)
+			for pin := range pins {
+				pinLs.Keys[pin.Cid.String()] = ipfsPinType{
+					Type: "recursive",
+				}
 			}
-		}
 
-		wg.Wait()
-		if err != nil {
-			ipfsErrorResponder(w, err.Error(), -1)
-			return
+			wg.Wait()
+			if err != nil {
+				ipfsErrorResponder(w, err.Error(), -1)
+				return
+			}
+
+			resBytes, _ := json.Marshal(pinLs)
+			w.WriteHeader(http.StatusOK)
+			w.Write(resBytes)
 		}
 	}
-
-	resBytes, _ := json.Marshal(pinLs)
-	w.WriteHeader(http.StatusOK)
-	w.Write(resBytes)
 }
 
 func (proxy *Server) pinUpdateHandler(w http.ResponseWriter, r *http.Request) {
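
Note: with stream=true (or s=true) the proxied pin/ls no longer accumulates a single {"Keys": {...}} map but writes one {"Cid": ..., "Type": ...} object per pin. A sketch of a consumer; the port and the api/v0 path follow the usual ipfsproxy defaults and are assumptions here:

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

// ipfsPinInfo mirrors the JSON shape of api.IPFSPinInfo for decoding only.
type ipfsPinInfo struct {
	Cid  string `json:"Cid"`
	Type string `json:"Type"`
}

func main() {
	resp, err := http.Post("http://127.0.0.1:9095/api/v0/pin/ls?stream=true", "", nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	dec := json.NewDecoder(resp.Body)
	for {
		var p ipfsPinInfo
		if err := dec.Decode(&p); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Println(p.Cid, p.Type)
	}
}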

View File

@@ -346,20 +346,27 @@ func (api *API) listPins(w http.ResponseWriter, r *http.Request) {
 			return
 		}
 	} else {
-		var globalPinInfos []types.GlobalPinInfo
-		err := api.rpcClient.CallContext(
-			r.Context(),
-			"",
-			"Cluster",
-			"StatusAll",
-			tst,
-			&globalPinInfos,
-		)
-		if err != nil {
-			api.SendResponse(w, common.SetStatusAutomatically, err, nil)
-			return
-		}
-		for i, gpi := range globalPinInfos {
+		in := make(chan types.TrackerStatus, 1)
+		in <- tst
+		close(in)
+		out := make(chan types.GlobalPinInfo, common.StreamChannelSize)
+		errCh := make(chan error, 1)
+
+		go func() {
+			defer close(errCh)
+
+			errCh <- api.rpcClient.Stream(
+				r.Context(),
+				"",
+				"Cluster",
+				"StatusAll",
+				in,
+				out,
+			)
+		}()
+
+		i := 0
+		for gpi := range out {
 			st := globalPinInfoToSvcPinStatus(gpi.Cid.String(), gpi)
 			if st.Status == pinsvc.StatusUndefined {
 				// i.e things unpinning
@@ -380,10 +387,17 @@ func (api *API) listPins(w http.ResponseWriter, r *http.Request) {
 				continue
 			}
 			pinList.Results = append(pinList.Results, st)
-			if i+1 == opts.Limit {
+			i++
+			if i == opts.Limit {
 				break
 			}
 		}
+
+		err := <-errCh
+		if err != nil {
+			api.SendResponse(w, common.SetStatusAutomatically, err, nil)
+			return
+		}
 	}
 
 	pinList.Count = len(pinList.Results)
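
Note: the wiring above is the pattern this PR uses throughout: the RPC stream runs in its own goroutine and deposits its final error on a buffered errCh, so the handler can drain out first and check the error afterwards. A stripped-down sketch with placeholder types, where produce stands in for rpcClient.Stream:

package main

import "fmt"

func produce(in <-chan int, out chan<- int) error {
	defer close(out) // closing out ends the consumer's range loop
	for v := range in {
		out <- v * 2
	}
	return nil // a real RPC stream would return its transport error here
}

func main() {
	in := make(chan int, 1)
	in <- 21
	close(in)

	out := make(chan int, 8)
	errCh := make(chan error, 1) // buffered: the goroutine never blocks on send

	go func() {
		defer close(errCh)
		errCh <- produce(in, out)
	}()

	for v := range out {
		fmt.Println(v) // 42
	}
	if err := <-errCh; err != nil {
		fmt.Println("stream failed:", err)
	}
}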

View File

@@ -85,9 +85,9 @@ type Client interface {
 	// is fetched from all cluster peers.
 	Status(ctx context.Context, ci cid.Cid, local bool) (api.GlobalPinInfo, error)
 	// StatusCids status information for the requested CIDs.
-	StatusCids(ctx context.Context, cids []cid.Cid, local bool) ([]api.GlobalPinInfo, error)
+	StatusCids(ctx context.Context, cids []cid.Cid, local bool, out chan<- api.GlobalPinInfo) error
 	// StatusAll gathers Status() for all tracked items.
-	StatusAll(ctx context.Context, filter api.TrackerStatus, local bool) ([]api.GlobalPinInfo, error)
+	StatusAll(ctx context.Context, filter api.TrackerStatus, local bool, out chan<- api.GlobalPinInfo) error
 
 	// Recover retriggers pin or unpin ipfs operations for a Cid in error
 	// state. If local is true, the operation is limited to the current
@@ -96,7 +96,7 @@ type Client interface {
 	// RecoverAll triggers Recover() operations on all tracked items. If
 	// local is true, the operation is limited to the current peer.
 	// Otherwise, it happens everywhere.
-	RecoverAll(ctx context.Context, local bool) ([]api.GlobalPinInfo, error)
+	RecoverAll(ctx context.Context, local bool, out chan<- api.GlobalPinInfo) error
 
 	// Alerts returns information health events in the cluster (expired
 	// metrics etc.).
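
Note: with these signatures the caller owns the output channel and the client closes it when done, so the idiomatic call site runs the method in a goroutine and ranges over the channel. A minimal sketch; the Config contents are placeholders:

package main

import (
	"context"
	"fmt"

	"github.com/ipfs/ipfs-cluster/api"
	client "github.com/ipfs/ipfs-cluster/api/rest/client"
)

func main() {
	c, err := client.NewDefaultClient(&client.Config{})
	if err != nil {
		panic(err)
	}

	out := make(chan api.GlobalPinInfo, 1024)
	go func() {
		// StatusAll closes out when it is done, ending the range below.
		if err := c.StatusAll(context.Background(), api.TrackerStatusUndefined, false, out); err != nil {
			fmt.Println("status stream failed:", err)
		}
	}()

	for gpi := range out {
		fmt.Println(gpi.Cid)
	}
}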

View File

@@ -253,16 +253,13 @@ func (lc *loadBalancingClient) Status(ctx context.Context, ci cid.Cid, local boo
 // StatusCids returns Status() information for the given Cids. If local is
 // true, the information affects only the current peer, otherwise the
 // information is fetched from all cluster peers.
-func (lc *loadBalancingClient) StatusCids(ctx context.Context, cids []cid.Cid, local bool) ([]api.GlobalPinInfo, error) {
-	var pinInfos []api.GlobalPinInfo
+func (lc *loadBalancingClient) StatusCids(ctx context.Context, cids []cid.Cid, local bool, out chan<- api.GlobalPinInfo) error {
 	call := func(c Client) error {
-		var err error
-		pinInfos, err = c.StatusCids(ctx, cids, local)
-		return err
+		return c.StatusCids(ctx, cids, local, out)
 	}
 
 	err := lc.retry(0, call)
-	return pinInfos, err
+	return err
 }
 
 // StatusAll gathers Status() for all tracked items. If a filter is
@@ -270,16 +267,13 @@ func (lc *loadBalancingClient) StatusCids(ctx context.Context, cids []cid.Cid, l
 // will be returned. A filter can be built by merging TrackerStatuses with
 // a bitwise OR operation (st1 | st2 | ...). A "0" filter value (or
 // api.TrackerStatusUndefined), means all.
-func (lc *loadBalancingClient) StatusAll(ctx context.Context, filter api.TrackerStatus, local bool) ([]api.GlobalPinInfo, error) {
-	var pinInfos []api.GlobalPinInfo
+func (lc *loadBalancingClient) StatusAll(ctx context.Context, filter api.TrackerStatus, local bool, out chan<- api.GlobalPinInfo) error {
 	call := func(c Client) error {
-		var err error
-		pinInfos, err = c.StatusAll(ctx, filter, local)
-		return err
+		return c.StatusAll(ctx, filter, local, out)
 	}
 
 	err := lc.retry(0, call)
-	return pinInfos, err
+	return err
 }
 
 // Recover retriggers pin or unpin ipfs operations for a Cid in error state.
@@ -300,16 +294,13 @@ func (lc *loadBalancingClient) Recover(ctx context.Context, ci cid.Cid, local bo
 // RecoverAll triggers Recover() operations on all tracked items. If local is
 // true, the operation is limited to the current peer. Otherwise, it happens
 // everywhere.
-func (lc *loadBalancingClient) RecoverAll(ctx context.Context, local bool) ([]api.GlobalPinInfo, error) {
-	var pinInfos []api.GlobalPinInfo
+func (lc *loadBalancingClient) RecoverAll(ctx context.Context, local bool, out chan<- api.GlobalPinInfo) error {
 	call := func(c Client) error {
-		var err error
-		pinInfos, err = c.RecoverAll(ctx, local)
-		return err
+		return c.RecoverAll(ctx, local, out)
 	}
 
 	err := lc.retry(0, call)
-	return pinInfos, err
+	return err
 }
 
 // Alerts returns things that are wrong with cluster.

View File

@@ -156,11 +156,11 @@ func (c *defaultClient) UnpinPath(ctx context.Context, p string) (api.Pin, error
 // Allocations returns the consensus state listing all tracked items and
 // the peers that should be pinning them.
 func (c *defaultClient) Allocations(ctx context.Context, filter api.PinType, out chan<- api.Pin) error {
+	defer close(out)
+
 	ctx, span := trace.StartSpan(ctx, "client/Allocations")
 	defer span.End()
 
-	defer close(out)
-
 	types := []api.PinType{
 		api.DataType,
 		api.MetaType,
@@ -191,14 +191,13 @@ func (c *defaultClient) Allocations(ctx context.Context, filter api.PinType, out
 	}
 
 	f := url.QueryEscape(strings.Join(strFilter, ","))
-	err := c.doStream(
+	return c.doStream(
 		ctx,
 		"GET",
 		fmt.Sprintf("/allocations?filter=%s", f),
 		nil,
 		nil,
 		handler)
-	return err
 }
 
 // Allocation returns the current allocations for a given Cid.
@@ -233,8 +232,8 @@ func (c *defaultClient) Status(ctx context.Context, ci cid.Cid, local bool) (api
 // StatusCids returns Status() information for the given Cids. If local is
 // true, the information affects only the current peer, otherwise the
 // information is fetched from all cluster peers.
-func (c *defaultClient) StatusCids(ctx context.Context, cids []cid.Cid, local bool) ([]api.GlobalPinInfo, error) {
-	return c.statusAllWithCids(ctx, api.TrackerStatusUndefined, cids, local)
+func (c *defaultClient) StatusCids(ctx context.Context, cids []cid.Cid, local bool, out chan<- api.GlobalPinInfo) error {
+	return c.statusAllWithCids(ctx, api.TrackerStatusUndefined, cids, local, out)
 }
 
 // StatusAll gathers Status() for all tracked items. If a filter is
@@ -242,21 +241,20 @@ func (c *defaultClient) StatusCids(ctx context.Context, cids []cid.Cid, local bo
 // will be returned. A filter can be built by merging TrackerStatuses with
 // a bitwise OR operation (st1 | st2 | ...). A "0" filter value (or
 // api.TrackerStatusUndefined), means all.
-func (c *defaultClient) StatusAll(ctx context.Context, filter api.TrackerStatus, local bool) ([]api.GlobalPinInfo, error) {
-	return c.statusAllWithCids(ctx, filter, nil, local)
+func (c *defaultClient) StatusAll(ctx context.Context, filter api.TrackerStatus, local bool, out chan<- api.GlobalPinInfo) error {
+	return c.statusAllWithCids(ctx, filter, nil, local, out)
 }
 
-func (c *defaultClient) statusAllWithCids(ctx context.Context, filter api.TrackerStatus, cids []cid.Cid, local bool) ([]api.GlobalPinInfo, error) {
+func (c *defaultClient) statusAllWithCids(ctx context.Context, filter api.TrackerStatus, cids []cid.Cid, local bool, out chan<- api.GlobalPinInfo) error {
+	defer close(out)
 	ctx, span := trace.StartSpan(ctx, "client/StatusAll")
 	defer span.End()
 
-	var gpis []api.GlobalPinInfo
 	filterStr := ""
 	if filter != api.TrackerStatusUndefined { // undefined filter means "all"
 		filterStr = filter.String()
 		if filterStr == "" {
-			return nil, errors.New("invalid filter value")
+			return errors.New("invalid filter value")
 		}
 	}
 
@@ -265,16 +263,25 @@ func (c *defaultClient) statusAllWithCids(ctx context.Context, filter api.Tracke
 		cidsStr[i] = c.String()
 	}
 
-	err := c.do(
+	handler := func(dec *json.Decoder) error {
+		var obj api.GlobalPinInfo
+		err := dec.Decode(&obj)
+		if err != nil {
+			return err
+		}
+		out <- obj
+		return nil
+	}
+
+	return c.doStream(
 		ctx,
 		"GET",
 		fmt.Sprintf("/pins?local=%t&filter=%s&cids=%s",
 			local, url.QueryEscape(filterStr), strings.Join(cidsStr, ",")),
 		nil,
 		nil,
-		&gpis,
+		handler,
 	)
-	return gpis, err
 }
 
 // Recover retriggers pin or unpin ipfs operations for a Cid in error state.
@@ -292,13 +299,29 @@ func (c *defaultClient) Recover(ctx context.Context, ci cid.Cid, local bool) (ap
 // RecoverAll triggers Recover() operations on all tracked items. If local is
 // true, the operation is limited to the current peer. Otherwise, it happens
 // everywhere.
-func (c *defaultClient) RecoverAll(ctx context.Context, local bool) ([]api.GlobalPinInfo, error) {
+func (c *defaultClient) RecoverAll(ctx context.Context, local bool, out chan<- api.GlobalPinInfo) error {
+	defer close(out)
 	ctx, span := trace.StartSpan(ctx, "client/RecoverAll")
 	defer span.End()
 
-	var gpis []api.GlobalPinInfo
-	err := c.do(ctx, "POST", fmt.Sprintf("/pins/recover?local=%t", local), nil, nil, &gpis)
-	return gpis, err
+	handler := func(dec *json.Decoder) error {
+		var obj api.GlobalPinInfo
+		err := dec.Decode(&obj)
+		if err != nil {
+			return err
+		}
+		out <- obj
+		return nil
+	}
+
+	return c.doStream(
+		ctx,
+		"POST",
+		fmt.Sprintf("/pins/recover?local=%t", local),
+		nil,
+		nil,
+		handler)
}
 
 // Alerts returns information health events in the cluster (expired metrics

View File

@@ -346,10 +346,16 @@ func TestStatusCids(t *testing.T) {
 	defer shutdown(api)
 
 	testF := func(t *testing.T, c Client) {
-		pins, err := c.StatusCids(ctx, []cid.Cid{test.Cid1}, false)
-		if err != nil {
-			t.Fatal(err)
-		}
+		out := make(chan types.GlobalPinInfo)
+
+		go func() {
+			err := c.StatusCids(ctx, []cid.Cid{test.Cid1}, false, out)
+			if err != nil {
+				t.Error(err)
+			}
+		}()
+
+		pins := collectGlobalPinInfos(t, out)
 		if len(pins) != 1 {
 			t.Fatal("wrong number of pins returned")
 		}
@@ -361,48 +367,87 @@ func TestStatusCids(t *testing.T) {
 	testClients(t, api, testF)
 }
 
+func collectGlobalPinInfos(t *testing.T, out <-chan types.GlobalPinInfo) []types.GlobalPinInfo {
+	t.Helper()
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	var gpis []types.GlobalPinInfo
+	for {
+		select {
+		case <-ctx.Done():
+			t.Error(ctx.Err())
+			return gpis
+		case gpi, ok := <-out:
+			if !ok {
+				return gpis
+			}
+			gpis = append(gpis, gpi)
+		}
+	}
+}
+
 func TestStatusAll(t *testing.T) {
 	ctx := context.Background()
 	api := testAPI(t)
 	defer shutdown(api)
 
 	testF := func(t *testing.T, c Client) {
-		pins, err := c.StatusAll(ctx, 0, false)
-		if err != nil {
-			t.Fatal(err)
-		}
+		out := make(chan types.GlobalPinInfo)
+		go func() {
+			err := c.StatusAll(ctx, 0, false, out)
+			if err != nil {
+				t.Error(err)
+			}
+		}()
+
+		pins := collectGlobalPinInfos(t, out)
 		if len(pins) == 0 {
 			t.Error("there should be some pins")
 		}
 
-		// With local true
-		pins, err = c.StatusAll(ctx, 0, true)
-		if err != nil {
-			t.Fatal(err)
-		}
+		out2 := make(chan types.GlobalPinInfo)
+		go func() {
+			err := c.StatusAll(ctx, 0, true, out2)
+			if err != nil {
+				t.Error(err)
+			}
+		}()
+
+		pins = collectGlobalPinInfos(t, out2)
 		if len(pins) != 2 {
 			t.Error("there should be two pins")
 		}
 
-		// With filter option
-		pins, err = c.StatusAll(ctx, types.TrackerStatusPinning, false)
-		if err != nil {
-			t.Fatal(err)
-		}
+		out3 := make(chan types.GlobalPinInfo)
+		go func() {
+			err := c.StatusAll(ctx, types.TrackerStatusPinning, false, out3)
+			if err != nil {
+				t.Error(err)
+			}
+		}()
+
+		pins = collectGlobalPinInfos(t, out3)
 		if len(pins) != 1 {
 			t.Error("there should be one pin")
 		}
 
-		pins, err = c.StatusAll(ctx, types.TrackerStatusPinned|types.TrackerStatusError, false)
-		if err != nil {
-			t.Fatal(err)
-		}
+		out4 := make(chan types.GlobalPinInfo)
+		go func() {
+			err := c.StatusAll(ctx, types.TrackerStatusPinned|types.TrackerStatusError, false, out4)
+			if err != nil {
+				t.Error(err)
+			}
+		}()
+
+		pins = collectGlobalPinInfos(t, out4)
 		if len(pins) != 2 {
 			t.Error("there should be two pins")
 		}
 
-		_, err = c.StatusAll(ctx, 1<<25, false)
+		out5 := make(chan types.GlobalPinInfo, 1)
+		err := c.StatusAll(ctx, 1<<25, false, out5)
 		if err == nil {
 			t.Error("expected an error")
 		}
@@ -435,12 +480,14 @@ func TestRecoverAll(t *testing.T) {
 	defer shutdown(api)
 
 	testF := func(t *testing.T, c Client) {
-		_, err := c.RecoverAll(ctx, true)
+		out := make(chan types.GlobalPinInfo, 10)
+		err := c.RecoverAll(ctx, true, out)
 		if err != nil {
 			t.Fatal(err)
 		}
-		_, err = c.RecoverAll(ctx, false)
+		out2 := make(chan types.GlobalPinInfo, 10)
+		err = c.RecoverAll(ctx, false, out2)
 		if err != nil {
 			t.Fatal(err)
 		}

View File

@@ -21,7 +21,6 @@ import (
 	"github.com/ipfs/ipfs-cluster/adder/adderutils"
 	types "github.com/ipfs/ipfs-cluster/api"
 	"github.com/ipfs/ipfs-cluster/api/common"
-	"go.uber.org/multierr"
 
 	logging "github.com/ipfs/go-log/v2"
 	"github.com/libp2p/go-libp2p-core/host"
@@ -457,12 +456,15 @@ func (api *API) allocationsHandler(w http.ResponseWriter, r *http.Request) {
 	close(in)
 
 	pins := make(chan types.Pin)
+	errCh := make(chan error, 1)
 	ctx, cancel := context.WithCancel(r.Context())
 	defer cancel()
+
 	go func() {
-		err := api.rpcClient.Stream(
+		defer close(errCh)
+		errCh <- api.rpcClient.Stream(
 			r.Context(),
 			"",
 			"Cluster",
@@ -470,10 +472,6 @@ func (api *API) allocationsHandler(w http.ResponseWriter, r *http.Request) {
 			in,
 			pins,
 		)
-		if err != nil {
-			logger.Error(err)
-			cancel()
-		}
 	}()
 
 	iter := func() (interface{}, bool, error) {
@@ -481,6 +479,7 @@ func (api *API) allocationsHandler(w http.ResponseWriter, r *http.Request) {
 		var ok bool
 	iterloop:
 		for {
+
 			select {
 			case <-ctx.Done():
 				break iterloop
@@ -498,7 +497,7 @@ func (api *API) allocationsHandler(w http.ResponseWriter, r *http.Request) {
 		return p, ok, ctx.Err()
 	}
 
-	api.StreamResponse(w, iter)
+	api.StreamResponse(w, iter, errCh)
 }
 
 func (api *API) allocationHandler(w http.ResponseWriter, r *http.Request) {
@@ -517,6 +516,9 @@ func (api *API) allocationHandler(w http.ResponseWriter, r *http.Request) {
 }
 
 func (api *API) statusAllHandler(w http.ResponseWriter, r *http.Request) {
+	ctx, cancel := context.WithCancel(r.Context())
+	defer cancel()
+
 	queryValues := r.URL.Query()
 	if queryValues.Get("cids") != "" {
 		api.statusCidsHandler(w, r)
@@ -525,8 +527,6 @@ func (api *API) statusAllHandler(w http.ResponseWriter, r *http.Request) {
 
 	local := queryValues.Get("local")
 
-	var globalPinInfos []types.GlobalPinInfo
-
 	filterStr := queryValues.Get("filter")
 	filter := types.TrackerStatusFromString(filterStr)
 	// FIXME: This is a bit lazy, as "invalidxx,pinned" would result in a
@@ -536,42 +536,68 @@ func (api *API) statusAllHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	if local == "true" {
-		var pinInfos []types.PinInfo
+	var iter common.StreamIterator
+	in := make(chan types.TrackerStatus, 1)
+	in <- filter
+	close(in)
+	errCh := make(chan error, 1)
 
-		err := api.rpcClient.CallContext(
-			r.Context(),
-			"",
-			"Cluster",
-			"StatusAllLocal",
-			filter,
-			&pinInfos,
-		)
-		if err != nil {
-			api.SendResponse(w, common.SetStatusAutomatically, err, nil)
-			return
-		}
-		globalPinInfos = pinInfosToGlobal(pinInfos)
+	if local == "true" {
+		out := make(chan types.PinInfo, common.StreamChannelSize)
+		iter = func() (interface{}, bool, error) {
+			select {
+			case <-ctx.Done():
+				return nil, false, ctx.Err()
+			case p, ok := <-out:
+				return p.ToGlobal(), ok, nil
+			}
+		}
+
+		go func() {
+			defer close(errCh)
+			errCh <- api.rpcClient.Stream(
+				r.Context(),
+				"",
+				"Cluster",
+				"StatusAllLocal",
+				in,
+				out,
+			)
+		}()
 	} else {
-		err := api.rpcClient.CallContext(
-			r.Context(),
-			"",
-			"Cluster",
-			"StatusAll",
-			filter,
-			&globalPinInfos,
-		)
-		if err != nil {
-			api.SendResponse(w, common.SetStatusAutomatically, err, nil)
-			return
-		}
+		out := make(chan types.GlobalPinInfo, common.StreamChannelSize)
+		iter = func() (interface{}, bool, error) {
+			select {
+			case <-ctx.Done():
+				return nil, false, ctx.Err()
+			case p, ok := <-out:
+				return p, ok, nil
+			}
+		}
+
+		go func() {
+			defer close(errCh)
+			errCh <- api.rpcClient.Stream(
+				r.Context(),
+				"",
+				"Cluster",
+				"StatusAll",
+				in,
+				out,
+			)
+		}()
 	}
 
-	api.SendResponse(w, common.SetStatusAutomatically, nil, globalPinInfos)
+	api.StreamResponse(w, iter, errCh)
 }
 
 // request statuses for multiple CIDs in parallel.
 func (api *API) statusCidsHandler(w http.ResponseWriter, r *http.Request) {
+	ctx, cancel := context.WithCancel(r.Context())
+	defer cancel()
+
 	queryValues := r.URL.Query()
 	filterCidsStr := strings.Split(queryValues.Get("cids"), ",")
 	var cids []cid.Cid
@@ -587,17 +613,15 @@ func (api *API) statusCidsHandler(w http.ResponseWriter, r *http.Request) {
 
 	local := queryValues.Get("local")
 
-	type gpiResult struct {
-		gpi types.GlobalPinInfo
-		err error
-	}
-	gpiCh := make(chan gpiResult, len(cids))
+	gpiCh := make(chan types.GlobalPinInfo, len(cids))
+	errCh := make(chan error, len(cids))
 	var wg sync.WaitGroup
 	wg.Add(len(cids))
 
 	// Close channel when done
 	go func() {
 		wg.Wait()
+		close(errCh)
 		close(gpiCh)
 	}()
 
@@ -607,14 +631,18 @@ func (api *API) statusCidsHandler(w http.ResponseWriter, r *http.Request) {
 			defer wg.Done()
 			var pinInfo types.PinInfo
 			err := api.rpcClient.CallContext(
-				r.Context(),
+				ctx,
 				"",
 				"Cluster",
 				"StatusLocal",
 				c,
 				&pinInfo,
 			)
-			gpiCh <- gpiResult{gpi: pinInfo.ToGlobal(), err: err}
+			if err != nil {
+				errCh <- err
+				return
+			}
+			gpiCh <- pinInfo.ToGlobal()
 		}(ci)
 	}
 	} else {
@@ -623,25 +651,28 @@ func (api *API) statusCidsHandler(w http.ResponseWriter, r *http.Request) {
 			defer wg.Done()
 			var pinInfo types.GlobalPinInfo
 			err := api.rpcClient.CallContext(
-				r.Context(),
+				ctx,
 				"",
 				"Cluster",
 				"Status",
 				c,
 				&pinInfo,
 			)
-			gpiCh <- gpiResult{gpi: pinInfo, err: err}
+			if err != nil {
+				errCh <- err
+				return
+			}
+			gpiCh <- pinInfo
 		}(ci)
 		}
 	}
 
-	var gpis []types.GlobalPinInfo
-	var err error
-	for gpiResult := range gpiCh {
-		gpis = append(gpis, gpiResult.gpi)
-		err = multierr.Append(err, gpiResult.err)
+	iter := func() (interface{}, bool, error) {
+		gpi, ok := <-gpiCh
+		return gpi, ok, nil
 	}
-	api.SendResponse(w, common.SetStatusAutomatically, err, gpis)
+
+	api.StreamResponse(w, iter, errCh)
 }
 
 func (api *API) statusHandler(w http.ResponseWriter, r *http.Request) {
@@ -676,31 +707,66 @@ func (api *API) statusHandler(w http.ResponseWriter, r *http.Request) {
 }
 
 func (api *API) recoverAllHandler(w http.ResponseWriter, r *http.Request) {
+	ctx, cancel := context.WithCancel(r.Context())
+	defer cancel()
+
 	queryValues := r.URL.Query()
 	local := queryValues.Get("local")
+
+	var iter common.StreamIterator
+	in := make(chan struct{})
+	close(in)
+	errCh := make(chan error, 1)
+
 	if local == "true" {
-		var pinInfos []types.PinInfo
-		err := api.rpcClient.CallContext(
-			r.Context(),
-			"",
-			"Cluster",
-			"RecoverAllLocal",
-			struct{}{},
-			&pinInfos,
-		)
-		api.SendResponse(w, common.SetStatusAutomatically, err, pinInfosToGlobal(pinInfos))
+		out := make(chan types.PinInfo, common.StreamChannelSize)
+		iter = func() (interface{}, bool, error) {
+			select {
+			case <-ctx.Done():
+				return nil, false, ctx.Err()
+			case p, ok := <-out:
+				return p.ToGlobal(), ok, nil
+			}
+		}
+
+		go func() {
+			defer close(errCh)
+			errCh <- api.rpcClient.Stream(
+				r.Context(),
+				"",
+				"Cluster",
+				"RecoverAllLocal",
+				in,
+				out,
+			)
+		}()
 	} else {
-		var globalPinInfos []types.GlobalPinInfo
-		err := api.rpcClient.CallContext(
-			r.Context(),
-			"",
-			"Cluster",
-			"RecoverAll",
-			struct{}{},
-			&globalPinInfos,
-		)
-		api.SendResponse(w, common.SetStatusAutomatically, err, globalPinInfos)
+		out := make(chan types.GlobalPinInfo, common.StreamChannelSize)
+		iter = func() (interface{}, bool, error) {
+			select {
+			case <-ctx.Done():
+				return nil, false, ctx.Err()
+			case p, ok := <-out:
+				return p, ok, nil
+			}
+		}
+
+		go func() {
+			defer close(errCh)
+			errCh <- api.rpcClient.Stream(
+				r.Context(),
+				"",
+				"Cluster",
+				"RecoverAll",
+				in,
+				out,
+			)
+		}()
 	}
+
+	api.StreamResponse(w, iter, errCh)
 }
 
 func (api *API) recoverHandler(w http.ResponseWriter, r *http.Request) {
@@ -772,12 +838,3 @@ func repoGCToGlobal(r types.RepoGC) types.GlobalRepoGC {
 		},
 	}
 }
-
-func pinInfosToGlobal(pInfos []types.PinInfo) []types.GlobalPinInfo {
-	gPInfos := make([]types.GlobalPinInfo, len(pInfos))
-	for i, p := range pInfos {
-		gpi := p.ToGlobal()
-		gPInfos[i] = gpi
-	}
-	return gPInfos
-}

View File

@@ -222,7 +222,7 @@ func TestAPIAddFileEndpointShard(t *testing.T) {
 	defer closer.Close()
 	mpContentType := "multipart/form-data; boundary=" + body.Boundary()
 	resp := api.AddedOutput{}
-	fmtStr1 := "/add?shard=true&repl_min=-1&repl_max=-1&stream-channels=true"
+	fmtStr1 := "/add?shard=true&repl_min=-1&repl_max=-1&stream-channels=true&shard-size=1000000"
 	shardURL := url(rest) + fmtStr1
 	test.MakeStreamingPost(t, rest, shardURL, body, mpContentType, &resp)
 }
@@ -507,14 +507,14 @@ func TestAPIAllocationsEndpoint(t *testing.T) {
 	tf := func(t *testing.T, url test.URLFunc) {
 		var resp []api.Pin
-		test.MakeStreamingGet(t, rest, url(rest)+"/allocations?filter=pin,meta-pin", &resp)
+		test.MakeStreamingGet(t, rest, url(rest)+"/allocations?filter=pin,meta-pin", &resp, false)
 		if len(resp) != 3 ||
 			!resp[0].Cid.Equals(clustertest.Cid1) || !resp[1].Cid.Equals(clustertest.Cid2) ||
 			!resp[2].Cid.Equals(clustertest.Cid3) {
 			t.Error("unexpected pin list: ", resp)
 		}
 
-		test.MakeStreamingGet(t, rest, url(rest)+"/allocations", &resp)
+		test.MakeStreamingGet(t, rest, url(rest)+"/allocations", &resp, false)
 		if len(resp) != 3 ||
 			!resp[0].Cid.Equals(clustertest.Cid1) || !resp[1].Cid.Equals(clustertest.Cid2) ||
 			!resp[2].Cid.Equals(clustertest.Cid3) {
@@ -522,7 +522,7 @@ func TestAPIAllocationsEndpoint(t *testing.T) {
 		}
 
 		errResp := api.Error{}
-		test.MakeStreamingGet(t, rest, url(rest)+"/allocations?filter=invalid", &errResp)
+		test.MakeStreamingGet(t, rest, url(rest)+"/allocations?filter=invalid", &errResp, false)
 		if errResp.Code != http.StatusBadRequest {
 			t.Error("an invalid filter value should 400")
 		}
@@ -615,8 +615,9 @@ func TestAPIStatusAllEndpoint(t *testing.T) {
 	defer rest.Shutdown(ctx)
 
 	tf := func(t *testing.T, url test.URLFunc) {
-		var resp []*api.GlobalPinInfo
-		test.MakeGet(t, rest, url(rest)+"/pins", &resp)
+		var resp []api.GlobalPinInfo
+
+		test.MakeStreamingGet(t, rest, url(rest)+"/pins", &resp, false)
 
 		// mockPinTracker returns 3 items for Cluster.StatusAll
 		if len(resp) != 3 ||
@@ -626,8 +627,8 @@ func TestAPIStatusAllEndpoint(t *testing.T) {
 		}
 
 		// Test local=true
-		var resp2 []*api.GlobalPinInfo
-		test.MakeGet(t, rest, url(rest)+"/pins?local=true", &resp2)
+		var resp2 []api.GlobalPinInfo
+		test.MakeStreamingGet(t, rest, url(rest)+"/pins?local=true", &resp2, false)
 		// mockPinTracker calls pintracker.StatusAll which returns 2
 		// items.
 		if len(resp2) != 2 {
@@ -635,38 +636,38 @@ func TestAPIStatusAllEndpoint(t *testing.T) {
 		}
 
 		// Test with filter
-		var resp3 []*api.GlobalPinInfo
-		test.MakeGet(t, rest, url(rest)+"/pins?filter=queued", &resp3)
+		var resp3 []api.GlobalPinInfo
+		test.MakeStreamingGet(t, rest, url(rest)+"/pins?filter=queued", &resp3, false)
 		if len(resp3) != 0 {
 			t.Errorf("unexpected statusAll+filter=queued resp:\n %+v", resp3)
 		}
 
-		var resp4 []*api.GlobalPinInfo
-		test.MakeGet(t, rest, url(rest)+"/pins?filter=pinned", &resp4)
+		var resp4 []api.GlobalPinInfo
+		test.MakeStreamingGet(t, rest, url(rest)+"/pins?filter=pinned", &resp4, false)
 		if len(resp4) != 1 {
 			t.Errorf("unexpected statusAll+filter=pinned resp:\n %+v", resp4)
 		}
 
-		var resp5 []*api.GlobalPinInfo
-		test.MakeGet(t, rest, url(rest)+"/pins?filter=pin_error", &resp5)
+		var resp5 []api.GlobalPinInfo
+		test.MakeStreamingGet(t, rest, url(rest)+"/pins?filter=pin_error", &resp5, false)
 		if len(resp5) != 1 {
 			t.Errorf("unexpected statusAll+filter=pin_error resp:\n %+v", resp5)
 		}
 
-		var resp6 []*api.GlobalPinInfo
-		test.MakeGet(t, rest, url(rest)+"/pins?filter=error", &resp6)
+		var resp6 []api.GlobalPinInfo
+		test.MakeStreamingGet(t, rest, url(rest)+"/pins?filter=error", &resp6, false)
 		if len(resp6) != 1 {
 			t.Errorf("unexpected statusAll+filter=error resp:\n %+v", resp6)
 		}
 
-		var resp7 []*api.GlobalPinInfo
-		test.MakeGet(t, rest, url(rest)+"/pins?filter=error,pinned", &resp7)
+		var resp7 []api.GlobalPinInfo
+		test.MakeStreamingGet(t, rest, url(rest)+"/pins?filter=error,pinned", &resp7, false)
 		if len(resp7) != 2 {
 			t.Errorf("unexpected statusAll+filter=error,pinned resp:\n %+v", resp7)
 		}
 
 		var errorResp api.Error
-		test.MakeGet(t, rest, url(rest)+"/pins?filter=invalid", &errorResp)
+		test.MakeStreamingGet(t, rest, url(rest)+"/pins?filter=invalid", &errorResp, false)
 		if errorResp.Code != http.StatusBadRequest {
 			t.Error("an invalid filter value should 400")
 		}
@@ -681,32 +682,32 @@ func TestAPIStatusAllWithCidsEndpoint(t *testing.T) {
 	defer rest.Shutdown(ctx)
 
 	tf := func(t *testing.T, url test.URLFunc) {
-		var resp []*api.GlobalPinInfo
+		var resp []api.GlobalPinInfo
 		cids := []string{
 			clustertest.Cid1.String(),
 			clustertest.Cid2.String(),
 			clustertest.Cid3.String(),
 			clustertest.Cid4.String(),
 		}
-		test.MakeGet(t, rest, url(rest)+"/pins/?cids="+strings.Join(cids, ","), &resp)
+		test.MakeStreamingGet(t, rest, url(rest)+"/pins/?cids="+strings.Join(cids, ","), &resp, false)
 
 		if len(resp) != 4 {
 			t.Error("wrong number of responses")
 		}
 
 		// Test local=true
-		var resp2 []*api.GlobalPinInfo
-		test.MakeGet(t, rest, url(rest)+"/pins/?local=true&cids="+strings.Join(cids, ","), &resp2)
+		var resp2 []api.GlobalPinInfo
+		test.MakeStreamingGet(t, rest, url(rest)+"/pins/?local=true&cids="+strings.Join(cids, ","), &resp2, false)
 		if len(resp2) != 4 {
 			t.Error("wrong number of responses")
 		}
 
-		// Test with an error
+		// Test with an error. This should produce a trailer error.
 		cids = append(cids, clustertest.ErrorCid.String())
-		var errorResp api.Error
-		test.MakeGet(t, rest, url(rest)+"/pins/?local=true&cids="+strings.Join(cids, ","), &errorResp)
-		if errorResp.Message != clustertest.ErrBadCid.Error() {
-			t.Error("expected an error")
+		var resp3 []api.GlobalPinInfo
+		test.MakeStreamingGet(t, rest, url(rest)+"/pins/?local=true&cids="+strings.Join(cids, ","), &resp3, true)
+		if len(resp3) != 4 {
+			t.Error("wrong number of responses")
 		}
 	}
@@ -782,14 +783,14 @@ func TestAPIRecoverAllEndpoint(t *testing.T) {
 	defer rest.Shutdown(ctx)
 
 	tf := func(t *testing.T, url test.URLFunc) {
-		var resp []*api.GlobalPinInfo
-		test.MakePost(t, rest, url(rest)+"/pins/recover?local=true", []byte{}, &resp)
+		var resp []api.GlobalPinInfo
+		test.MakeStreamingPost(t, rest, url(rest)+"/pins/recover?local=true", nil, "", &resp)
 
 		if len(resp) != 0 {
 			t.Fatal("bad response length")
 		}
 
-		var resp1 []*api.GlobalPinInfo
-		test.MakePost(t, rest, url(rest)+"/pins/recover", []byte{}, &resp1)
+		var resp1 []api.GlobalPinInfo
+		test.MakeStreamingPost(t, rest, url(rest)+"/pins/recover", nil, "", &resp1)
 		if len(resp1) == 0 {
 			t.Fatal("bad response length")
 		}

View File

@@ -217,6 +217,36 @@ func IPFSPinStatusFromString(t string) IPFSPinStatus {
 	}
 }
 
+// String returns the string form of the status as written by IPFS.
+func (ips IPFSPinStatus) String() string {
+	switch ips {
+	case IPFSPinStatusDirect:
+		return "direct"
+	case IPFSPinStatusRecursive:
+		return "recursive"
+	case IPFSPinStatusIndirect:
+		return "indirect"
+	default:
+		return ""
+	}
+}
+
+// UnmarshalJSON parses a status from JSON
+func (ips *IPFSPinStatus) UnmarshalJSON(b []byte) error {
+	var str string
+	err := json.Unmarshal(b, &str)
+	if err != nil {
+		return err
+	}
+	*ips = IPFSPinStatusFromString(str)
+	return nil
+}
+
+// MarshalJSON converts a status to JSON.
+func (ips IPFSPinStatus) MarshalJSON() ([]byte, error) {
+	return json.Marshal(ips.String())
+}
+
 // IsPinned returns true if the item is pinned as expected by the
 // maxDepth parameter.
 func (ips IPFSPinStatus) IsPinned(maxDepth PinDepth) bool {
@@ -247,6 +277,40 @@ var ipfsPinStatus2TrackerStatusMap = map[IPFSPinStatus]TrackerStatus{
 	IPFSPinStatusError:     TrackerStatusClusterError, //TODO(ajl): check suitability
 }
 
+// Cid is a CID with the MarshalJSON/UnmarshalJSON methods overwritten.
+type Cid cid.Cid
+
+func (c Cid) String() string {
+	return cid.Cid(c).String()
+}
+
+// MarshalJSON marshals a CID as JSON as a normal CID string.
+func (c Cid) MarshalJSON() ([]byte, error) {
+	return json.Marshal(c.String())
+}
+
+// UnmarshalJSON reads a CID from its representation as JSON string.
+func (c *Cid) UnmarshalJSON(b []byte) error {
+	var cidStr string
+	err := json.Unmarshal(b, &cidStr)
+	if err != nil {
+		return err
+	}
+	cc, err := cid.Decode(cidStr)
+	if err != nil {
+		return err
+	}
+	*c = Cid(cc)
+	return nil
+}
+
+// IPFSPinInfo represents an IPFS Pin, which only has a CID and type.
+// Its JSON form is what IPFS returns when querying a pinset.
+type IPFSPinInfo struct {
+	Cid  Cid           `json:"Cid" codec:"c"`
+	Type IPFSPinStatus `json:"Type" codec:"t"`
+}
+
 // GlobalPinInfo contains cluster-wide status information about a tracked Cid,
 // indexed by cluster peer.
 type GlobalPinInfo struct {
@@ -320,6 +384,19 @@ type PinInfoShort struct {
 	PriorityPin bool `json:"priority_pin" codec:"y,omitempty"`
 }
 
+// String provides a string representation of PinInfoShort.
+func (pis PinInfoShort) String() string {
+	var b strings.Builder
+	fmt.Fprintf(&b, "status: %s\n", pis.Status)
+	fmt.Fprintf(&b, "peername: %s\n", pis.PeerName)
+	fmt.Fprintf(&b, "ipfs: %s\n", pis.IPFS)
+	fmt.Fprintf(&b, "ipfsAddresses: %v\n", pis.IPFSAddresses)
+	fmt.Fprintf(&b, "error: %s\n", pis.Error)
+	fmt.Fprintf(&b, "attemptCount: %d\n", pis.AttemptCount)
+	fmt.Fprintf(&b, "priority: %t\n", pis.PriorityPin)
+	return b.String()
+}
+
 // PinInfo holds information about local pins. This is used by the Pin
 // Trackers.
 type PinInfo struct {
@@ -347,6 +424,17 @@ func (pi PinInfo) Defined() bool {
 	return pi.Cid.Defined()
 }
 
+// String provides a string representation of PinInfo.
+func (pi PinInfo) String() string {
+	var b strings.Builder
+	fmt.Fprintf(&b, "cid: %s\n", pi.Cid)
+	fmt.Fprintf(&b, "name: %s\n", pi.Name)
+	fmt.Fprintf(&b, "peer: %s\n", pi.Peer)
+	fmt.Fprintf(&b, "allocations: %v\n", pi.Allocations)
+	fmt.Fprintf(&b, "%s\n", pi.PinInfoShort)
+	return b.String()
+}
+
 // Version holds version information
 type Version struct {
 	Version string `json:"version" codec:"v"`
@@ -571,6 +659,17 @@ func (pm PinMode) String() string {
 	}
 }
 
+// ToIPFSPinStatus converts a PinMode to IPFSPinStatus.
+func (pm PinMode) ToIPFSPinStatus() IPFSPinStatus {
+	if pm == PinModeDirect {
+		return IPFSPinStatusDirect
+	}
+	if pm == PinModeRecursive {
+		return IPFSPinStatusRecursive
+	}
+	return IPFSPinStatusBug
+}
+
 // MarshalJSON converts the PinMode into a readable string in JSON.
 func (pm PinMode) MarshalJSON() ([]byte, error) {
 	return json.Marshal(pm.String())
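
Note: the point of the new api.Cid alias is the wire format: go-cid's default JSON form is the {"/": "..."} object, while the streamed responses here want the bare string. A round-trip sketch under that assumption, with an arbitrary well-known CID:

package main

import (
	"encoding/json"
	"fmt"

	cid "github.com/ipfs/go-cid"
)

// Cid mirrors the type added in this commit: a cid.Cid that marshals as a
// plain string instead of go-cid's default object form.
type Cid cid.Cid

func (c Cid) String() string { return cid.Cid(c).String() }

func (c Cid) MarshalJSON() ([]byte, error) {
	return json.Marshal(c.String())
}

func main() {
	c, err := cid.Decode("QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG")
	if err != nil {
		panic(err)
	}
	b1, _ := json.Marshal(c)      // {"/":"QmYwAPJ..."}
	b2, _ := json.Marshal(Cid(c)) // "QmYwAPJ..."
	fmt.Println(string(b1))
	fmt.Println(string(b2))
}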

View File

@@ -271,7 +271,16 @@ func (c *Cluster) watchPinset() {
 			stateSyncTimer.Reset(c.config.StateSyncInterval)
 		case <-recoverTimer.C:
 			logger.Debug("auto-triggering RecoverAllLocal()")
-			c.RecoverAllLocal(ctx)
+
+			out := make(chan api.PinInfo, 1024)
+			go func() {
+				for range out {
+				}
+			}()
+			err := c.RecoverAllLocal(ctx, out)
+			if err != nil {
+				logger.Error(err)
+			}
 			recoverTimer.Reset(c.config.PinRecoverInterval)
 		case <-c.ctx.Done():
 			if !stateSyncTimer.Stop() {
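
Note: RecoverAllLocal now insists on an output channel even when the results are irrelevant, hence the drain goroutine above: without a reader, the producer would block once the channel buffer fills. A generic sketch of the pattern, with illustrative names:

package main

import "fmt"

func produce(out chan<- int) error {
	defer close(out) // producer closes out; the drain loop then exits
	for i := 0; i < 3; i++ {
		out <- i
	}
	return nil
}

func main() {
	out := make(chan int, 1024)
	go func() {
		for range out { // discard results so produce never blocks
		}
	}()
	if err := produce(out); err != nil {
		fmt.Println(err)
	}
	fmt.Println("done")
}
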
@ -436,6 +445,12 @@ func (c *Cluster) pushPingMetrics(ctx context.Context) {
ticker := time.NewTicker(c.config.MonitorPingInterval) ticker := time.NewTicker(c.config.MonitorPingInterval)
for { for {
select {
case <-ctx.Done():
return
default:
}
c.sendPingMetric(ctx) c.sendPingMetric(ctx)
select { select {
@ -507,11 +522,13 @@ func (c *Cluster) alertsHandler() {
return return
} }
pinCh, err := cState.List(c.ctx) pinCh := make(chan api.Pin, 1024)
if err != nil { go func() {
logger.Warn(err) err = cState.List(c.ctx, pinCh)
return if err != nil {
} logger.Warn(err)
}
}()
for pin := range pinCh { for pin := range pinCh {
if containsPeer(pin.Allocations, alrt.Peer) && distance.isClosest(pin.Cid) { if containsPeer(pin.Allocations, alrt.Peer) && distance.isClosest(pin.Cid) {
@ -529,11 +546,17 @@ func (c *Cluster) watchPeers() {
defer ticker.Stop() defer ticker.Stop()
for { for {
select {
case <-c.ctx.Done():
return
default:
}
select { select {
case <-c.ctx.Done(): case <-c.ctx.Done():
return return
case <-ticker.C: case <-ticker.C:
// logger.Debugf("%s watching peers", c.id) //logger.Debugf("%s watching peers", c.id)
hasMe := false hasMe := false
peers, err := c.consensus.Peers(c.ctx) peers, err := c.consensus.Peers(c.ctx)
if err != nil { if err != nil {
@ -594,11 +617,14 @@ func (c *Cluster) vacatePeer(ctx context.Context, p peer.ID) {
logger.Warn(err) logger.Warn(err)
return return
} }
pinCh, err := cState.List(ctx)
if err != nil { pinCh := make(chan api.Pin, 1024)
logger.Warn(err) go func() {
return err = cState.List(ctx, pinCh)
} if err != nil {
logger.Warn(err)
}
}()
for pin := range pinCh { for pin := range pinCh {
if containsPeer(pin.Allocations, p) { if containsPeer(pin.Allocations, p) {
@ -1070,7 +1096,13 @@ func (c *Cluster) Join(ctx context.Context, addr ma.Multiaddr) error {
} }
// Start pinning items in the state that are not on IPFS yet. // Start pinning items in the state that are not on IPFS yet.
c.RecoverAllLocal(ctx) out := make(chan api.PinInfo, 1024)
// discard outputs
go func() {
for range out {
}
}()
go c.RecoverAllLocal(ctx, out)
logger.Infof("%s: joined %s's cluster", c.id.Pretty(), pid.Pretty()) logger.Infof("%s: joined %s's cluster", c.id.Pretty(), pid.Pretty())
return nil return nil
@ -1100,6 +1132,8 @@ func (c *Cluster) distances(ctx context.Context, exclude peer.ID) (*distanceChec
func (c *Cluster) StateSync(ctx context.Context) error { func (c *Cluster) StateSync(ctx context.Context) error {
_, span := trace.StartSpan(ctx, "cluster/StateSync") _, span := trace.StartSpan(ctx, "cluster/StateSync")
defer span.End() defer span.End()
logger.Debug("StateSync")
ctx = trace.NewContext(c.ctx, span) ctx = trace.NewContext(c.ctx, span)
if c.config.FollowerMode { if c.config.FollowerMode {
@ -1122,10 +1156,13 @@ func (c *Cluster) StateSync(ctx context.Context) error {
return err // could not list peers return err // could not list peers
} }
clusterPins, err := cState.List(ctx) clusterPins := make(chan api.Pin, 1024)
if err != nil { go func() {
return err err = cState.List(ctx, clusterPins)
} if err != nil {
logger.Error(err)
}
}()
// Unpin expired items when we are the closest peer to them. // Unpin expired items when we are the closest peer to them.
for p := range clusterPins { for p := range clusterPins {
@ -1140,24 +1177,29 @@ func (c *Cluster) StateSync(ctx context.Context) error {
return nil return nil
} }
// StatusAll returns the GlobalPinInfo for all tracked Cids in all peers. // StatusAll returns the GlobalPinInfo for all tracked Cids in all peers on
// If an error happens, the slice will contain as much information as // the out channel. This is done by broacasting a StatusAll to all peers. If
// could be fetched from other peers. // an error happens, it is returned. This method blocks until it finishes. The
func (c *Cluster) StatusAll(ctx context.Context, filter api.TrackerStatus) ([]api.GlobalPinInfo, error) { // operation can be aborted by cancelling the context.
func (c *Cluster) StatusAll(ctx context.Context, filter api.TrackerStatus, out chan<- api.GlobalPinInfo) error {
_, span := trace.StartSpan(ctx, "cluster/StatusAll") _, span := trace.StartSpan(ctx, "cluster/StatusAll")
defer span.End() defer span.End()
ctx = trace.NewContext(c.ctx, span) ctx = trace.NewContext(c.ctx, span)
return c.globalPinInfoSlice(ctx, "PinTracker", "StatusAll", filter) in := make(chan api.TrackerStatus, 1)
in <- filter
close(in)
return c.globalPinInfoStream(ctx, "PinTracker", "StatusAll", in, out)
} }
// StatusAllLocal returns the PinInfo for all the tracked Cids in this peer. // StatusAllLocal returns the PinInfo for all the tracked Cids in this peer on
func (c *Cluster) StatusAllLocal(ctx context.Context, filter api.TrackerStatus) []api.PinInfo { // the out channel. It blocks until finished.
func (c *Cluster) StatusAllLocal(ctx context.Context, filter api.TrackerStatus, out chan<- api.PinInfo) error {
_, span := trace.StartSpan(ctx, "cluster/StatusAllLocal") _, span := trace.StartSpan(ctx, "cluster/StatusAllLocal")
defer span.End() defer span.End()
ctx = trace.NewContext(c.ctx, span) ctx = trace.NewContext(c.ctx, span)
return c.tracker.StatusAll(ctx, filter) return c.tracker.StatusAll(ctx, filter, out)
} }
// Status returns the GlobalPinInfo for a given Cid as fetched from all // Status returns the GlobalPinInfo for a given Cid as fetched from all
@ -1206,13 +1248,15 @@ func (c *Cluster) localPinInfoOp(
return pInfo, err return pInfo, err
} }
// RecoverAll triggers a RecoverAllLocal operation on all peers. // RecoverAll triggers a RecoverAllLocal operation on all peers and returns
func (c *Cluster) RecoverAll(ctx context.Context) ([]api.GlobalPinInfo, error) { // GlobalPinInfo objets for all recovered items. This method blocks until
// finished. Operation can be aborted by cancelling the context.
func (c *Cluster) RecoverAll(ctx context.Context, out chan<- api.GlobalPinInfo) error {
_, span := trace.StartSpan(ctx, "cluster/RecoverAll") _, span := trace.StartSpan(ctx, "cluster/RecoverAll")
defer span.End() defer span.End()
ctx = trace.NewContext(c.ctx, span) ctx = trace.NewContext(c.ctx, span)
return c.globalPinInfoSlice(ctx, "Cluster", "RecoverAllLocal", nil) return c.globalPinInfoStream(ctx, "Cluster", "RecoverAllLocal", nil, out)
} }
// RecoverAllLocal triggers a RecoverLocal operation for all Cids tracked // RecoverAllLocal triggers a RecoverLocal operation for all Cids tracked
@ -1222,15 +1266,16 @@ func (c *Cluster) RecoverAll(ctx context.Context) ([]api.GlobalPinInfo, error) {
// is faster than calling Pin on the same CID as it avoids committing an // is faster than calling Pin on the same CID as it avoids committing an
// identical pin to the consensus layer. // identical pin to the consensus layer.
// //
// It returns the list of pins that were re-queued for pinning. // It returns the list of pins that were re-queued for pinning on the out
// channel. It blocks until done.
// //
// RecoverAllLocal is called automatically every PinRecoverInterval. // RecoverAllLocal is called automatically every PinRecoverInterval.
func (c *Cluster) RecoverAllLocal(ctx context.Context) ([]api.PinInfo, error) { func (c *Cluster) RecoverAllLocal(ctx context.Context, out chan<- api.PinInfo) error {
_, span := trace.StartSpan(ctx, "cluster/RecoverAllLocal") _, span := trace.StartSpan(ctx, "cluster/RecoverAllLocal")
defer span.End() defer span.End()
ctx = trace.NewContext(c.ctx, span) ctx = trace.NewContext(c.ctx, span)
return c.tracker.RecoverAll(ctx) return c.tracker.RecoverAll(ctx, out)
} }
// Recover triggers a recover operation for a given Cid in all // Recover triggers a recover operation for a given Cid in all
@ -1261,48 +1306,45 @@ func (c *Cluster) RecoverLocal(ctx context.Context, h cid.Cid) (api.PinInfo, err
return c.localPinInfoOp(ctx, h, c.tracker.Recover) return c.localPinInfoOp(ctx, h, c.tracker.Recover)
} }
// PinsChannel returns a channel from which to read all the pins in the // Pins sends pins on the given out channel as it iterates the full
// pinset, which are part of the current global state. This is the source of // pinset (current global state). This is the source of truth as to which pins
// truth as to which pins are managed and their allocation, but does not // are managed and their allocation, but does not indicate if the item is
// indicate if the item is successfully pinned. For that, use the Status*() // successfully pinned. For that, use the Status*() methods.
// methods.
// //
// The channel can be aborted by cancelling the context. // The operation can be aborted by cancelling the context. This methods blocks
func (c *Cluster) PinsChannel(ctx context.Context) (<-chan api.Pin, error) { // until the operation has completed.
_, span := trace.StartSpan(ctx, "cluster/PinsChannel") func (c *Cluster) Pins(ctx context.Context, out chan<- api.Pin) error {
_, span := trace.StartSpan(ctx, "cluster/Pins")
defer span.End() defer span.End()
ctx = trace.NewContext(c.ctx, span) ctx = trace.NewContext(c.ctx, span)
cState, err := c.consensus.State(ctx) cState, err := c.consensus.State(ctx)
if err != nil { if err != nil {
logger.Error(err) logger.Error(err)
return nil, err return err
} }
return cState.List(ctx) return cState.List(ctx, out)
} }
// Pins returns the list of Cids managed by Cluster and which are part // pinsSlice returns the list of Cids managed by Cluster and which are part
// of the current global state. This is the source of truth as to which // of the current global state. This is the source of truth as to which
// pins are managed and their allocation, but does not indicate if // pins are managed and their allocation, but does not indicate if
// the item is successfully pinned. For that, use StatusAll(). // the item is successfully pinned. For that, use StatusAll().
// //
// It is recommended to use PinsChannel(), as this method is equivalent to // It is recommended to use Pins(), as this method is equivalent to
// loading the full pinset in memory! // loading the full pinset in memory!
func (c *Cluster) Pins(ctx context.Context) ([]api.Pin, error) { func (c *Cluster) pinsSlice(ctx context.Context) ([]api.Pin, error) {
_, span := trace.StartSpan(ctx, "cluster/Pins") out := make(chan api.Pin, 1024)
defer span.End() var err error
ctx = trace.NewContext(c.ctx, span) go func() {
err = c.Pins(ctx, out)
ch, err := c.PinsChannel(ctx) }()
if err != nil {
return nil, err
}
var pins []api.Pin var pins []api.Pin
for pin := range ch { for pin := range out {
pins = append(pins, pin) pins = append(pins, pin)
} }
return pins, ctx.Err() return pins, err
} }
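Note: pinsSlice reads err after the range ends, while the goroutine writes it; the call sites introduced elsewhere in this diff avoid that race with a one-slot error channel. A sketch of the same collect pattern under that idiom (assuming, as everywhere in this change, that the callee closes out):

    out := make(chan api.Pin, 1024)
    errCh := make(chan error, 1)
    go func() {
        defer close(errCh)
        errCh <- c.Pins(ctx, out) // Pins closes out once the state is drained
    }()
    var pins []api.Pin
    for pin := range out {
        pins = append(pins, pin)
    }
    return pins, <-errCh // safe: the error is published before errCh is read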
// PinGet returns information for a single Cid managed by Cluster. // PinGet returns information for a single Cid managed by Cluster.
@ -1751,14 +1793,12 @@ func (c *Cluster) peersWithFilter(ctx context.Context, peers []peer.ID) []api.ID
if rpc.IsAuthorizationError(err) { if rpc.IsAuthorizationError(err) {
continue continue
} }
ids[i] = api.ID{} ids[i] = api.ID{}
ids[i].ID = peers[i] ids[i].ID = peers[i]
ids[i].Error = err.Error() ids[i].Error = err.Error()
} }
return ids return ids
} }
// getTrustedPeers gives the list of trusted peers except the current peer and // getTrustedPeers gives the list of trusted peers except the current peer and
@ -1935,15 +1975,18 @@ func (c *Cluster) globalPinInfoCid(ctx context.Context, comp, method string, h c
return gpin, nil return gpin, nil
} }
func (c *Cluster) globalPinInfoSlice(ctx context.Context, comp, method string, arg interface{}) ([]api.GlobalPinInfo, error) { func (c *Cluster) globalPinInfoStream(ctx context.Context, comp, method string, inChan interface{}, out chan<- api.GlobalPinInfo) error {
ctx, span := trace.StartSpan(ctx, "cluster/globalPinInfoSlice") defer close(out)
ctx, span := trace.StartSpan(ctx, "cluster/globalPinInfoStream")
defer span.End() defer span.End()
if arg == nil { if inChan == nil {
arg = struct{}{} emptyChan := make(chan struct{})
close(emptyChan)
inChan = emptyChan
} }
infos := make([]api.GlobalPinInfo, 0)
fullMap := make(map[cid.Cid]api.GlobalPinInfo) fullMap := make(map[cid.Cid]api.GlobalPinInfo)
var members []peer.ID var members []peer.ID
@ -1954,27 +1997,31 @@ func (c *Cluster) globalPinInfoSlice(ctx context.Context, comp, method string, a
members, err = c.consensus.Peers(ctx) members, err = c.consensus.Peers(ctx)
if err != nil { if err != nil {
logger.Error(err) logger.Error(err)
return nil, err return err
} }
} }
lenMembers := len(members)
replies := make([][]api.PinInfo, lenMembers) msOut := make(chan api.PinInfo)
// We don't have a good timeout proposal for this. Depending on the // We don't have a good timeout proposal for this. Depending on the
// size of the state and the performance of IPFS and the network, this // size of the state and the performance of IPFS and the network, this
// may take moderately long. // may take moderately long.
ctxs, cancels := rpcutil.CtxsWithCancel(ctx, lenMembers) // If we did, this is the place to put it.
defer rpcutil.MultiCancel(cancels) ctx, cancel := context.WithCancel(ctx)
defer cancel()
errs := c.rpcClient.MultiCall( errsCh := make(chan []error, 1)
ctxs, go func() {
members, defer close(errsCh)
comp, errsCh <- c.rpcClient.MultiStream(
method, ctx,
arg, members,
rpcutil.CopyPinInfoSliceToIfaces(replies), comp,
) method,
inChan,
msOut,
)
}()
setPinInfo := func(p api.PinInfo) { setPinInfo := func(p api.PinInfo) {
if !p.Defined() { if !p.Defined() {
@ -1989,20 +2036,25 @@ func (c *Cluster) globalPinInfoSlice(ctx context.Context, comp, method string, a
fullMap[p.Cid] = info fullMap[p.Cid] = info
} }
// make the big collection.
for pin := range msOut {
setPinInfo(pin)
}
// This WAITs until MultiStream is DONE.
erroredPeers := make(map[peer.ID]string) erroredPeers := make(map[peer.ID]string)
for i, r := range replies { errs, ok := <-errsCh
if e := errs[i]; e != nil { // This error must come from not being able to contact that cluster member if ok {
if rpc.IsAuthorizationError(e) { for i, err := range errs {
logger.Debug("rpc auth error", e) if err == nil {
continue continue
} }
logger.Errorf("%s: error in broadcast response from %s: %s ", c.id, members[i], e) if rpc.IsAuthorizationError(err) {
erroredPeers[members[i]] = e.Error() logger.Debug("rpc auth error", err)
continue continue
} }
logger.Errorf("%s: error in broadcast response from %s: %s ", c.id, members[i], err)
for _, pin := range r { erroredPeers[members[i]] = err.Error()
setPinInfo(pin)
} }
} }
@ -2031,10 +2083,16 @@ func (c *Cluster) globalPinInfoSlice(ctx context.Context, comp, method string, a
} }
for _, v := range fullMap { for _, v := range fullMap {
infos = append(infos, v) select {
case <-ctx.Done():
err := fmt.Errorf("%s.%s aborted: %w", comp, method, ctx.Err())
logger.Error(err)
return err
case out <- v:
}
} }
return infos, nil return nil
} }
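Note: the closing loop replays the merged map on out inside a select so that every send stays abortable: if the consumer stops reading, the context cancellation is the only way out of the send. This send-or-abort shape is the idiom to copy whenever producing on a caller-supplied channel:

    select {
    case <-ctx.Done():
        return ctx.Err() // consumer gone or deadline hit: stop producing
    case out <- v:
    }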
func (c *Cluster) getIDForPeer(ctx context.Context, pid peer.ID) (*api.ID, error) { func (c *Cluster) getIDForPeer(ctx context.Context, pid peer.ID) (*api.ID, error) {

View File

@ -64,17 +64,17 @@ func (ipfs *mockConnector) Pin(ctx context.Context, pin api.Pin) error {
if pin.Cid == test.ErrorCid { if pin.Cid == test.ErrorCid {
return errors.New("trying to pin ErrorCid") return errors.New("trying to pin ErrorCid")
} }
ipfs.pins.Store(pin.Cid.String(), pin.MaxDepth) ipfs.pins.Store(pin.Cid, pin.MaxDepth)
return nil return nil
} }
func (ipfs *mockConnector) Unpin(ctx context.Context, c cid.Cid) error { func (ipfs *mockConnector) Unpin(ctx context.Context, c cid.Cid) error {
ipfs.pins.Delete(c.String()) ipfs.pins.Delete(c)
return nil return nil
} }
func (ipfs *mockConnector) PinLsCid(ctx context.Context, pin api.Pin) (api.IPFSPinStatus, error) { func (ipfs *mockConnector) PinLsCid(ctx context.Context, pin api.Pin) (api.IPFSPinStatus, error) {
dI, ok := ipfs.pins.Load(pin.Cid.String()) dI, ok := ipfs.pins.Load(pin.Cid)
if !ok { if !ok {
return api.IPFSPinStatusUnpinned, nil return api.IPFSPinStatusUnpinned, nil
} }
@ -85,8 +85,9 @@ func (ipfs *mockConnector) PinLsCid(ctx context.Context, pin api.Pin) (api.IPFSP
return api.IPFSPinStatusRecursive, nil return api.IPFSPinStatusRecursive, nil
} }
func (ipfs *mockConnector) PinLs(ctx context.Context, filter string) (map[string]api.IPFSPinStatus, error) { func (ipfs *mockConnector) PinLs(ctx context.Context, in []string, out chan<- api.IPFSPinInfo) error {
m := make(map[string]api.IPFSPinStatus) defer close(out)
var st api.IPFSPinStatus var st api.IPFSPinStatus
ipfs.pins.Range(func(k, v interface{}) bool { ipfs.pins.Range(func(k, v interface{}) bool {
switch v.(api.PinDepth) { switch v.(api.PinDepth) {
@ -95,12 +96,13 @@ func (ipfs *mockConnector) PinLs(ctx context.Context, filter string) (map[string
default: default:
st = api.IPFSPinStatusRecursive st = api.IPFSPinStatusRecursive
} }
c := k.(cid.Cid)
m[k.(string)] = st out <- api.IPFSPinInfo{Cid: api.Cid(c), Type: st}
return true return true
}) })
return m, nil return nil
} }
func (ipfs *mockConnector) SwarmPeers(ctx context.Context) ([]peer.ID, error) { func (ipfs *mockConnector) SwarmPeers(ctx context.Context) ([]peer.ID, error) {
@ -795,7 +797,7 @@ func TestClusterPins(t *testing.T) {
pinDelay() pinDelay()
pins, err := cl.Pins(ctx) pins, err := cl.pinsSlice(ctx)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -942,10 +944,16 @@ func TestClusterRecoverAllLocal(t *testing.T) {
pinDelay() pinDelay()
recov, err := cl.RecoverAllLocal(ctx) out := make(chan api.PinInfo, 10)
if err != nil { go func() {
t.Error("did not expect an error") err := cl.RecoverAllLocal(ctx, out)
} if err != nil {
t.Error("did not expect an error")
}
}()
recov := collectPinInfos(t, out)
if len(recov) != 1 { if len(recov) != 1 {
t.Fatalf("there should be one pin recovered, got = %d", len(recov)) t.Fatalf("there should be one pin recovered, got = %d", len(recov))
} }

View File

@ -39,17 +39,23 @@ func jsonFormatObject(resp interface{}) {
} }
func jsonFormatPrint(obj interface{}) { func jsonFormatPrint(obj interface{}) {
print := func(o interface{}) {
j, err := json.MarshalIndent(o, "", " ")
checkErr("generating json output", err)
fmt.Printf("%s\n", j)
}
switch r := obj.(type) { switch r := obj.(type) {
case chan api.Pin: case chan api.Pin:
for o := range r { for o := range r {
j, err := json.MarshalIndent(o, "", " ") print(o)
checkErr("generating json output", err) }
fmt.Printf("%s\n", j) case chan api.GlobalPinInfo:
for o := range r {
print(o)
} }
default: default:
j, err := json.MarshalIndent(obj, "", " ") print(obj)
checkErr("generating json output", err)
fmt.Printf("%s\n", j)
} }
} }
@ -82,8 +88,8 @@ func textFormatObject(resp interface{}) {
for _, item := range r { for _, item := range r {
textFormatObject(item) textFormatObject(item)
} }
case []api.GlobalPinInfo: case chan api.GlobalPinInfo:
for _, item := range r { for item := range r {
textFormatObject(item) textFormatObject(item)
} }
case chan api.Pin: case chan api.Pin:

View File

@ -888,21 +888,31 @@ separated list). The following are valid status values:
checkErr("parsing cid", err) checkErr("parsing cid", err)
cids[i] = ci cids[i] = ci
} }
if len(cids) == 1 { out := make(chan api.GlobalPinInfo, 1024)
resp, cerr := globalClient.Status(ctx, cids[0], c.Bool("local")) chErr := make(chan error, 1)
formatResponse(c, resp, cerr) go func() {
} else if len(cids) > 1 { defer close(chErr)
resp, cerr := globalClient.StatusCids(ctx, cids, c.Bool("local"))
formatResponse(c, resp, cerr) if len(cids) == 1 {
} else { resp, cerr := globalClient.Status(ctx, cids[0], c.Bool("local"))
filterFlag := c.String("filter") out <- resp
filter := api.TrackerStatusFromString(c.String("filter")) chErr <- cerr
if filter == api.TrackerStatusUndefined && filterFlag != "" { close(out)
checkErr("parsing filter flag", errors.New("invalid filter name")) } else if len(cids) > 1 {
chErr <- globalClient.StatusCids(ctx, cids, c.Bool("local"), out)
} else {
filterFlag := c.String("filter")
filter := api.TrackerStatusFromString(c.String("filter"))
if filter == api.TrackerStatusUndefined && filterFlag != "" {
checkErr("parsing filter flag", errors.New("invalid filter name"))
}
chErr <- globalClient.StatusAll(ctx, filter, c.Bool("local"), out)
} }
resp, cerr := globalClient.StatusAll(ctx, filter, c.Bool("local")) }()
formatResponse(c, resp, cerr)
} formatResponse(c, out, nil)
err := <-chErr
formatResponse(c, nil, err)
return nil return nil
}, },
}, },
@ -932,8 +942,15 @@ operations on the contacted peer (as opposed to on every peer).
resp, cerr := globalClient.Recover(ctx, ci, c.Bool("local")) resp, cerr := globalClient.Recover(ctx, ci, c.Bool("local"))
formatResponse(c, resp, cerr) formatResponse(c, resp, cerr)
} else { } else {
resp, cerr := globalClient.RecoverAll(ctx, c.Bool("local")) out := make(chan api.GlobalPinInfo, 1024)
formatResponse(c, resp, cerr) errCh := make(chan error, 1)
go func() {
defer close(errCh)
errCh <- globalClient.RecoverAll(ctx, c.Bool("local"), out)
}()
formatResponse(c, out, nil)
err := <-errCh
formatResponse(c, nil, err)
} }
return nil return nil
}, },
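Note: both commands above repeat the same choreography: run the client call in a goroutine, hand the channel to formatResponse (which blocks until the callee closes it), then drain the error channel and report any trailing failure with a second formatResponse call. A sketch of that choreography factored into a hypothetical helper (streamFormat is not part of this change):

    func streamFormat(c *cli.Context, run func(chan<- api.GlobalPinInfo) error) {
        out := make(chan api.GlobalPinInfo, 1024)
        errCh := make(chan error, 1)
        go func() {
            defer close(errCh)
            errCh <- run(out)
        }()
        formatResponse(c, out, nil)     // streams items as they arrive
        formatResponse(c, nil, <-errCh) // reports a trailing error, if any
    }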

View File

@ -493,14 +493,17 @@ func printStatusOnline(absPath, clusterName string) error {
if err != nil { if err != nil {
return cli.Exit(errors.Wrap(err, "error creating client"), 1) return cli.Exit(errors.Wrap(err, "error creating client"), 1)
} }
gpis, err := client.StatusAll(ctx, 0, true)
if err != nil {
return err
}
// do not return errors after this. out := make(chan api.GlobalPinInfo, 1024)
errCh := make(chan error, 1)
go func() {
defer close(errCh)
errCh <- client.StatusAll(ctx, 0, true, out)
}()
var pid string var pid string
for _, gpi := range gpis { for gpi := range out {
if pid == "" { // do this once if pid == "" { // do this once
// PeerMap will only have one key // PeerMap will only have one key
for k := range gpi.PeerMap { for k := range gpi.PeerMap {
@ -511,7 +514,8 @@ func printStatusOnline(absPath, clusterName string) error {
pinInfo := gpi.PeerMap[pid] pinInfo := gpi.PeerMap[pid]
printPin(gpi.Cid, pinInfo.Status.String(), gpi.Name, pinInfo.Error) printPin(gpi.Cid, pinInfo.Status.String(), gpi.Name, pinInfo.Error)
} }
return nil err = <-errCh
return err
} }
func printStatusOffline(cfgHelper *cmdutils.ConfigHelper) error { func printStatusOffline(cfgHelper *cmdutils.ConfigHelper) error {
@ -528,14 +532,20 @@ func printStatusOffline(cfgHelper *cmdutils.ConfigHelper) error {
if err != nil { if err != nil {
return err return err
} }
pins, err := st.List(context.Background())
if err != nil { out := make(chan api.Pin, 1024)
return err errCh := make(chan error, 1)
} go func() {
for pin := range pins { defer close(errCh)
errCh <- st.List(context.Background(), out)
}()
for pin := range out {
printPin(pin.Cid, "offline", pin.Name, "") printPin(pin.Cid, "offline", pin.Name, "")
} }
return nil
err = <-errCh
return err
} }
func printPin(c cid.Cid, status, name, err string) { func printPin(c cid.Cid, status, name, err string) {

View File

@ -222,16 +222,22 @@ func importState(r io.Reader, st state.State, opts api.PinOptions) error {
// ExportState saves a json representation of a state // ExportState saves a json representation of a state
func exportState(w io.Writer, st state.State) error { func exportState(w io.Writer, st state.State) error {
pins, err := st.List(context.Background()) out := make(chan api.Pin, 10000)
errCh := make(chan error, 1)
go func() {
defer close(errCh)
errCh <- st.List(context.Background(), out)
}()
var err error
enc := json.NewEncoder(w)
for pin := range out {
if err == nil {
err = enc.Encode(pin)
}
}
if err != nil { if err != nil {
return err return err
} }
enc := json.NewEncoder(w) err = <-errCh
for pin := range pins { return err
err := enc.Encode(pin)
if err != nil {
return err
}
}
return nil
} }
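Note: exportState now writes the pinset as newline-delimited JSON, one pin per Encode call, so a dump streams out without ever holding the full pinset in memory. A hedged sketch of the matching read side (json.Decoder consumes concatenated JSON values until EOF; the st.Add call is illustrative):

    dec := json.NewDecoder(r)
    for {
        var pin api.Pin
        err := dec.Decode(&pin)
        if err == io.EOF {
            break // clean end of the dump
        }
        if err != nil {
            return err
        }
        st.Add(ctx, pin) // hand each pin back to the state
    }
    return nil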

View File

@ -125,14 +125,14 @@ func TestConsensusPin(t *testing.T) {
t.Fatal("error getting state:", err) t.Fatal("error getting state:", err)
} }
ch, err := st.List(ctx) out := make(chan api.Pin, 10)
err = st.List(ctx, out)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
var pins []api.Pin var pins []api.Pin
for p := range out {
for p := range ch {
pins = append(pins, p) pins = append(pins, p)
} }
@ -186,14 +186,16 @@ func TestConsensusUpdate(t *testing.T) {
t.Fatal("error getting state:", err) t.Fatal("error getting state:", err)
} }
ch, err := st.List(ctx) // Channel will not block sending because there is plenty of space
out := make(chan api.Pin, 100)
err = st.List(ctx, out)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
var pins []api.Pin var pins []api.Pin
for p := range ch { for p := range out {
pins = append(pins, p) pins = append(pins, p)
} }
@ -243,14 +245,15 @@ func TestConsensusAddRmPeer(t *testing.T) {
t.Fatal("error getting state:", err) t.Fatal("error getting state:", err)
} }
ch, err := st.List(ctx) out := make(chan api.Pin, 100)
err = st.List(ctx, out)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
var pins []api.Pin var pins []api.Pin
for p := range ch { for p := range out {
pins = append(pins, p) pins = append(pins, p)
} }
@ -310,14 +313,15 @@ func TestConsensusDistrustPeer(t *testing.T) {
t.Fatal("error getting state:", err) t.Fatal("error getting state:", err)
} }
ch, err := st.List(ctx) out := make(chan api.Pin, 10)
err = st.List(ctx, out)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
var pins []api.Pin var pins []api.Pin
for p := range ch { for p := range out {
pins = append(pins, p) pins = append(pins, p)
} }
@ -372,14 +376,15 @@ func TestOfflineState(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
ch, err := offlineState.List(ctx) out := make(chan api.Pin, 100)
err = offlineState.List(ctx, out)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
var pins []api.Pin var pins []api.Pin
for p := range ch { for p := range out {
pins = append(pins, p) pins = append(pins, p)
} }
@ -412,14 +417,15 @@ func TestBatching(t *testing.T) {
time.Sleep(250 * time.Millisecond) time.Sleep(250 * time.Millisecond)
ch, err := st.List(ctx) out := make(chan api.Pin, 100)
err = st.List(ctx, out)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
var pins []api.Pin var pins []api.Pin
for p := range ch { for p := range out {
pins = append(pins, p) pins = append(pins, p)
} }
@ -430,14 +436,15 @@ func TestBatching(t *testing.T) {
// Trigger batch auto-commit by time // Trigger batch auto-commit by time
time.Sleep(time.Second) time.Sleep(time.Second)
ch, err = st.List(ctx) out = make(chan api.Pin, 100)
err = st.List(ctx, out)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
pins = nil pins = nil
for p := range ch { for p := range out {
pins = append(pins, p) pins = append(pins, p)
} }
@ -456,13 +463,14 @@ func TestBatching(t *testing.T) {
// Give a chance for things to persist // Give a chance for things to persist
time.Sleep(250 * time.Millisecond) time.Sleep(250 * time.Millisecond)
ch, err = st.List(ctx) out = make(chan api.Pin, 100)
err = st.List(ctx, out)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
pins = nil pins = nil
for p := range ch { for p := range out {
pins = append(pins, p) pins = append(pins, p)
} }
@ -472,12 +480,14 @@ func TestBatching(t *testing.T) {
// wait for the last pin // wait for the last pin
time.Sleep(time.Second) time.Sleep(time.Second)
ch, err = st.List(ctx)
out = make(chan api.Pin, 100)
err = st.List(ctx, out)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
pins = nil pins = nil
for p := range ch { for p := range out {
pins = append(pins, p) pins = append(pins, p)
} }

View File

@ -99,13 +99,14 @@ func TestConsensusPin(t *testing.T) {
t.Fatal("error getting state:", err) t.Fatal("error getting state:", err)
} }
ch, err := st.List(ctx) out := make(chan api.Pin, 10)
err = st.List(ctx, out)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
var pins []api.Pin var pins []api.Pin
for p := range ch { for p := range out {
pins = append(pins, p) pins = append(pins, p)
} }
@ -154,13 +155,14 @@ func TestConsensusUpdate(t *testing.T) {
t.Fatal("error getting state:", err) t.Fatal("error getting state:", err)
} }
ch, err := st.List(ctx) out := make(chan api.Pin, 10)
err = st.List(ctx, out)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
var pins []api.Pin var pins []api.Pin
for p := range ch { for p := range out {
pins = append(pins, p) pins = append(pins, p)
} }
@ -330,13 +332,15 @@ func TestRaftLatestSnapshot(t *testing.T) {
if err != nil { if err != nil {
t.Fatal("Snapshot bytes returned could not restore to state: ", err) t.Fatal("Snapshot bytes returned could not restore to state: ", err)
} }
ch, err := snapState.List(ctx)
out := make(chan api.Pin, 100)
err = snapState.List(ctx, out)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
var pins []api.Pin var pins []api.Pin
for p := range ch { for p := range out {
pins = append(pins, p) pins = append(pins, p)
} }

View File

@ -27,13 +27,14 @@ func TestApplyToPin(t *testing.T) {
} }
op.ApplyTo(st) op.ApplyTo(st)
ch, err := st.List(ctx) out := make(chan api.Pin, 100)
err = st.List(ctx, out)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
var pins []api.Pin var pins []api.Pin
for p := range ch { for p := range out {
pins = append(pins, p) pins = append(pins, p)
} }
@ -59,11 +60,13 @@ func TestApplyToUnpin(t *testing.T) {
} }
st.Add(ctx, testPin(test.Cid1)) st.Add(ctx, testPin(test.Cid1))
op.ApplyTo(st) op.ApplyTo(st)
pins, err := st.List(ctx)
out := make(chan api.Pin, 100)
err = st.List(ctx, out)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if len(pins) != 0 { if len(out) != 0 {
t.Error("the state was not modified correctly") t.Error("the state was not modified correctly")
} }
} }
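Note: the len(out) assertion works because st.List here runs synchronously: it fills the buffered channel and closes it before returning, and len on a buffered channel reports the items still queued. The corollary is that a synchronous List call can only complete while the pinset fits in the buffer, which is why these tests size their channels generously:

    out := make(chan api.Pin, 100) // must hold the whole (small) test pinset
    err = st.List(ctx, out)        // returns only once everything is buffered
    // len(out) == number of pins streamed and not yet received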

16
go.mod
View File

@ -18,7 +18,7 @@ require (
github.com/ipfs/go-cid v0.1.0 github.com/ipfs/go-cid v0.1.0
github.com/ipfs/go-datastore v0.5.1 github.com/ipfs/go-datastore v0.5.1
github.com/ipfs/go-ds-badger v0.3.0 github.com/ipfs/go-ds-badger v0.3.0
github.com/ipfs/go-ds-crdt v0.3.3 github.com/ipfs/go-ds-crdt v0.3.4
github.com/ipfs/go-ds-leveldb v0.5.0 github.com/ipfs/go-ds-leveldb v0.5.0
github.com/ipfs/go-fs-lock v0.0.7 github.com/ipfs/go-fs-lock v0.0.7
github.com/ipfs/go-ipfs-api v0.3.0 github.com/ipfs/go-ipfs-api v0.3.0
@ -30,10 +30,10 @@ require (
github.com/ipfs/go-ipfs-pinner v0.2.1 github.com/ipfs/go-ipfs-pinner v0.2.1
github.com/ipfs/go-ipfs-posinfo v0.0.1 github.com/ipfs/go-ipfs-posinfo v0.0.1
github.com/ipfs/go-ipld-cbor v0.0.6 github.com/ipfs/go-ipld-cbor v0.0.6
github.com/ipfs/go-ipld-format v0.2.0 github.com/ipfs/go-ipld-format v0.3.0
github.com/ipfs/go-ipns v0.1.2 github.com/ipfs/go-ipns v0.1.2
github.com/ipfs/go-log/v2 v2.5.0 github.com/ipfs/go-log/v2 v2.5.0
github.com/ipfs/go-merkledag v0.5.1 github.com/ipfs/go-merkledag v0.6.0
github.com/ipfs/go-mfs v0.1.3-0.20210507195338-96fbfa122164 github.com/ipfs/go-mfs v0.1.3-0.20210507195338-96fbfa122164
github.com/ipfs/go-path v0.2.2 github.com/ipfs/go-path v0.2.2
github.com/ipfs/go-unixfs v0.3.1 github.com/ipfs/go-unixfs v0.3.1
@ -45,7 +45,7 @@ require (
github.com/libp2p/go-libp2p-connmgr v0.3.1 github.com/libp2p/go-libp2p-connmgr v0.3.1
github.com/libp2p/go-libp2p-consensus v0.0.1 github.com/libp2p/go-libp2p-consensus v0.0.1
github.com/libp2p/go-libp2p-core v0.13.0 github.com/libp2p/go-libp2p-core v0.13.0
github.com/libp2p/go-libp2p-gorpc v0.3.0 github.com/libp2p/go-libp2p-gorpc v0.3.1
github.com/libp2p/go-libp2p-gostream v0.3.1 github.com/libp2p/go-libp2p-gostream v0.3.1
github.com/libp2p/go-libp2p-http v0.2.1 github.com/libp2p/go-libp2p-http v0.2.1
github.com/libp2p/go-libp2p-kad-dht v0.15.0 github.com/libp2p/go-libp2p-kad-dht v0.15.0
@ -119,14 +119,14 @@ require (
github.com/huin/goupnp v1.0.2 // indirect github.com/huin/goupnp v1.0.2 // indirect
github.com/ipfs/bbloom v0.0.4 // indirect github.com/ipfs/bbloom v0.0.4 // indirect
github.com/ipfs/go-bitfield v1.0.0 // indirect github.com/ipfs/go-bitfield v1.0.0 // indirect
github.com/ipfs/go-bitswap v0.5.1 // indirect github.com/ipfs/go-bitswap v0.6.0 // indirect
github.com/ipfs/go-blockservice v0.2.1 // indirect github.com/ipfs/go-blockservice v0.3.0 // indirect
github.com/ipfs/go-cidutil v0.0.2 // indirect github.com/ipfs/go-cidutil v0.0.2 // indirect
github.com/ipfs/go-fetcher v1.6.1 // indirect github.com/ipfs/go-fetcher v1.6.1 // indirect
github.com/ipfs/go-ipfs-blockstore v1.1.2 // indirect github.com/ipfs/go-ipfs-blockstore v1.2.0 // indirect
github.com/ipfs/go-ipfs-delay v0.0.1 // indirect github.com/ipfs/go-ipfs-delay v0.0.1 // indirect
github.com/ipfs/go-ipfs-exchange-interface v0.1.0 // indirect github.com/ipfs/go-ipfs-exchange-interface v0.1.0 // indirect
github.com/ipfs/go-ipfs-exchange-offline v0.1.1 // indirect github.com/ipfs/go-ipfs-exchange-offline v0.2.0 // indirect
github.com/ipfs/go-ipfs-pq v0.0.2 // indirect github.com/ipfs/go-ipfs-pq v0.0.2 // indirect
github.com/ipfs/go-ipfs-provider v0.7.1 // indirect github.com/ipfs/go-ipfs-provider v0.7.1 // indirect
github.com/ipfs/go-ipfs-util v0.0.2 // indirect github.com/ipfs/go-ipfs-util v0.0.2 // indirect

26
go.sum
View File

@ -424,8 +424,9 @@ github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiL
github.com/ipfs/go-bitswap v0.1.3/go.mod h1:YEQlFy0kkxops5Vy+OxWdRSEZIoS7I7KDIwoa5Chkps= github.com/ipfs/go-bitswap v0.1.3/go.mod h1:YEQlFy0kkxops5Vy+OxWdRSEZIoS7I7KDIwoa5Chkps=
github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM= github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM=
github.com/ipfs/go-bitswap v0.3.4/go.mod h1:4T7fvNv/LmOys+21tnLzGKncMeeXUYUd1nUiJ2teMvI= github.com/ipfs/go-bitswap v0.3.4/go.mod h1:4T7fvNv/LmOys+21tnLzGKncMeeXUYUd1nUiJ2teMvI=
github.com/ipfs/go-bitswap v0.5.1 h1:721YAEDBnLIrvcIMkCHCdqp34hA8jwL9yKMkyJpSpco=
github.com/ipfs/go-bitswap v0.5.1/go.mod h1:P+ckC87ri1xFLvk74NlXdP0Kj9RmWAh4+H78sC6Qopo= github.com/ipfs/go-bitswap v0.5.1/go.mod h1:P+ckC87ri1xFLvk74NlXdP0Kj9RmWAh4+H78sC6Qopo=
github.com/ipfs/go-bitswap v0.6.0 h1:f2rc6GZtoSFhEIzQmddgGiel9xntj02Dg0ZNf2hSC+w=
github.com/ipfs/go-bitswap v0.6.0/go.mod h1:Hj3ZXdOC5wBJvENtdqsixmzzRukqd8EHLxZLZc3mzRA=
github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc=
github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY=
github.com/ipfs/go-block-format v0.0.3 h1:r8t66QstRp/pd/or4dpnbVfXT5Gt7lOqRvC+/dDTpMc= github.com/ipfs/go-block-format v0.0.3 h1:r8t66QstRp/pd/or4dpnbVfXT5Gt7lOqRvC+/dDTpMc=
@ -434,8 +435,9 @@ github.com/ipfs/go-blockservice v0.0.7/go.mod h1:EOfb9k/Y878ZTRY/CH0x5+ATtaipfbR
github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M=
github.com/ipfs/go-blockservice v0.1.1/go.mod h1:t+411r7psEUhLueM8C7aPA7cxCclv4O3VsUVxt9kz2I= github.com/ipfs/go-blockservice v0.1.1/go.mod h1:t+411r7psEUhLueM8C7aPA7cxCclv4O3VsUVxt9kz2I=
github.com/ipfs/go-blockservice v0.1.4/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= github.com/ipfs/go-blockservice v0.1.4/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU=
github.com/ipfs/go-blockservice v0.2.1 h1:NJ4j/cwEfIg60rzAWcCIxRtOwbf6ZPK49MewNxObCPQ=
github.com/ipfs/go-blockservice v0.2.1/go.mod h1:k6SiwmgyYgs4M/qt+ww6amPeUH9EISLRBnvUurKJhi8= github.com/ipfs/go-blockservice v0.2.1/go.mod h1:k6SiwmgyYgs4M/qt+ww6amPeUH9EISLRBnvUurKJhi8=
github.com/ipfs/go-blockservice v0.3.0 h1:cDgcZ+0P0Ih3sl8+qjFr2sVaMdysg/YZpLj5WJ8kiiw=
github.com/ipfs/go-blockservice v0.3.0/go.mod h1:P5ppi8IHDC7O+pA0AlGTF09jruB2h+oP3wVVaZl8sfk=
github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
@ -469,8 +471,8 @@ github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBR
github.com/ipfs/go-ds-badger v0.2.7/go.mod h1:02rnztVKA4aZwDuaRPTf8mpqcKmXP7mLl6JPxd14JHA= github.com/ipfs/go-ds-badger v0.2.7/go.mod h1:02rnztVKA4aZwDuaRPTf8mpqcKmXP7mLl6JPxd14JHA=
github.com/ipfs/go-ds-badger v0.3.0 h1:xREL3V0EH9S219kFFueOYJJTcjgNSZ2HY1iSvN7U1Ro= github.com/ipfs/go-ds-badger v0.3.0 h1:xREL3V0EH9S219kFFueOYJJTcjgNSZ2HY1iSvN7U1Ro=
github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek= github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek=
github.com/ipfs/go-ds-crdt v0.3.3 h1:Q7fj+bm/gCfHte3axLQuCEzK1Uhsxgf065WLRvfeb0w= github.com/ipfs/go-ds-crdt v0.3.4 h1:O/dFBkxxXxNO9cjfQwFQHTsoehfJtV1GNAhuRmLh2Dg=
github.com/ipfs/go-ds-crdt v0.3.3/go.mod h1:rcfJixHEd+hIWcu/8SecC/lVlNcAkhE6DNgRKPd1xgU= github.com/ipfs/go-ds-crdt v0.3.4/go.mod h1:bFHBkP56kWufO55QxAKT7qZqz23thrh7FN5l+hYTHa4=
github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc=
github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8= github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8=
github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s=
@ -488,8 +490,9 @@ github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma
github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw=
github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ=
github.com/ipfs/go-ipfs-blockstore v0.2.1/go.mod h1:jGesd8EtCM3/zPgx+qr0/feTXGUeRai6adgwC+Q+JvE= github.com/ipfs/go-ipfs-blockstore v0.2.1/go.mod h1:jGesd8EtCM3/zPgx+qr0/feTXGUeRai6adgwC+Q+JvE=
github.com/ipfs/go-ipfs-blockstore v1.1.2 h1:WCXoZcMYnvOTmlpX+RSSnhVN0uCmbWTeepTGX5lgiXw=
github.com/ipfs/go-ipfs-blockstore v1.1.2/go.mod h1:w51tNR9y5+QXB0wkNcHt4O2aSZjTdqaEWaQdSxEyUOY= github.com/ipfs/go-ipfs-blockstore v1.1.2/go.mod h1:w51tNR9y5+QXB0wkNcHt4O2aSZjTdqaEWaQdSxEyUOY=
github.com/ipfs/go-ipfs-blockstore v1.2.0 h1:n3WTeJ4LdICWs/0VSfjHrlqpPpl6MZ+ySd3j8qz0ykw=
github.com/ipfs/go-ipfs-blockstore v1.2.0/go.mod h1:eh8eTFLiINYNSNawfZOC7HOxNTxpB1PFuA5E1m/7exE=
github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ=
github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk=
github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw= github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw=
@ -510,8 +513,9 @@ github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFq
github.com/ipfs/go-ipfs-exchange-interface v0.1.0 h1:TiMekCrOGQuWYtZO3mf4YJXDIdNgnKWZ9IE3fGlnWfo= github.com/ipfs/go-ipfs-exchange-interface v0.1.0 h1:TiMekCrOGQuWYtZO3mf4YJXDIdNgnKWZ9IE3fGlnWfo=
github.com/ipfs/go-ipfs-exchange-interface v0.1.0/go.mod h1:ych7WPlyHqFvCi/uQI48zLZuAWVP5iTQPXEfVaw5WEI= github.com/ipfs/go-ipfs-exchange-interface v0.1.0/go.mod h1:ych7WPlyHqFvCi/uQI48zLZuAWVP5iTQPXEfVaw5WEI=
github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0=
github.com/ipfs/go-ipfs-exchange-offline v0.1.1 h1:mEiXWdbMN6C7vtDG21Fphx8TGCbZPpQnz/496w/PL4g=
github.com/ipfs/go-ipfs-exchange-offline v0.1.1/go.mod h1:vTiBRIbzSwDD0OWm+i3xeT0mO7jG2cbJYatp3HPk5XY= github.com/ipfs/go-ipfs-exchange-offline v0.1.1/go.mod h1:vTiBRIbzSwDD0OWm+i3xeT0mO7jG2cbJYatp3HPk5XY=
github.com/ipfs/go-ipfs-exchange-offline v0.2.0 h1:2PF4o4A7W656rC0RxuhUace997FTcDTcIQ6NoEtyjAI=
github.com/ipfs/go-ipfs-exchange-offline v0.2.0/go.mod h1:HjwBeW0dvZvfOMwDP0TSKXIHf2s+ksdP4E3MLDRtLKY=
github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4=
github.com/ipfs/go-ipfs-files v0.0.8/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= github.com/ipfs/go-ipfs-files v0.0.8/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs=
github.com/ipfs/go-ipfs-files v0.0.9/go.mod h1:aFv2uQ/qxWpL/6lidWvnSQmaVqCrf0TBGoUr+C1Fo84= github.com/ipfs/go-ipfs-files v0.0.9/go.mod h1:aFv2uQ/qxWpL/6lidWvnSQmaVqCrf0TBGoUr+C1Fo84=
@ -541,8 +545,9 @@ github.com/ipfs/go-ipld-cbor v0.0.6 h1:pYuWHyvSpIsOOLw4Jy7NbBkCyzLDcl64Bf/LZW7eB
github.com/ipfs/go-ipld-cbor v0.0.6/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA= github.com/ipfs/go-ipld-cbor v0.0.6/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA=
github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms=
github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k=
github.com/ipfs/go-ipld-format v0.2.0 h1:xGlJKkArkmBvowr+GMCX0FEZtkro71K1AwiKnL37mwA=
github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs= github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs=
github.com/ipfs/go-ipld-format v0.3.0 h1:Mwm2oRLzIuUwEPewWAWyMuuBQUsn3awfFEYVb8akMOQ=
github.com/ipfs/go-ipld-format v0.3.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM=
github.com/ipfs/go-ipld-legacy v0.1.0 h1:wxkkc4k8cnvIGIjPO0waJCe7SHEyFgl+yQdafdjGrpA= github.com/ipfs/go-ipld-legacy v0.1.0 h1:wxkkc4k8cnvIGIjPO0waJCe7SHEyFgl+yQdafdjGrpA=
github.com/ipfs/go-ipld-legacy v0.1.0/go.mod h1:86f5P/srAmh9GcIcWQR9lfFLZPrIyyXQeVlOWeeWEuI= github.com/ipfs/go-ipld-legacy v0.1.0/go.mod h1:86f5P/srAmh9GcIcWQR9lfFLZPrIyyXQeVlOWeeWEuI=
github.com/ipfs/go-ipns v0.1.2 h1:O/s/0ht+4Jl9+VoxoUo0zaHjnZUS+aBQIKTuzdZ/ucI= github.com/ipfs/go-ipns v0.1.2 h1:O/s/0ht+4Jl9+VoxoUo0zaHjnZUS+aBQIKTuzdZ/ucI=
@ -565,8 +570,9 @@ github.com/ipfs/go-log/v2 v2.5.0/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOL
github.com/ipfs/go-merkledag v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto= github.com/ipfs/go-merkledag v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto=
github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk=
github.com/ipfs/go-merkledag v0.3.2/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= github.com/ipfs/go-merkledag v0.3.2/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M=
github.com/ipfs/go-merkledag v0.5.1 h1:tr17GPP5XtPhvPPiWtu20tSGZiZDuTaJRXBLcr79Umk=
github.com/ipfs/go-merkledag v0.5.1/go.mod h1:cLMZXx8J08idkp5+id62iVftUQV+HlYJ3PIhDfZsjA4= github.com/ipfs/go-merkledag v0.5.1/go.mod h1:cLMZXx8J08idkp5+id62iVftUQV+HlYJ3PIhDfZsjA4=
github.com/ipfs/go-merkledag v0.6.0 h1:oV5WT2321tS4YQVOPgIrWHvJ0lJobRTerU+i9nmUCuA=
github.com/ipfs/go-merkledag v0.6.0/go.mod h1:9HSEwRd5sV+lbykiYP+2NC/3o6MZbKNaa4hfNcH5iH0=
github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg=
github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY=
github.com/ipfs/go-mfs v0.1.3-0.20210507195338-96fbfa122164 h1:0ATu9s5KktHhm8aYRSe1ysOJPik3dRwU/uag1Bcz+tg= github.com/ipfs/go-mfs v0.1.3-0.20210507195338-96fbfa122164 h1:0ATu9s5KktHhm8aYRSe1ysOJPik3dRwU/uag1Bcz+tg=
@ -774,8 +780,8 @@ github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKB
github.com/libp2p/go-libp2p-discovery v0.6.0 h1:1XdPmhMJr8Tmj/yUfkJMIi8mgwWrLUsCB3bMxdT+DSo= github.com/libp2p/go-libp2p-discovery v0.6.0 h1:1XdPmhMJr8Tmj/yUfkJMIi8mgwWrLUsCB3bMxdT+DSo=
github.com/libp2p/go-libp2p-discovery v0.6.0/go.mod h1:/u1voHt0tKIe5oIA1RHBKQLVCWPna2dXmPNHc2zR9S8= github.com/libp2p/go-libp2p-discovery v0.6.0/go.mod h1:/u1voHt0tKIe5oIA1RHBKQLVCWPna2dXmPNHc2zR9S8=
github.com/libp2p/go-libp2p-gorpc v0.1.0/go.mod h1:DrswTLnu7qjLgbqe4fekX4ISoPiHUqtA45thTsJdE1w= github.com/libp2p/go-libp2p-gorpc v0.1.0/go.mod h1:DrswTLnu7qjLgbqe4fekX4ISoPiHUqtA45thTsJdE1w=
github.com/libp2p/go-libp2p-gorpc v0.3.0 h1:1ww39zPEclHh8p1Exk882Xhy3CK2gW+JZYd+6NZp+q0= github.com/libp2p/go-libp2p-gorpc v0.3.1 h1:ZmqQIgHccgh/Ff1kS3ZlwATZRLvtuRUd633/MLWAx20=
github.com/libp2p/go-libp2p-gorpc v0.3.0/go.mod h1:sRz9ybP9rlOkJB1v65SMLr+NUEPB/ioLZn26MWIV4DU= github.com/libp2p/go-libp2p-gorpc v0.3.1/go.mod h1:sRz9ybP9rlOkJB1v65SMLr+NUEPB/ioLZn26MWIV4DU=
github.com/libp2p/go-libp2p-gostream v0.3.0/go.mod h1:pLBQu8db7vBMNINGsAwLL/ZCE8wng5V1FThoaE5rNjc= github.com/libp2p/go-libp2p-gostream v0.3.0/go.mod h1:pLBQu8db7vBMNINGsAwLL/ZCE8wng5V1FThoaE5rNjc=
github.com/libp2p/go-libp2p-gostream v0.3.1 h1:XlwohsPn6uopGluEWs1Csv1QCEjrTXf2ZQagzZ5paAg= github.com/libp2p/go-libp2p-gostream v0.3.1 h1:XlwohsPn6uopGluEWs1Csv1QCEjrTXf2ZQagzZ5paAg=
github.com/libp2p/go-libp2p-gostream v0.3.1/go.mod h1:1V3b+u4Zhaq407UUY9JLCpboaeufAeVQbnvAt12LRsI= github.com/libp2p/go-libp2p-gostream v0.3.1/go.mod h1:1V3b+u4Zhaq407UUY9JLCpboaeufAeVQbnvAt12LRsI=

View File

@ -5,6 +5,7 @@ package numpin
import ( import (
"context" "context"
"fmt" "fmt"
"sync"
"github.com/ipfs/ipfs-cluster/api" "github.com/ipfs/ipfs-cluster/api"
@ -19,7 +20,9 @@ var MetricName = "numpin"
// Informer is a simple object to implement the ipfscluster.Informer // Informer is a simple object to implement the ipfscluster.Informer
// and Component interfaces // and Component interfaces
type Informer struct { type Informer struct {
config *Config config *Config
mu sync.Mutex
rpcClient *rpc.Client rpcClient *rpc.Client
} }
@ -38,7 +41,9 @@ func NewInformer(cfg *Config) (*Informer, error) {
// SetClient provides us with an rpc.Client which allows // SetClient provides us with an rpc.Client which allows
// contacting other components in the cluster. // contacting other components in the cluster.
func (npi *Informer) SetClient(c *rpc.Client) { func (npi *Informer) SetClient(c *rpc.Client) {
npi.mu.Lock()
npi.rpcClient = c npi.rpcClient = c
npi.mu.Unlock()
} }
// Shutdown is called on cluster shutdown. We just invalidate // Shutdown is called on cluster shutdown. We just invalidate
@ -47,7 +52,9 @@ func (npi *Informer) Shutdown(ctx context.Context) error {
_, span := trace.StartSpan(ctx, "informer/numpin/Shutdown") _, span := trace.StartSpan(ctx, "informer/numpin/Shutdown")
defer span.End() defer span.End()
npi.mu.Lock()
npi.rpcClient = nil npi.rpcClient = nil
npi.mu.Unlock()
return nil return nil
} }
@ -63,7 +70,11 @@ func (npi *Informer) GetMetrics(ctx context.Context) []api.Metric {
ctx, span := trace.StartSpan(ctx, "informer/numpin/GetMetric") ctx, span := trace.StartSpan(ctx, "informer/numpin/GetMetric")
defer span.End() defer span.End()
if npi.rpcClient == nil { npi.mu.Lock()
rpcClient := npi.rpcClient
npi.mu.Unlock()
if rpcClient == nil {
return []api.Metric{ return []api.Metric{
{ {
Valid: false, Valid: false,
@ -71,24 +82,39 @@ func (npi *Informer) GetMetrics(ctx context.Context) []api.Metric {
} }
} }
pinMap := make(map[string]api.IPFSPinStatus)
// make use of the RPC API to obtain information // make use of the RPC API to obtain information
// about the number of pins in IPFS. See RPCAPI docs. // about the number of pins in IPFS. See RPCAPI docs.
err := npi.rpcClient.CallContext( in := make(chan []string, 1)
ctx, in <- []string{"recursive", "direct"}
"", // Local call close(in)
"IPFSConnector", // Service name out := make(chan api.IPFSPinInfo, 1024)
"PinLs", // Method name
"recursive", // in arg errCh := make(chan error, 1)
&pinMap, // out arg go func() {
) defer close(errCh)
err := rpcClient.Stream(
ctx,
"", // Local call
"IPFSConnector", // Service name
"PinLs", // Method name
in,
out,
)
errCh <- err
}()
n := 0
for range out {
n++
}
err := <-errCh
valid := err == nil valid := err == nil
m := api.Metric{ m := api.Metric{
Name: MetricName, Name: MetricName,
Value: fmt.Sprintf("%d", len(pinMap)), Value: fmt.Sprintf("%d", n),
Valid: valid, Valid: valid,
Partitionable: false, Partitionable: false,
} }
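Note: with the streaming RPC, the request arguments travel on a channel as well: everything sent on in before it is closed is forwarded to the remote method, and results arrive on out until the callee finishes (assumed go-libp2p-gorpc Stream semantics). Pre-loading one value and closing the channel is therefore the streaming equivalent of passing a single argument:

    in := make(chan []string, 1)
    in <- []string{"recursive", "direct"} // one batch of pin-type filters
    close(in)                             // no further input for this call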

View File

@ -5,6 +5,7 @@ import (
"testing" "testing"
"github.com/ipfs/ipfs-cluster/api" "github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/test"
rpc "github.com/libp2p/go-libp2p-gorpc" rpc "github.com/libp2p/go-libp2p-gorpc"
) )
@ -21,11 +22,10 @@ func mockRPCClient(t *testing.T) *rpc.Client {
return c return c
} }
func (mock *mockService) PinLs(ctx context.Context, in string, out *map[string]api.IPFSPinStatus) error { func (mock *mockService) PinLs(ctx context.Context, in <-chan []string, out chan<- api.IPFSPinInfo) error {
*out = map[string]api.IPFSPinStatus{ out <- api.IPFSPinInfo{Cid: api.Cid(test.Cid1), Type: api.IPFSPinStatusRecursive}
"QmPGDFvBkgWhvzEK9qaTWrWurSwqXNmhnK3hgELPdZZNPa": api.IPFSPinStatusRecursive, out <- api.IPFSPinInfo{Cid: api.Cid(test.Cid2), Type: api.IPFSPinStatusRecursive}
"QmUZ13osndQ5uL4tPWHXe3iBgBgq9gfewcBMSCAuMBsDJ6": api.IPFSPinStatusRecursive, close(out)
}
return nil return nil
} }

View File

@ -78,7 +78,8 @@ type IPFSConnector interface {
Pin(context.Context, api.Pin) error Pin(context.Context, api.Pin) error
Unpin(context.Context, cid.Cid) error Unpin(context.Context, cid.Cid) error
PinLsCid(context.Context, api.Pin) (api.IPFSPinStatus, error) PinLsCid(context.Context, api.Pin) (api.IPFSPinStatus, error)
PinLs(ctx context.Context, typeFilter string) (map[string]api.IPFSPinStatus, error) // PinLs returns pins in the pinset of the given types (recursive, direct...)
PinLs(ctx context.Context, typeFilters []string, out chan<- api.IPFSPinInfo) error
// ConnectSwarms make sure this peer's IPFS daemon is connected to // ConnectSwarms make sure this peer's IPFS daemon is connected to
// other peers IPFS daemons. // other peers IPFS daemons.
ConnectSwarms(context.Context) error ConnectSwarms(context.Context) error
@ -121,12 +122,11 @@ type PinTracker interface {
Untrack(context.Context, cid.Cid) error Untrack(context.Context, cid.Cid) error
// StatusAll returns the list of pins with their local status. Takes a // StatusAll returns the list of pins with their local status. Takes a
// filter to specify which statuses to report. // filter to specify which statuses to report.
StatusAll(context.Context, api.TrackerStatus) []api.PinInfo StatusAll(context.Context, api.TrackerStatus, chan<- api.PinInfo) error
// Status returns the local status of a given Cid. // Status returns the local status of a given Cid.
Status(context.Context, cid.Cid) api.PinInfo Status(context.Context, cid.Cid) api.PinInfo
// RecoverAll calls Recover() for all pins tracked. Returns only // RecoverAll calls Recover() for all pins tracked.
// informations for retriggered pins. RecoverAll(context.Context, chan<- api.PinInfo) error
RecoverAll(context.Context) ([]api.PinInfo, error)
// Recover retriggers a Pin/Unpin operation in a Cids with error status. // Recover retriggers a Pin/Unpin operation in a Cids with error status.
Recover(context.Context, cid.Cid) (api.PinInfo, error) Recover(context.Context, cid.Cid) (api.PinInfo, error)
} }
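Note: the reworked PinTracker methods follow the same contract as the rest of this change: implementations send every item on out, honor context cancellation, and close the channel before returning. A minimal sketch of a conforming StatusAll (localStatuses and the Match filter check are assumptions made for the sketch):

    func (t *tracker) StatusAll(ctx context.Context, filter api.TrackerStatus, out chan<- api.PinInfo) error {
        defer close(out) // the contract: the callee closes the channel
        for _, pi := range t.localStatuses() {
            if !pi.Status.Match(filter) {
                continue // report only the requested statuses
            }
            select {
            case <-ctx.Done():
                return ctx.Err() // abort promptly if the caller gave up
            case out <- pi:
            }
        }
        return nil
    }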

View File

@ -430,6 +430,48 @@ func shutdownCluster(t *testing.T, c *Cluster, m *test.IpfsMock) {
m.Close() m.Close()
} }
func collectGlobalPinInfos(t *testing.T, out <-chan api.GlobalPinInfo, timeout time.Duration) []api.GlobalPinInfo {
t.Helper()
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
var gpis []api.GlobalPinInfo
for {
select {
case <-ctx.Done():
t.Error(ctx.Err())
return gpis
case gpi, ok := <-out:
if !ok {
return gpis
}
gpis = append(gpis, gpi)
}
}
}
func collectPinInfos(t *testing.T, out <-chan api.PinInfo) []api.PinInfo {
t.Helper()
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
var pis []api.PinInfo
for {
select {
case <-ctx.Done():
t.Error(ctx.Err())
return pis
case pi, ok := <-out:
if !ok {
return pis
}
pis = append(pis, pi)
}
}
}
func runF(t *testing.T, clusters []*Cluster, f func(*testing.T, *Cluster)) { func runF(t *testing.T, clusters []*Cluster, f func(*testing.T, *Cluster)) {
t.Helper() t.Helper()
var wg sync.WaitGroup var wg sync.WaitGroup
@ -654,12 +696,22 @@ func TestClustersPin(t *testing.T) {
} }
switch consensus { switch consensus {
case "crdt": case "crdt":
time.Sleep(20 * time.Second) time.Sleep(10 * time.Second)
default: default:
delay() delay()
} }
fpinned := func(t *testing.T, c *Cluster) { fpinned := func(t *testing.T, c *Cluster) {
status := c.tracker.StatusAll(ctx, api.TrackerStatusUndefined) out := make(chan api.PinInfo, 10)
go func() {
err := c.tracker.StatusAll(ctx, api.TrackerStatusUndefined, out)
if err != nil {
t.Error(err)
}
}()
status := collectPinInfos(t, out)
for _, v := range status { for _, v := range status {
if v.Status != api.TrackerStatusPinned { if v.Status != api.TrackerStatusPinned {
t.Errorf("%s should have been pinned but it is %s", v.Cid, v.Status) t.Errorf("%s should have been pinned but it is %s", v.Cid, v.Status)
@ -672,7 +724,7 @@ func TestClustersPin(t *testing.T) {
runF(t, clusters, fpinned) runF(t, clusters, fpinned)
// Unpin everything // Unpin everything
pinList, err := clusters[0].Pins(ctx) pinList, err := clusters[0].pinsSlice(ctx)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -692,7 +744,7 @@ func TestClustersPin(t *testing.T) {
switch consensus { switch consensus {
case "crdt": case "crdt":
time.Sleep(20 * time.Second) time.Sleep(10 * time.Second)
default: default:
delay() delay()
} }
@ -708,7 +760,15 @@ func TestClustersPin(t *testing.T) {
delay() delay()
funpinned := func(t *testing.T, c *Cluster) { funpinned := func(t *testing.T, c *Cluster) {
status := c.tracker.StatusAll(ctx, api.TrackerStatusUndefined) out := make(chan api.PinInfo)
go func() {
err := c.tracker.StatusAll(ctx, api.TrackerStatusUndefined, out)
if err != nil {
t.Error(err)
}
}()
status := collectPinInfos(t, out)
for _, v := range status { for _, v := range status {
t.Errorf("%s should have been unpinned but it is %s", v.Cid, v.Status) t.Errorf("%s should have been unpinned but it is %s", v.Cid, v.Status)
} }
@ -852,10 +912,15 @@ func TestClustersStatusAll(t *testing.T) {
pinDelay() pinDelay()
// Global status // Global status
f := func(t *testing.T, c *Cluster) { f := func(t *testing.T, c *Cluster) {
statuses, err := c.StatusAll(ctx, api.TrackerStatusUndefined) out := make(chan api.GlobalPinInfo, 10)
if err != nil { go func() {
t.Error(err) err := c.StatusAll(ctx, api.TrackerStatusUndefined, out)
} if err != nil {
t.Error(err)
}
}()
statuses := collectGlobalPinInfos(t, out, 5*time.Second)
if len(statuses) != 1 { if len(statuses) != 1 {
t.Fatal("bad status. Expected one item") t.Fatal("bad status. Expected one item")
} }
@ -920,10 +985,16 @@ func TestClustersStatusAllWithErrors(t *testing.T) {
return return
} }
statuses, err := c.StatusAll(ctx, api.TrackerStatusUndefined) out := make(chan api.GlobalPinInfo, 10)
if err != nil { go func() {
t.Error(err) err := c.StatusAll(ctx, api.TrackerStatusUndefined, out)
} if err != nil {
t.Error(err)
}
}()
statuses := collectGlobalPinInfos(t, out, 5*time.Second)
if len(statuses) != 1 { if len(statuses) != 1 {
t.Fatal("bad status. Expected one item") t.Fatal("bad status. Expected one item")
} }
@ -1124,11 +1195,15 @@ func TestClustersRecoverAll(t *testing.T) {
pinDelay() pinDelay()
gInfos, err := clusters[rand.Intn(nClusters)].RecoverAll(ctx) out := make(chan api.GlobalPinInfo)
if err != nil { go func() {
t.Fatal(err) err := clusters[rand.Intn(nClusters)].RecoverAll(ctx, out)
} if err != nil {
delay() t.Error(err)
}
}()
gInfos := collectGlobalPinInfos(t, out, 5*time.Second)
if len(gInfos) != 1 { if len(gInfos) != 1 {
t.Error("expected one items") t.Error("expected one items")
@ -1219,7 +1294,15 @@ func TestClustersReplicationOverall(t *testing.T) {
f := func(t *testing.T, c *Cluster) { f := func(t *testing.T, c *Cluster) {
// confirm that the pintracker state matches the current global state // confirm that the pintracker state matches the current global state
pinfos := c.tracker.StatusAll(ctx, api.TrackerStatusUndefined) out := make(chan api.PinInfo, 100)
go func() {
err := c.tracker.StatusAll(ctx, api.TrackerStatusUndefined, out)
if err != nil {
t.Error(err)
}
}()
pinfos := collectPinInfos(t, out)
if len(pinfos) != nClusters { if len(pinfos) != nClusters {
t.Error("Pinfos does not have the expected pins") t.Error("Pinfos does not have the expected pins")
} }
@ -1243,11 +1326,14 @@ func TestClustersReplicationOverall(t *testing.T) {
t.Errorf("%s: Expected 1 remote pin but got %d", c.id.String(), numRemote) t.Errorf("%s: Expected 1 remote pin but got %d", c.id.String(), numRemote)
} }
pins, err := c.Pins(ctx) outPins := make(chan api.Pin)
if err != nil { go func() {
t.Fatal(err) err := c.Pins(ctx, outPins)
} if err != nil {
for _, pin := range pins { t.Error(err)
}
}()
for pin := range outPins {
allocs := pin.Allocations allocs := pin.Allocations
if len(allocs) != nClusters-1 { if len(allocs) != nClusters-1 {
t.Errorf("Allocations are [%s]", allocs) t.Errorf("Allocations are [%s]", allocs)
@ -1623,7 +1709,7 @@ func TestClustersReplicationRealloc(t *testing.T) {
// Let the pin arrive // Let the pin arrive
pinDelay() pinDelay()
pinList, err := clusters[j].Pins(ctx) pinList, err := clusters[j].pinsSlice(ctx)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -1641,7 +1727,7 @@ func TestClustersReplicationRealloc(t *testing.T) {
pinDelay() pinDelay()
pinList2, err := clusters[j].Pins(ctx) pinList2, err := clusters[j].pinsSlice(ctx)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -2131,7 +2217,7 @@ func TestClusterPinsWithExpiration(t *testing.T) {
pinDelay() pinDelay()
pins, err := cl.Pins(ctx) pins, err := cl.pinsSlice(ctx)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -2154,7 +2240,7 @@ func TestClusterPinsWithExpiration(t *testing.T) {
pinDelay() pinDelay()
// state sync should have unpinned expired pin // state sync should have unpinned expired pin
pins, err = cl.Pins(ctx) pins, err = cl.pinsSlice(ctx)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View File

@ -73,19 +73,25 @@ type ipfsError struct {
func (ie ipfsError) Error() string { func (ie ipfsError) Error() string {
return fmt.Sprintf( return fmt.Sprintf(
"IPFS request unsuccessful (%s). Code: %d. Message: %s", "IPFS error (%s). Code: %d. Message: %s",
ie.path, ie.path,
ie.code, ie.code,
ie.Message, ie.Message,
) )
} }
type ipfsPinType struct { type ipfsUnpinnedError ipfsError
Type string
func (unpinned ipfsUnpinnedError) Is(target error) bool {
ierr, ok := target.(ipfsError)
if !ok {
return false
}
return strings.HasSuffix(ierr.Message, "not pinned")
} }
type ipfsPinLsResp struct { func (unpinned ipfsUnpinnedError) Error() string {
Keys map[string]ipfsPinType return ipfsError(unpinned).Error()
} }
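Note: the unpinned check later in this file is written errors.Is(ipfsUnpinnedError{}, err), with the sentinel first. This works because errors.Is calls the Is method of its first argument with the second as target, so the suffix match above runs against the real error. A minimal illustration (message shape assumed):

    err := ipfsError{code: 500, Message: "path 'Qm...' is not pinned"}
    errors.Is(ipfsUnpinnedError{}, err) // true: resolves via ipfsUnpinnedError.Is(err)
    errors.Is(err, ipfsUnpinnedError{}) // false: ipfsError itself defines no Is method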
type ipfsIDResp struct { type ipfsIDResp struct {
@ -493,33 +499,62 @@ func (ipfs *Connector) Unpin(ctx context.Context, hash cid.Cid) error {
} }
// PinLs performs a "pin ls --type typeFilter" request against the configured // PinLs performs a "pin ls --type typeFilter" request against the configured
// IPFS daemon and returns a map of cid strings and their status. // IPFS daemon and sends the results on the given channel. Returns when done.
func (ipfs *Connector) PinLs(ctx context.Context, typeFilter string) (map[string]api.IPFSPinStatus, error) { func (ipfs *Connector) PinLs(ctx context.Context, typeFilters []string, out chan<- api.IPFSPinInfo) error {
defer close(out)
bodies := make([]io.ReadCloser, len(typeFilters))
ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/PinLs") ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/PinLs")
defer span.End() defer span.End()
ctx, cancel := context.WithTimeout(ctx, ipfs.config.IPFSRequestTimeout) ctx, cancel := context.WithTimeout(ctx, ipfs.config.IPFSRequestTimeout)
defer cancel() defer cancel()
body, err := ipfs.postCtx(ctx, "pin/ls?type="+typeFilter, "", nil)
// Some error talking to the daemon var err error
if err != nil {
return nil, err nextFilter:
for i, typeFilter := range typeFilters {
// Post and read streaming response
path := "pin/ls?stream=true&type=" + typeFilter
bodies[i], err = ipfs.postCtxStreamResponse(ctx, path, "", nil)
if err != nil {
logger.Errorf("error querying pinset: %s", err)
return err
}
defer bodies[i].Close()
dec := json.NewDecoder(bodies[i])
for {
select {
case <-ctx.Done():
err = fmt.Errorf("aborting pin/ls operation: %w", ctx.Err())
logger.Error(err)
return err
default:
}
var ipfsPin api.IPFSPinInfo
err = dec.Decode(&ipfsPin)
if err == io.EOF {
break nextFilter
}
if err != nil {
err = fmt.Errorf("error decoding ipfs pin: %w", err)
return err
}
select {
case <-ctx.Done():
err = fmt.Errorf("aborting pin/ls operation: %w", ctx.Err())
logger.Error(err)
return err
case out <- ipfsPin:
}
}
} }
var res ipfsPinLsResp return nil
err = json.Unmarshal(body, &res)
if err != nil {
logger.Error("parsing pin/ls response")
logger.Error(string(body))
return nil, err
}
statusMap := make(map[string]api.IPFSPinStatus)
for k, v := range res.Keys {
statusMap[k] = api.IPFSPinStatusFromString(v.Type)
}
return statusMap, nil
} }
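Note: the rewritten PinLs depends on the daemon's streamed response shape: pin/ls?stream=true emits one JSON object per pin, which json.Decoder consumes directly into api.IPFSPinInfo. A self-contained sketch of that decode loop over the assumed wire format (pinEntry stands in for api.IPFSPinInfo):

    type pinEntry struct{ Cid, Type string } // stand-in for api.IPFSPinInfo
    body := strings.NewReader(
        `{"Cid":"QmExampleCid1","Type":"recursive"}` + "\n" +
            `{"Cid":"QmExampleCid2","Type":"direct"}` + "\n")
    dec := json.NewDecoder(body)
    for {
        var p pinEntry
        err := dec.Decode(&p)
        if err == io.EOF {
            break // stream fully consumed
        }
        if err != nil {
            panic(err) // fragment: real code returns the error
        }
        fmt.Println(p.Cid, p.Type) // one streamed pin entry
    }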
// PinLsCid performs a "pin ls <hash>" request. It will use "type=recursive" or // PinLsCid performs a "pin ls <hash>" request. It will use "type=recursive" or
@ -532,35 +567,31 @@ func (ipfs *Connector) PinLsCid(ctx context.Context, pin api.Pin) (api.IPFSPinSt
ctx, cancel := context.WithTimeout(ctx, ipfs.config.IPFSRequestTimeout) ctx, cancel := context.WithTimeout(ctx, ipfs.config.IPFSRequestTimeout)
defer cancel() defer cancel()
if !pin.Defined() {
return api.IPFSPinStatusBug, errors.New("calling PinLsCid without a defined CID")
}
pinType := pin.MaxDepth.ToPinMode().String() pinType := pin.MaxDepth.ToPinMode().String()
lsPath := fmt.Sprintf("pin/ls?arg=%s&type=%s", pin.Cid, pinType) lsPath := fmt.Sprintf("pin/ls?stream=true&arg=%s&type=%s", pin.Cid, pinType)
body, err := ipfs.postCtx(ctx, lsPath, "", nil) body, err := ipfs.postCtxStreamResponse(ctx, lsPath, "", nil)
if body == nil && err != nil { // Network error, daemon down
return api.IPFSPinStatusError, err
}
if err != nil { // we could not find the pin
return api.IPFSPinStatusUnpinned, nil
}
var res ipfsPinLsResp
err = json.Unmarshal(body, &res)
if err != nil { if err != nil {
logger.Error("error parsing pin/ls?arg=cid response:") if errors.Is(ipfsUnpinnedError{}, err) {
logger.Error(string(body)) return api.IPFSPinStatusUnpinned, nil
}
return api.IPFSPinStatusError, err
}
defer body.Close()
var res api.IPFSPinInfo
dec := json.NewDecoder(body)
err = dec.Decode(&res)
if err != nil {
logger.Error("error parsing pin/ls?arg=cid response")
return api.IPFSPinStatusError, err return api.IPFSPinStatusError, err
} }
// We do not know what string format the returned key has so return res.Type, nil
// we parse as CID. There should only be one returned key.
for k, pinObj := range res.Keys {
c, err := cid.Decode(k)
if err != nil || !c.Equals(pin.Cid) {
continue
}
return api.IPFSPinStatusFromString(pinObj.Type), nil
}
return api.IPFSPinStatusError, errors.New("expected to find the pin in the response")
} }
func (ipfs *Connector) doPostCtx(ctx context.Context, client *http.Client, apiURL, path string, contentType string, postBody io.Reader) (*http.Response, error) { func (ipfs *Connector) doPostCtx(ctx context.Context, client *http.Client, apiURL, path string, contentType string, postBody io.Reader) (*http.Response, error) {
@ -601,7 +632,7 @@ func checkResponse(path string, res *http.Response) ([]byte, error) {
// No error response with useful message from ipfs // No error response with useful message from ipfs
return nil, fmt.Errorf( return nil, fmt.Errorf(
"IPFS request unsuccessful (%s). Code %d. Body: %s", "IPFS request failed (is it running?) (%s). Code %d: %s",
path, path,
res.StatusCode, res.StatusCode,
string(body)) string(body))
@ -611,18 +642,13 @@ func checkResponse(path string, res *http.Response) ([]byte, error) {
// the ipfs daemon, reads the full body of the response and // the ipfs daemon, reads the full body of the response and
// returns it after checking for errors. // returns it after checking for errors.
func (ipfs *Connector) postCtx(ctx context.Context, path string, contentType string, postBody io.Reader) ([]byte, error) { func (ipfs *Connector) postCtx(ctx context.Context, path string, contentType string, postBody io.Reader) ([]byte, error) {
res, err := ipfs.doPostCtx(ctx, ipfs.client, ipfs.apiURL(), path, contentType, postBody) rdr, err := ipfs.postCtxStreamResponse(ctx, path, contentType, postBody)
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer res.Body.Close() defer rdr.Close()
errBody, err := checkResponse(path, res) body, err := ioutil.ReadAll(rdr)
if err != nil {
return errBody, err
}
body, err := ioutil.ReadAll(res.Body)
if err != nil { if err != nil {
logger.Errorf("error reading response body: %s", err) logger.Errorf("error reading response body: %s", err)
return nil, err return nil, err
@ -630,6 +656,21 @@ func (ipfs *Connector) postCtx(ctx context.Context, path string, contentType str
return body, nil return body, nil
} }
// postCtxStreamResponse makes a POST request against the ipfs daemon, and
// returns the body reader after checking the request for errors.
func (ipfs *Connector) postCtxStreamResponse(ctx context.Context, path string, contentType string, postBody io.Reader) (io.ReadCloser, error) {
res, err := ipfs.doPostCtx(ctx, ipfs.client, ipfs.apiURL(), path, contentType, postBody)
if err != nil {
return nil, err
}
_, err = checkResponse(path, res)
if err != nil {
return nil, err
}
return res.Body, nil
}
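A hedged caller sketch for the new helper (the path and the decodeStreamedPins helper sketched earlier are illustrative, not part of this diff): the point of the refactor is that the body comes back open, so large listings are never buffered whole, and closing it becomes the caller's job.

rdr, err := ipfs.postCtxStreamResponse(ctx, "pin/ls?stream=true&type=recursive", "", nil)
if err != nil {
    return err
}
defer rdr.Close() // the caller owns the open body now
pins, err := decodeStreamedPins(rdr) // hypothetical helper from the sketch above
if err != nil {
    return err
}
logger.Infof("ipfs reports %d recursive pins", len(pins))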
// apiURL is a short-hand for building the url of the IPFS // apiURL is a short-hand for building the url of the IPFS
// daemon API. // daemon API.
func (ipfs *Connector) apiURL() string { func (ipfs *Connector) apiURL() string {
View File
@ -219,6 +219,27 @@ func TestIPFSPinLsCid_DifferentEncoding(t *testing.T) {
} }
} }
func collectPins(t *testing.T, pch <-chan api.IPFSPinInfo) []api.IPFSPinInfo {
t.Helper()
var pins []api.IPFSPinInfo
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()
for {
select {
case <-ctx.Done():
t.Fatal(ctx.Err())
return nil
case p, ok := <-pch:
if !ok {
return pins
}
pins = append(pins, p)
}
}
}
func TestIPFSPinLs(t *testing.T) { func TestIPFSPinLs(t *testing.T) {
ctx := context.Background() ctx := context.Background()
ipfs, mock := testIPFSConnector(t) ipfs, mock := testIPFSConnector(t)
@ -229,16 +250,21 @@ func TestIPFSPinLs(t *testing.T) {
ipfs.Pin(ctx, api.PinCid(c)) ipfs.Pin(ctx, api.PinCid(c))
ipfs.Pin(ctx, api.PinCid(c2)) ipfs.Pin(ctx, api.PinCid(c2))
ipsMap, err := ipfs.PinLs(ctx, "") pinCh := make(chan api.IPFSPinInfo, 10)
if err != nil { go func() {
t.Error("should not error") err := ipfs.PinLs(ctx, []string{""}, pinCh)
if err != nil {
t.Error("should not error")
}
}()
pins := collectPins(t, pinCh)
if len(pins) != 2 {
t.Fatal("the pin list does not contain the expected number of keys")
} }
if len(ipsMap) != 2 { if !pins[0].Type.IsPinned(-1) || !pins[1].Type.IsPinned(-1) {
t.Fatal("the map does not contain expected keys")
}
if !ipsMap[test.Cid1.String()].IsPinned(-1) || !ipsMap[test.Cid2.String()].IsPinned(-1) {
t.Error("c1 and c2 should appear pinned") t.Error("c1 and c2 should appear pinned")
} }
} }
View File
@ -114,7 +114,7 @@ func TestClustersPeerAdd(t *testing.T) {
} }
// Check that they are part of the consensus // Check that they are part of the consensus
pins, err := c.Pins(ctx) pins, err := c.pinsSlice(ctx)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -463,7 +463,7 @@ func TestClustersPeerRemoveReallocsPins(t *testing.T) {
// Find out which pins are associated to the chosen peer. // Find out which pins are associated to the chosen peer.
interestingCids := []cid.Cid{} interestingCids := []cid.Cid{}
pins, err := chosen.Pins(ctx) pins, err := chosen.pinsSlice(ctx)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -537,7 +537,7 @@ func TestClustersPeerJoin(t *testing.T) {
if len(peers) != nClusters { if len(peers) != nClusters {
t.Error("all peers should be connected") t.Error("all peers should be connected")
} }
pins, err := c.Pins(ctx) pins, err := c.pinsSlice(ctx)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -575,7 +575,7 @@ func TestClustersPeerJoinAllAtOnce(t *testing.T) {
if len(peers) != nClusters { if len(peers) != nClusters {
t.Error("all peers should be connected") t.Error("all peers should be connected")
} }
pins, err := c.Pins(ctx) pins, err := c.pinsSlice(ctx)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
View File
@ -140,7 +140,7 @@ func (opt *OperationTracker) SetError(ctx context.Context, c cid.Cid, err error)
} }
} }
func (opt *OperationTracker) unsafePinInfo(ctx context.Context, op *Operation) api.PinInfo { func (opt *OperationTracker) unsafePinInfo(ctx context.Context, op *Operation, ipfs api.IPFSID) api.PinInfo {
if op == nil { if op == nil {
return api.PinInfo{ return api.PinInfo{
Cid: cid.Undef, Cid: cid.Undef,
@ -162,26 +162,27 @@ func (opt *OperationTracker) unsafePinInfo(ctx context.Context, op *Operation) a
Peer: opt.pid, Peer: opt.pid,
Name: op.Pin().Name, Name: op.Pin().Name,
PinInfoShort: api.PinInfoShort{ PinInfoShort: api.PinInfoShort{
PeerName: opt.peerName, PeerName: opt.peerName,
IPFS: "", IPFS: ipfs.ID,
Status: op.ToTrackerStatus(), IPFSAddresses: ipfs.Addresses,
TS: op.Timestamp(), Status: op.ToTrackerStatus(),
AttemptCount: op.AttemptCount(), TS: op.Timestamp(),
PriorityPin: op.PriorityPin(), AttemptCount: op.AttemptCount(),
Error: op.Error(), PriorityPin: op.PriorityPin(),
Error: op.Error(),
}, },
} }
} }
// Get returns a PinInfo object for Cid. // Get returns a PinInfo object for Cid.
func (opt *OperationTracker) Get(ctx context.Context, c cid.Cid) api.PinInfo { func (opt *OperationTracker) Get(ctx context.Context, c cid.Cid, ipfs api.IPFSID) api.PinInfo {
ctx, span := trace.StartSpan(ctx, "optracker/GetAll") ctx, span := trace.StartSpan(ctx, "optracker/GetAll")
defer span.End() defer span.End()
opt.mu.RLock() opt.mu.RLock()
defer opt.mu.RUnlock() defer opt.mu.RUnlock()
op := opt.operations[c] op := opt.operations[c]
pInfo := opt.unsafePinInfo(ctx, op) pInfo := opt.unsafePinInfo(ctx, op, ipfs)
if pInfo.Cid == cid.Undef { if pInfo.Cid == cid.Undef {
pInfo.Cid = c pInfo.Cid = c
} }
@ -190,7 +191,7 @@ func (opt *OperationTracker) Get(ctx context.Context, c cid.Cid) api.PinInfo {
// GetExists returns a PinInfo object for a Cid only if there exists // GetExists returns a PinInfo object for a Cid only if there exists
// an associated Operation. // an associated Operation.
func (opt *OperationTracker) GetExists(ctx context.Context, c cid.Cid) (api.PinInfo, bool) { func (opt *OperationTracker) GetExists(ctx context.Context, c cid.Cid, ipfs api.IPFSID) (api.PinInfo, bool) {
ctx, span := trace.StartSpan(ctx, "optracker/GetExists") ctx, span := trace.StartSpan(ctx, "optracker/GetExists")
defer span.End() defer span.End()
@ -200,25 +201,51 @@ func (opt *OperationTracker) GetExists(ctx context.Context, c cid.Cid) (api.PinI
if !ok { if !ok {
return api.PinInfo{}, false return api.PinInfo{}, false
} }
pInfo := opt.unsafePinInfo(ctx, op) pInfo := opt.unsafePinInfo(ctx, op, ipfs)
return pInfo, true return pInfo, true
} }
// GetAll returns PinInfo objects for all known operations. // GetAll returns PinInfo objects for all known operations.
func (opt *OperationTracker) GetAll(ctx context.Context) []api.PinInfo { func (opt *OperationTracker) GetAll(ctx context.Context, ipfs api.IPFSID) []api.PinInfo {
ctx, span := trace.StartSpan(ctx, "optracker/GetAll") ctx, span := trace.StartSpan(ctx, "optracker/GetAll")
defer span.End() defer span.End()
ch := make(chan api.PinInfo, 1024)
var pinfos []api.PinInfo var pinfos []api.PinInfo
opt.mu.RLock() go opt.GetAllChannel(ctx, api.TrackerStatusUndefined, ipfs, ch)
defer opt.mu.RUnlock() for pinfo := range ch {
for _, op := range opt.operations {
pinfo := opt.unsafePinInfo(ctx, op)
pinfos = append(pinfos, pinfo) pinfos = append(pinfos, pinfo)
} }
return pinfos return pinfos
} }
// GetAllChannel sends all known operations that match the filter on the
// provided channel. It blocks until done.
func (opt *OperationTracker) GetAllChannel(ctx context.Context, filter api.TrackerStatus, ipfs api.IPFSID, out chan<- api.PinInfo) error {
defer close(out)
opt.mu.RLock()
defer opt.mu.RUnlock()
for _, op := range opt.operations {
pinfo := opt.unsafePinInfo(ctx, op, ipfs)
if pinfo.Status.Match(filter) {
select {
case <-ctx.Done():
return fmt.Errorf("listing operations aborted: %w", ctx.Err())
default:
}
select {
case <-ctx.Done():
return fmt.Errorf("listing operations aborted: %w", ctx.Err())
case out <- pinfo:
}
}
}
return nil
}
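Because GetAllChannel holds the read lock and blocks until every matching operation has been sent, callers are expected to run it in a goroutine and drain the channel; it closes out on every return path, so ranging always terminates. A consumption sketch (the 1024 buffer and the pin_error filter are arbitrary illustrative choices, not part of the diff):

out := make(chan api.PinInfo, 1024)
errCh := make(chan error, 1)
go func() {
    errCh <- opt.GetAllChannel(ctx, api.TrackerStatusPinError, ipfsid, out)
}()
for pi := range out { // terminates: GetAllChannel closes out
    logger.Debugf("errored operation: %s", pi.Cid)
}
if err := <-errCh; err != nil {
    logger.Error(err)
}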
// CleanAllDone deletes any operation from the tracker that is in PhaseDone. // CleanAllDone deletes any operation from the tracker that is in PhaseDone.
func (opt *OperationTracker) CleanAllDone(ctx context.Context) { func (opt *OperationTracker) CleanAllDone(ctx context.Context) {
opt.mu.Lock() opt.mu.Lock()
@ -245,13 +272,13 @@ func (opt *OperationTracker) OpContext(ctx context.Context, c cid.Cid) context.C
// Operations that matched the provided filter. Note, only supports // Operations that matched the provided filter. Note, only supports
// filters of type OperationType or Phase, any other type // filters of type OperationType or Phase, any other type
// will result in a nil slice being returned. // will result in a nil slice being returned.
func (opt *OperationTracker) Filter(ctx context.Context, filters ...interface{}) []api.PinInfo { func (opt *OperationTracker) Filter(ctx context.Context, ipfs api.IPFSID, filters ...interface{}) []api.PinInfo {
var pinfos []api.PinInfo var pinfos []api.PinInfo
opt.mu.RLock() opt.mu.RLock()
defer opt.mu.RUnlock() defer opt.mu.RUnlock()
ops := filterOpsMap(ctx, opt.operations, filters) ops := filterOpsMap(ctx, opt.operations, filters)
for _, op := range ops { for _, op := range ops {
pinfo := opt.unsafePinInfo(ctx, op) pinfo := opt.unsafePinInfo(ctx, op, ipfs)
pinfos = append(pinfos, pinfo) pinfos = append(pinfos, pinfo)
} }
return pinfos return pinfos
View File
@ -126,7 +126,7 @@ func TestOperationTracker_SetError(t *testing.T) {
opt := testOperationTracker(t) opt := testOperationTracker(t)
opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationPin, PhaseDone) opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationPin, PhaseDone)
opt.SetError(ctx, test.Cid1, errors.New("fake error")) opt.SetError(ctx, test.Cid1, errors.New("fake error"))
pinfo := opt.Get(ctx, test.Cid1) pinfo := opt.Get(ctx, test.Cid1, api.IPFSID{})
if pinfo.Status != api.TrackerStatusPinError { if pinfo.Status != api.TrackerStatusPinError {
t.Error("should have updated the status") t.Error("should have updated the status")
} }
@ -148,7 +148,7 @@ func TestOperationTracker_Get(t *testing.T) {
opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationPin, PhaseDone) opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationPin, PhaseDone)
t.Run("Get with existing item", func(t *testing.T) { t.Run("Get with existing item", func(t *testing.T) {
pinfo := opt.Get(ctx, test.Cid1) pinfo := opt.Get(ctx, test.Cid1, api.IPFSID{})
if pinfo.Status != api.TrackerStatusPinned { if pinfo.Status != api.TrackerStatusPinned {
t.Error("bad status") t.Error("bad status")
} }
@ -163,7 +163,7 @@ func TestOperationTracker_Get(t *testing.T) {
}) })
t.Run("Get with unexisting item", func(t *testing.T) { t.Run("Get with unexisting item", func(t *testing.T) {
pinfo := opt.Get(ctx, test.Cid2) pinfo := opt.Get(ctx, test.Cid2, api.IPFSID{})
if pinfo.Status != api.TrackerStatusUnpinned { if pinfo.Status != api.TrackerStatusUnpinned {
t.Error("bad status") t.Error("bad status")
} }
@ -181,7 +181,7 @@ func TestOperationTracker_GetAll(t *testing.T) {
ctx := context.Background() ctx := context.Background()
opt := testOperationTracker(t) opt := testOperationTracker(t)
opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationPin, PhaseInProgress) opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationPin, PhaseInProgress)
pinfos := opt.GetAll(ctx) pinfos := opt.GetAll(ctx, api.IPFSID{})
if len(pinfos) != 1 { if len(pinfos) != 1 {
t.Fatal("expected 1 item") t.Fatal("expected 1 item")
} }
View File
@ -165,6 +165,28 @@ func TestPinTracker_Untrack(t *testing.T) {
} }
} }
func collectPinInfos(t *testing.T, out chan api.PinInfo) []api.PinInfo {
t.Helper()
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
var pis []api.PinInfo
for {
select {
case <-ctx.Done():
t.Error("took too long")
return nil
case pi, ok := <-out:
if !ok {
return pis
}
pis = append(pis, pi)
}
}
}
func TestPinTracker_StatusAll(t *testing.T) { func TestPinTracker_StatusAll(t *testing.T) {
type args struct { type args struct {
c api.Pin c api.Pin
@ -216,7 +238,16 @@ func TestPinTracker_StatusAll(t *testing.T) {
t.Errorf("PinTracker.Track() error = %v", err) t.Errorf("PinTracker.Track() error = %v", err)
} }
time.Sleep(200 * time.Millisecond) time.Sleep(200 * time.Millisecond)
got := tt.args.tracker.StatusAll(context.Background(), api.TrackerStatusUndefined) infos := make(chan api.PinInfo)
go func() {
err := tt.args.tracker.StatusAll(context.Background(), api.TrackerStatusUndefined, infos)
if err != nil {
t.Error(err)
}
}()
got := collectPinInfos(t, infos)
if len(got) != len(tt.want) { if len(got) != len(tt.want) {
for _, pi := range got { for _, pi := range got {
t.Logf("pinfo: %v", pi) t.Logf("pinfo: %v", pi)
@ -240,31 +271,6 @@ func TestPinTracker_StatusAll(t *testing.T) {
} }
} }
func BenchmarkPinTracker_StatusAll(b *testing.B) {
type args struct {
tracker ipfscluster.PinTracker
}
tests := []struct {
name string
args args
}{
{
"basic stateless track",
args{
testStatelessPinTracker(b),
},
},
}
for _, tt := range tests {
b.Run(tt.name, func(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
tt.args.tracker.StatusAll(context.Background(), api.TrackerStatusUndefined)
}
})
}
}
func TestPinTracker_Status(t *testing.T) { func TestPinTracker_Status(t *testing.T) {
type args struct { type args struct {
c cid.Cid c cid.Cid
@ -350,11 +356,16 @@ func TestPinTracker_RecoverAll(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
got, err := tt.args.tracker.RecoverAll(context.Background()) infos := make(chan api.PinInfo)
if (err != nil) != tt.wantErr { go func() {
t.Errorf("PinTracker.RecoverAll() error = %v, wantErr %v", err, tt.wantErr) err := tt.args.tracker.RecoverAll(context.Background(), infos)
return if (err != nil) != tt.wantErr {
} t.Errorf("PinTracker.RecoverAll() error = %v, wantErr %v", err, tt.wantErr)
return
}
}()
got := collectPinInfos(t, infos)
if len(got) != len(tt.want) { if len(got) != len(tt.want) {
for _, pi := range got { for _, pi := range got {
View File
@ -6,6 +6,7 @@ package stateless
import ( import (
"context" "context"
"errors" "errors"
"fmt"
"sync" "sync"
"time" "time"
@ -23,6 +24,8 @@ import (
var logger = logging.Logger("pintracker") var logger = logging.Logger("pintracker")
const pinsChannelSize = 1024
var ( var (
// ErrFullQueue is the error used when pin or unpin operation channel is full. // ErrFullQueue is the error used when pin or unpin operation channel is full.
ErrFullQueue = errors.New("pin/unpin operation queue is full. Try increasing max_pin_queue_size") ErrFullQueue = errors.New("pin/unpin operation queue is full. Try increasing max_pin_queue_size")
@ -321,36 +324,134 @@ func (spt *Tracker) Untrack(ctx context.Context, c cid.Cid) error {
} }
// StatusAll returns information for all Cids pinned to the local IPFS node. // StatusAll returns information for all Cids pinned to the local IPFS node.
func (spt *Tracker) StatusAll(ctx context.Context, filter api.TrackerStatus) []api.PinInfo { func (spt *Tracker) StatusAll(ctx context.Context, filter api.TrackerStatus, out chan<- api.PinInfo) error {
ctx, span := trace.StartSpan(ctx, "tracker/stateless/StatusAll") ctx, span := trace.StartSpan(ctx, "tracker/stateless/StatusAll")
defer span.End() defer span.End()
pininfos, err := spt.localStatus(ctx, true, filter)
if err != nil {
return nil
}
// get all inflight operations from optracker and put them into the
// map, deduplicating any existing items with their inflight operation.
//
// we cannot filter in GetAll, because we are meant to replace items in
// pininfos and set the correct status, as otherwise they will remain in
// PinError.
ipfsid := spt.getIPFSID(ctx) ipfsid := spt.getIPFSID(ctx)
for _, infop := range spt.optracker.GetAll(ctx) {
infop.IPFS = ipfsid.ID // Any other states are just operation-tracker states, so we just give
infop.IPFSAddresses = ipfsid.Addresses // those and return.
pininfos[infop.Cid] = infop if !filter.Match(
api.TrackerStatusPinned | api.TrackerStatusUnexpectedlyUnpinned |
api.TrackerStatusSharded | api.TrackerStatusRemote) {
return spt.optracker.GetAllChannel(ctx, filter, ipfsid, out)
} }
var pis []api.PinInfo defer close(out)
for _, pi := range pininfos {
// Last filter. // get global state - cluster pinset
if pi.Status.Match(filter) { st, err := spt.getState(ctx)
pis = append(pis, pi) if err != nil {
logger.Error(err)
return err
}
var ipfsRecursivePins map[api.Cid]api.IPFSPinStatus
// Only query IPFS if we want to status for pinned items
if filter.Match(api.TrackerStatusPinned | api.TrackerStatusUnexpectedlyUnpinned) {
ipfsRecursivePins = make(map[api.Cid]api.IPFSPinStatus)
// At some point we need a full map of what we have and what
// we don't. The IPFS pinset is the smallest thing we can keep
// in memory.
ipfsPinsCh, err := spt.ipfsPins(ctx)
if err != nil {
logger.Error(err)
return err
}
for ipfsPinInfo := range ipfsPinsCh {
ipfsRecursivePins[ipfsPinInfo.Cid] = ipfsPinInfo.Type
} }
} }
return pis
// Prepare pinset streaming
statePins := make(chan api.Pin, pinsChannelSize)
err = st.List(ctx, statePins)
if err != nil {
logger.Error(err)
return err
}
// a shorthand for this select.
trySend := func(info api.PinInfo) bool {
select {
case <-ctx.Done():
return false
case out <- info:
return true
}
}
// For every item in the state.
for p := range statePins {
select {
case <-ctx.Done():
default:
}
// if there is an operation, issue that and move on
info, ok := spt.optracker.GetExists(ctx, p.Cid, ipfsid)
if ok && filter.Match(info.Status) {
if !trySend(info) {
return fmt.Errorf("error issuing PinInfo: %w", ctx.Err())
}
continue // next pin
}
// Preliminary PinInfo for this Pin.
info = api.PinInfo{
Cid: p.Cid,
Name: p.Name,
Peer: spt.peerID,
Allocations: p.Allocations,
Origins: p.Origins,
Created: p.Timestamp,
Metadata: p.Metadata,
PinInfoShort: api.PinInfoShort{
PeerName: spt.peerName,
IPFS: ipfsid.ID,
IPFSAddresses: ipfsid.Addresses,
Status: api.TrackerStatusUndefined, // TBD
TS: p.Timestamp,
Error: "",
AttemptCount: 0,
PriorityPin: false,
},
}
ipfsStatus, pinnedInIpfs := ipfsRecursivePins[api.Cid(p.Cid)]
switch {
case p.Type == api.MetaType:
info.Status = api.TrackerStatusSharded
case p.IsRemotePin(spt.peerID):
info.Status = api.TrackerStatusRemote
case pinnedInIpfs:
// No need to filter. pinnedInIpfs is false
// unless the filter is Pinned |
// UnexpectedlyUnpinned. We filter at the end.
info.Status = ipfsStatus.ToTrackerStatus()
default:
// Not on an operation
// Not a meta pin
// Not a remote pin
// Not a pin on ipfs
// We understand that this is something that
// should be pinned on IPFS and it is not.
info.Status = api.TrackerStatusUnexpectedlyUnpinned
info.Error = errUnexpectedlyUnpinned.Error()
}
if !filter.Match(info.Status) {
continue
}
if !trySend(info) {
return fmt.Errorf("error issuing PinInfo: %w", ctx.Err())
}
}
return nil
} }
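Worth spelling out the two paths above: filters that only match operation-tracker states (queued, pinning, errors) are served entirely from memory via GetAllChannel, while filters matching pinned/remote/sharded/unexpectedly-unpinned walk the shared state and, when needed, the IPFS pinset. Either way the callee closes out, so callers can simply range. A hedged sketch of the fast path (the status combination is an arbitrary example):

queued := make(chan api.PinInfo, pinsChannelSize)
go func() {
    // This filter never lists the cluster state nor queries IPFS.
    if err := spt.StatusAll(ctx, api.TrackerStatusPinQueued|api.TrackerStatusPinning, queued); err != nil {
        logger.Error(err)
    }
}()
for pi := range queued {
    _ = pi // drained; StatusAll closes the channel when done
}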
// Status returns information for a Cid pinned to the local IPFS node. // Status returns information for a Cid pinned to the local IPFS node.
@ -361,10 +462,7 @@ func (spt *Tracker) Status(ctx context.Context, c cid.Cid) api.PinInfo {
ipfsid := spt.getIPFSID(ctx) ipfsid := spt.getIPFSID(ctx)
// check if c has an inflight operation or errored operation in optracker // check if c has an inflight operation or errored operation in optracker
if oppi, ok := spt.optracker.GetExists(ctx, c); ok { if oppi, ok := spt.optracker.GetExists(ctx, c, ipfsid); ok {
// if it does return the status of the operation
oppi.IPFS = ipfsid.ID
oppi.IPFSAddresses = ipfsid.Addresses
return oppi return oppi
} }
@ -452,31 +550,46 @@ func (spt *Tracker) Status(ctx context.Context, c cid.Cid) api.PinInfo {
} }
// RecoverAll attempts to recover all items tracked by this peer. It returns // RecoverAll attempts to recover all items tracked by this peer. It returns
// items that have been re-queued. // an error, if any, once it is done re-tracking.
func (spt *Tracker) RecoverAll(ctx context.Context) ([]api.PinInfo, error) { func (spt *Tracker) RecoverAll(ctx context.Context, out chan<- api.PinInfo) error {
defer close(out)
ctx, span := trace.StartSpan(ctx, "tracker/stateless/RecoverAll") ctx, span := trace.StartSpan(ctx, "tracker/stateless/RecoverAll")
defer span.End() defer span.End()
// FIXME: make sure this returns a channel. statusesCh := make(chan api.PinInfo, 1024)
statuses := spt.StatusAll(ctx, api.TrackerStatusUndefined) err := spt.StatusAll(ctx, api.TrackerStatusUndefined, statusesCh)
resp := make([]api.PinInfo, 0) if err != nil {
for _, st := range statuses { return err
}
for st := range statusesCh {
// Break out if we shutdown. We might be going through // Break out if we shutdown. We might be going through
// a very long list of statuses. // a very long list of statuses.
select { select {
case <-spt.ctx.Done(): case <-spt.ctx.Done():
return nil, spt.ctx.Err() err = fmt.Errorf("RecoverAll aborted: %w", spt.ctx.Err())
logger.Error(err)
return err
default: default:
r, err := spt.recoverWithPinInfo(ctx, st) p, err := spt.recoverWithPinInfo(ctx, st)
if err != nil { if err != nil {
return resp, err err = fmt.Errorf("RecoverAll error: %w", err)
logger.Error(err)
return err
} }
if r.Defined() { if p.Defined() {
resp = append(resp, r) select {
case <-ctx.Done():
err = fmt.Errorf("RecoverAll aborted: %w", ctx.Err())
logger.Error(err)
return err
case out <- p:
}
} }
} }
} }
return resp, nil return nil
} }
// Recover will trigger pinning or unpinning for items in // Recover will trigger pinning or unpinning for items in
@ -485,13 +598,7 @@ func (spt *Tracker) Recover(ctx context.Context, c cid.Cid) (api.PinInfo, error)
ctx, span := trace.StartSpan(ctx, "tracker/stateless/Recover") ctx, span := trace.StartSpan(ctx, "tracker/stateless/Recover")
defer span.End() defer span.End()
// Check if we have a status in the operation tracker and use that pi := spt.Status(ctx, c)
// pininfo. Otherwise, get a status by checking against IPFS and use
// that.
pi, ok := spt.optracker.GetExists(ctx, c)
if !ok {
pi = spt.Status(ctx, c)
}
recPi, err := spt.recoverWithPinInfo(ctx, pi) recPi, err := spt.recoverWithPinInfo(ctx, pi)
// if it was not enqueued, no updated pin-info is returned. // if it was not enqueued, no updated pin-info is returned.
@ -524,158 +631,29 @@ func (spt *Tracker) recoverWithPinInfo(ctx context.Context, pi api.PinInfo) (api
return spt.Status(ctx, pi.Cid), nil return spt.Status(ctx, pi.Cid), nil
} }
func (spt *Tracker) ipfsStatusAll(ctx context.Context) (map[cid.Cid]api.PinInfo, error) { func (spt *Tracker) ipfsPins(ctx context.Context) (<-chan api.IPFSPinInfo, error) {
ctx, span := trace.StartSpan(ctx, "tracker/stateless/ipfsStatusAll") ctx, span := trace.StartSpan(ctx, "tracker/stateless/ipfsStatusAll")
defer span.End() defer span.End()
var ipsMap map[string]api.IPFSPinStatus in := make(chan []string, 1) // type filter.
err := spt.rpcClient.CallContext( in <- []string{"recursive", "direct"}
ctx, close(in)
"", out := make(chan api.IPFSPinInfo, pinsChannelSize)
"IPFSConnector",
"PinLs", go func() {
"recursive", err := spt.rpcClient.Stream(
&ipsMap, ctx,
) "",
if err != nil { "IPFSConnector",
logger.Error(err) "PinLs",
return nil, err in,
} out,
ipfsid := spt.getIPFSID(ctx) )
pins := make(map[cid.Cid]api.PinInfo, len(ipsMap))
for cidstr, ips := range ipsMap {
c, err := cid.Decode(cidstr)
if err != nil { if err != nil {
logger.Error(err) logger.Error(err)
continue
} }
p := api.PinInfo{ }()
Cid: c, return out, nil
Name: "", // to be filled later
Allocations: nil, // to be filled later
Origins: nil, // to be filled later
//Created: nil, // to be filled later
Metadata: nil, // to be filled later
Peer: spt.peerID,
PinInfoShort: api.PinInfoShort{
PeerName: spt.peerName,
IPFS: ipfsid.ID,
IPFSAddresses: ipfsid.Addresses,
Status: ips.ToTrackerStatus(),
TS: time.Now(), // to be set later
AttemptCount: 0,
PriorityPin: false,
},
}
pins[c] = p
}
return pins, nil
}
// localStatus returns a joint set of consensusState and ipfsStatus marking
// pins which should be meta or remote and leaving any ipfs pins that aren't
// in the consensusState out. If incExtra is true, Remote and Sharded pins
// will be added to the status slice. If a filter is provided, only statuses
// matching the filter will be returned.
func (spt *Tracker) localStatus(ctx context.Context, incExtra bool, filter api.TrackerStatus) (map[cid.Cid]api.PinInfo, error) {
ctx, span := trace.StartSpan(ctx, "tracker/stateless/localStatus")
defer span.End()
// get shared state
st, err := spt.getState(ctx)
if err != nil {
logger.Error(err)
return nil, err
}
// Only list the full pinset if we are interested in pin types that
// require it. Otherwise said, this whole method is mostly a no-op
// when filtering for queued/error items which are all in the operation
// tracker.
var statePins <-chan api.Pin
if filter.Match(
api.TrackerStatusPinned | api.TrackerStatusUnexpectedlyUnpinned |
api.TrackerStatusSharded | api.TrackerStatusRemote) {
statePins, err = st.List(ctx)
if err != nil {
logger.Error(err)
return nil, err
}
} else {
// no state pins
ch := make(chan api.Pin)
close(ch)
statePins = ch
}
var localpis map[cid.Cid]api.PinInfo
// Only query IPFS if we want to status for pinned items
if filter.Match(api.TrackerStatusPinned | api.TrackerStatusUnexpectedlyUnpinned) {
localpis, err = spt.ipfsStatusAll(ctx)
if err != nil {
logger.Error(err)
return nil, err
}
}
pininfos := make(map[cid.Cid]api.PinInfo, len(statePins))
ipfsid := spt.getIPFSID(ctx)
for p := range statePins {
ipfsInfo, pinnedInIpfs := localpis[p.Cid]
// base pinInfo object - status to be filled.
pinInfo := api.PinInfo{
Cid: p.Cid,
Name: p.Name,
Peer: spt.peerID,
Allocations: p.Allocations,
Origins: p.Origins,
Created: p.Timestamp,
Metadata: p.Metadata,
PinInfoShort: api.PinInfoShort{
PeerName: spt.peerName,
IPFS: ipfsid.ID,
IPFSAddresses: ipfsid.Addresses,
TS: p.Timestamp,
AttemptCount: 0,
PriorityPin: false,
},
}
switch {
case p.Type == api.MetaType:
if !incExtra || !filter.Match(api.TrackerStatusSharded) {
continue
}
pinInfo.Status = api.TrackerStatusSharded
pininfos[p.Cid] = pinInfo
case p.IsRemotePin(spt.peerID):
if !incExtra || !filter.Match(api.TrackerStatusRemote) {
continue
}
pinInfo.Status = api.TrackerStatusRemote
pininfos[p.Cid] = pinInfo
case pinnedInIpfs: // always false unless filter matches TrackerStatusPinnned
ipfsInfo.Name = p.Name
ipfsInfo.TS = p.Timestamp
ipfsInfo.Allocations = p.Allocations
ipfsInfo.Origins = p.Origins
ipfsInfo.Created = p.Timestamp
ipfsInfo.Metadata = p.Metadata
pininfos[p.Cid] = ipfsInfo
default:
// report as UNEXPECTEDLY_UNPINNED for this peer.
// this will be overwritten if the operation tracker
// has more info for this (an ongoing pinning
// operation). Otherwise, it means something should be
// pinned and it is not known by IPFS. Should be
// handled to "recover".
pinInfo.Status = api.TrackerStatusUnexpectedlyUnpinned
pinInfo.Error = errUnexpectedlyUnpinned.Error()
pininfos[p.Cid] = pinInfo
}
}
return pininfos, nil
} }
// func (spt *Tracker) getErrorsAll(ctx context.Context) []api.PinInfo { // func (spt *Tracker) getErrorsAll(ctx context.Context) []api.PinInfo {
View File
@ -64,13 +64,17 @@ func (mock *mockIPFS) Unpin(ctx context.Context, in api.Pin, out *struct{}) erro
return nil return nil
} }
func (mock *mockIPFS) PinLs(ctx context.Context, in string, out *map[string]api.IPFSPinStatus) error { func (mock *mockIPFS) PinLs(ctx context.Context, in <-chan []string, out chan<- api.IPFSPinInfo) error {
// Must be consistent with PinLsCid out <- api.IPFSPinInfo{
m := map[string]api.IPFSPinStatus{ Cid: api.Cid(test.Cid1),
test.Cid1.String(): api.IPFSPinStatusRecursive, Type: api.IPFSPinStatusRecursive,
test.Cid2.String(): api.IPFSPinStatusRecursive,
} }
*out = m
out <- api.IPFSPinInfo{
Cid: api.Cid(test.Cid2),
Type: api.IPFSPinStatusRecursive,
}
close(out)
return nil return nil
} }
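Note the handler-side contract this mock illustrates: a streaming RPC implementation writes its results to out and must close it exactly once on every path, since in this codebase's usage that close is what ends the range loop on the remote caller's side. Every real handler in this PR (rpcapi, the trackers, the state) follows the same rule.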
@ -207,7 +211,7 @@ func TestTrackUntrackWithCancel(t *testing.T) {
time.Sleep(100 * time.Millisecond) // let pinning start time.Sleep(100 * time.Millisecond) // let pinning start
pInfo := spt.optracker.Get(ctx, slowPin.Cid) pInfo := spt.optracker.Get(ctx, slowPin.Cid, api.IPFSID{})
if pInfo.Status == api.TrackerStatusUnpinned { if pInfo.Status == api.TrackerStatusUnpinned {
t.Fatal("slowPin should be tracked") t.Fatal("slowPin should be tracked")
} }
@ -264,7 +268,7 @@ func TestTrackUntrackWithNoCancel(t *testing.T) {
} }
// fastPin should be queued because slow pin is pinning // fastPin should be queued because slow pin is pinning
fastPInfo := spt.optracker.Get(ctx, fastPin.Cid) fastPInfo := spt.optracker.Get(ctx, fastPin.Cid, api.IPFSID{})
if fastPInfo.Status == api.TrackerStatusUnpinned { if fastPInfo.Status == api.TrackerStatusUnpinned {
t.Fatal("fastPin should be tracked") t.Fatal("fastPin should be tracked")
} }
@ -281,7 +285,7 @@ func TestTrackUntrackWithNoCancel(t *testing.T) {
t.Errorf("fastPin should be queued to pin but is %s", fastPInfo.Status) t.Errorf("fastPin should be queued to pin but is %s", fastPInfo.Status)
} }
pi := spt.optracker.Get(ctx, fastPin.Cid) pi := spt.optracker.Get(ctx, fastPin.Cid, api.IPFSID{})
if pi.Cid == cid.Undef { if pi.Cid == cid.Undef {
t.Error("fastPin should have been removed from tracker") t.Error("fastPin should have been removed from tracker")
} }
@ -313,7 +317,7 @@ func TestUntrackTrackWithCancel(t *testing.T) {
time.Sleep(100 * time.Millisecond) time.Sleep(100 * time.Millisecond)
pi := spt.optracker.Get(ctx, slowPin.Cid) pi := spt.optracker.Get(ctx, slowPin.Cid, api.IPFSID{})
if pi.Cid == cid.Undef { if pi.Cid == cid.Undef {
t.Fatal("expected slowPin to be tracked") t.Fatal("expected slowPin to be tracked")
} }
@ -374,7 +378,7 @@ func TestUntrackTrackWithNoCancel(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
pi := spt.optracker.Get(ctx, fastPin.Cid) pi := spt.optracker.Get(ctx, fastPin.Cid, api.IPFSID{})
if pi.Cid == cid.Undef { if pi.Cid == cid.Undef {
t.Fatal("c untrack operation should be tracked") t.Fatal("c untrack operation should be tracked")
} }
@ -405,11 +409,10 @@ func TestStatusAll(t *testing.T) {
// - Build a state with two pins (Cid1, Cid4) // - Build a state with two pins (Cid1, Cid4)
// - The IPFS Mock reports Cid1 and Cid2 // - The IPFS Mock reports Cid1 and Cid2
// - Track a SlowCid additionally // - Track a SlowCid additionally
slowPin := api.PinWithOpts(test.SlowCid1, pinOpts)
spt := testStatelessPinTracker(t, normalPin, normalPin2) spt := testStatelessPinTracker(t, normalPin, normalPin2, slowPin)
defer spt.Shutdown(ctx) defer spt.Shutdown(ctx)
slowPin := api.PinWithOpts(test.SlowCid1, pinOpts)
err := spt.Track(ctx, slowPin) err := spt.Track(ctx, slowPin)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@ -421,20 +424,23 @@ func TestStatusAll(t *testing.T) {
// * A slow CID pinning // * A slow CID pinning
// * Cid1 is pinned // * Cid1 is pinned
// * Cid4 should be in PinError (it's in the state but not on IPFS) // * Cid4 should be in PinError (it's in the state but not on IPFS)
stAll := spt.StatusAll(ctx, api.TrackerStatusUndefined) stAll := make(chan api.PinInfo, 10)
if len(stAll) != 3 { err = spt.StatusAll(ctx, api.TrackerStatusUndefined, stAll)
t.Errorf("wrong status length. Expected 3, got: %d", len(stAll)) if err != nil {
t.Fatal(err)
} }
for _, pi := range stAll { n := 0
for pi := range stAll {
n++
switch pi.Cid { switch pi.Cid {
case test.Cid1: case test.Cid1:
if pi.Status != api.TrackerStatusPinned { if pi.Status != api.TrackerStatusPinned {
t.Error("cid1 should be pinned") t.Error(test.Cid1, " should be pinned")
} }
case test.Cid4: case test.Cid4:
if pi.Status != api.TrackerStatusUnexpectedlyUnpinned { if pi.Status != api.TrackerStatusUnexpectedlyUnpinned {
t.Error("cid2 should be in unexpectedly_unpinned status") t.Error(test.Cid2, " should be in unexpectedly_unpinned status")
} }
case test.SlowCid1: case test.SlowCid1:
if pi.Status != api.TrackerStatusPinning { if pi.Status != api.TrackerStatusPinning {
@ -447,6 +453,9 @@ func TestStatusAll(t *testing.T) {
t.Error("IPFS field should be set") t.Error("IPFS field should be set")
} }
} }
if n != 3 {
t.Errorf("wrong status length. Expected 3, got: %d", n)
}
} }
// TestStatus checks that the Status calls correctly reports tracked // TestStatus checks that the Status calls correctly reports tracked
@ -565,12 +574,3 @@ func TestAttemptCountAndPriority(t *testing.T) {
t.Errorf("errPin should have 2 attempt counts to unpin: %+v", st) t.Errorf("errPin should have 2 attempt counts to unpin: %+v", st)
} }
} }
func BenchmarkTracker_localStatus(b *testing.B) {
tracker := testStatelessPinTracker(b)
ctx := context.Background()
b.ResetTimer()
for i := 0; i < b.N; i++ {
tracker.localStatus(ctx, true, api.TrackerStatusUndefined)
}
}
View File
@ -2,6 +2,7 @@ package ipfscluster
import ( import (
"context" "context"
"errors"
"github.com/ipfs/ipfs-cluster/api" "github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/state" "github.com/ipfs/ipfs-cluster/state"
@ -32,6 +33,8 @@ const (
// RPCEndpointType controls how access is granted to an RPC endpoint // RPCEndpointType controls how access is granted to an RPC endpoint
type RPCEndpointType int type RPCEndpointType int
const rpcStreamBufferSize = 1024
// A trick to find where something is used (i.e. Cluster.Pin): // A trick to find where something is used (i.e. Cluster.Pin):
// grep -R -B 3 '"Pin"' | grep -C 1 '"Cluster"'. // grep -R -B 3 '"Pin"' | grep -C 1 '"Cluster"'.
// This does not cover globalPinInfo*(...) broadcasts nor redirects to leader // This does not cover globalPinInfo*(...) broadcasts nor redirects to leader
@ -63,6 +66,7 @@ func newRPCServer(c *Cluster) (*rpc.Server, error) {
version.RPCProtocol, version.RPCProtocol,
rpc.WithServerStatsHandler(&ocgorpc.ServerHandler{}), rpc.WithServerStatsHandler(&ocgorpc.ServerHandler{}),
rpc.WithAuthorizeFunc(authF), rpc.WithAuthorizeFunc(authF),
rpc.WithStreamBufferSize(rpcStreamBufferSize),
) )
} else { } else {
s = rpc.NewServer(c.host, version.RPCProtocol, rpc.WithAuthorizeFunc(authF)) s = rpc.NewServer(c.host, version.RPCProtocol, rpc.WithAuthorizeFunc(authF))
@ -201,17 +205,7 @@ func (rpcapi *ClusterRPCAPI) UnpinPath(ctx context.Context, in api.PinPath, out
// Pins runs Cluster.Pins(). // Pins runs Cluster.Pins().
func (rpcapi *ClusterRPCAPI) Pins(ctx context.Context, in <-chan struct{}, out chan<- api.Pin) error { func (rpcapi *ClusterRPCAPI) Pins(ctx context.Context, in <-chan struct{}, out chan<- api.Pin) error {
pinCh, err := rpcapi.c.PinsChannel(ctx) return rpcapi.c.Pins(ctx, out)
if err != nil {
return err
}
for pin := range pinCh {
out <- pin
}
close(out)
return ctx.Err()
} }
// PinGet runs Cluster.PinGet(). // PinGet runs Cluster.PinGet().
@ -275,20 +269,15 @@ func (rpcapi *ClusterRPCAPI) Join(ctx context.Context, in api.Multiaddr, out *st
} }
// StatusAll runs Cluster.StatusAll(). // StatusAll runs Cluster.StatusAll().
func (rpcapi *ClusterRPCAPI) StatusAll(ctx context.Context, in api.TrackerStatus, out *[]api.GlobalPinInfo) error { func (rpcapi *ClusterRPCAPI) StatusAll(ctx context.Context, in <-chan api.TrackerStatus, out chan<- api.GlobalPinInfo) error {
pinfos, err := rpcapi.c.StatusAll(ctx, in) filter := <-in
if err != nil { return rpcapi.c.StatusAll(ctx, filter, out)
return err
}
*out = pinfos
return nil
} }
// StatusAllLocal runs Cluster.StatusAllLocal(). // StatusAllLocal runs Cluster.StatusAllLocal().
func (rpcapi *ClusterRPCAPI) StatusAllLocal(ctx context.Context, in api.TrackerStatus, out *[]api.PinInfo) error { func (rpcapi *ClusterRPCAPI) StatusAllLocal(ctx context.Context, in <-chan api.TrackerStatus, out chan<- api.PinInfo) error {
pinfos := rpcapi.c.StatusAllLocal(ctx, in) filter := <-in
*out = pinfos return rpcapi.c.StatusAllLocal(ctx, filter, out)
return nil
} }
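On the wire these are go-libp2p-gorpc streaming endpoints: the client seeds in with the single argument, closes it, then drains out until the library closes it. A hedged client-side sketch for the StatusAll endpoint (rpcClient is an assumed *rpc.Client; the pattern mirrors the ipfsPins helper in the stateless tracker):

in := make(chan api.TrackerStatus, 1)
in <- api.TrackerStatusUndefined // the filter is the only argument
close(in)
out := make(chan api.GlobalPinInfo, rpcStreamBufferSize)
go func() {
    // "" addresses the local peer; Stream closes out when done.
    if err := rpcClient.Stream(ctx, "", "Cluster", "StatusAll", in, out); err != nil {
        logger.Error(err)
    }
}()
for gpi := range out {
    _ = gpi
}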
// Status runs Cluster.Status(). // Status runs Cluster.Status().
@ -309,23 +298,13 @@ func (rpcapi *ClusterRPCAPI) StatusLocal(ctx context.Context, in cid.Cid, out *a
} }
// RecoverAll runs Cluster.RecoverAll(). // RecoverAll runs Cluster.RecoverAll().
func (rpcapi *ClusterRPCAPI) RecoverAll(ctx context.Context, in struct{}, out *[]api.GlobalPinInfo) error { func (rpcapi *ClusterRPCAPI) RecoverAll(ctx context.Context, in <-chan struct{}, out chan<- api.GlobalPinInfo) error {
pinfos, err := rpcapi.c.RecoverAll(ctx) return rpcapi.c.RecoverAll(ctx, out)
if err != nil {
return err
}
*out = pinfos
return nil
} }
// RecoverAllLocal runs Cluster.RecoverAllLocal(). // RecoverAllLocal runs Cluster.RecoverAllLocal().
func (rpcapi *ClusterRPCAPI) RecoverAllLocal(ctx context.Context, in struct{}, out *[]api.PinInfo) error { func (rpcapi *ClusterRPCAPI) RecoverAllLocal(ctx context.Context, in <-chan struct{}, out chan<- api.PinInfo) error {
pinfos, err := rpcapi.c.RecoverAllLocal(ctx) return rpcapi.c.RecoverAllLocal(ctx, out)
if err != nil {
return err
}
*out = pinfos
return nil
} }
// Recover runs Cluster.Recover(). // Recover runs Cluster.Recover().
@ -469,11 +448,17 @@ func (rpcapi *PinTrackerRPCAPI) Untrack(ctx context.Context, in api.Pin, out *st
} }
// StatusAll runs PinTracker.StatusAll(). // StatusAll runs PinTracker.StatusAll().
func (rpcapi *PinTrackerRPCAPI) StatusAll(ctx context.Context, in api.TrackerStatus, out *[]api.PinInfo) error { func (rpcapi *PinTrackerRPCAPI) StatusAll(ctx context.Context, in <-chan api.TrackerStatus, out chan<- api.PinInfo) error {
ctx, span := trace.StartSpan(ctx, "rpc/tracker/StatusAll") ctx, span := trace.StartSpan(ctx, "rpc/tracker/StatusAll")
defer span.End() defer span.End()
*out = rpcapi.tracker.StatusAll(ctx, in)
return nil select {
case <-ctx.Done():
close(out)
return ctx.Err()
case filter := <-in:
return rpcapi.tracker.StatusAll(ctx, filter, out)
}
} }
// Status runs PinTracker.Status(). // Status runs PinTracker.Status().
@ -486,15 +471,10 @@ func (rpcapi *PinTrackerRPCAPI) Status(ctx context.Context, in cid.Cid, out *api
} }
// RecoverAll runs PinTracker.RecoverAll(). // RecoverAll runs PinTracker.RecoverAll().
func (rpcapi *PinTrackerRPCAPI) RecoverAll(ctx context.Context, in struct{}, out *[]api.PinInfo) error { func (rpcapi *PinTrackerRPCAPI) RecoverAll(ctx context.Context, in <-chan struct{}, out chan<- api.PinInfo) error {
ctx, span := trace.StartSpan(ctx, "rpc/tracker/RecoverAll") ctx, span := trace.StartSpan(ctx, "rpc/tracker/RecoverAll")
defer span.End() defer span.End()
pinfos, err := rpcapi.tracker.RecoverAll(ctx) return rpcapi.tracker.RecoverAll(ctx, out)
if err != nil {
return err
}
*out = pinfos
return nil
} }
// Recover runs PinTracker.Recover(). // Recover runs PinTracker.Recover().
@ -533,13 +513,18 @@ func (rpcapi *IPFSConnectorRPCAPI) PinLsCid(ctx context.Context, in api.Pin, out
} }
// PinLs runs IPFSConnector.PinLs(). // PinLs runs IPFSConnector.PinLs().
func (rpcapi *IPFSConnectorRPCAPI) PinLs(ctx context.Context, in string, out *map[string]api.IPFSPinStatus) error { func (rpcapi *IPFSConnectorRPCAPI) PinLs(ctx context.Context, in <-chan []string, out chan<- api.IPFSPinInfo) error {
m, err := rpcapi.ipfs.PinLs(ctx, in) select {
if err != nil { case <-ctx.Done():
return err close(out)
return ctx.Err()
case pinTypes, ok := <-in:
if !ok {
close(out)
return errors.New("no pinType provided for pin/ls")
}
return rpcapi.ipfs.PinLs(ctx, pinTypes, out)
} }
*out = m
return nil
} }
// ConfigKey runs IPFSConnector.ConfigKey(). // ConfigKey runs IPFSConnector.ConfigKey().
View File
@ -12,17 +12,17 @@ test_expect_success IPFS,CLUSTER "state cleanup refreshes state on restart (crdt
ipfs-cluster-ctl pin add "$cid" && sleep 5 && ipfs-cluster-ctl pin add "$cid" && sleep 5 &&
ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid" && ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid" &&
ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED" && ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED" &&
[ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ] && [ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq -n "[inputs] | length")" ] &&
cluster_kill && sleep 5 && cluster_kill && sleep 5 &&
ipfs-cluster-service --config "test-config" state cleanup -f && ipfs-cluster-service --config "test-config" state cleanup -f &&
cluster_start && sleep 5 && cluster_start && sleep 5 &&
[ 0 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ] [ 0 -eq "$(ipfs-cluster-ctl --enc=json status | jq -n "[inputs] | length")" ]
' '
test_expect_success IPFS,CLUSTER "export + cleanup + import == noop (crdt)" ' test_expect_success IPFS,CLUSTER "export + cleanup + import == noop (crdt)" '
cid=`docker exec ipfs sh -c "echo test_54 | ipfs add -q"` && cid=`docker exec ipfs sh -c "echo test_54 | ipfs add -q"` &&
ipfs-cluster-ctl pin add "$cid" && sleep 5 && ipfs-cluster-ctl pin add "$cid" && sleep 5 &&
[ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ] && [ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq -n "[inputs] | length")" ] &&
cluster_kill && sleep 5 && cluster_kill && sleep 5 &&
ipfs-cluster-service --config "test-config" state export -f import.json && ipfs-cluster-service --config "test-config" state export -f import.json &&
ipfs-cluster-service --config "test-config" state cleanup -f && ipfs-cluster-service --config "test-config" state cleanup -f &&
@ -30,7 +30,7 @@ test_expect_success IPFS,CLUSTER "export + cleanup + import == noop (crdt)" '
cluster_start && sleep 5 && cluster_start && sleep 5 &&
ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid" && ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid" &&
ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED" && ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED" &&
[ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ] [ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq -n "[inputs] | length")" ]
' '
cluster_kill cluster_kill
@ -42,17 +42,17 @@ test_expect_success IPFS,CLUSTER "state cleanup refreshes state on restart (raft
ipfs-cluster-ctl pin add "$cid" && sleep 5 && ipfs-cluster-ctl pin add "$cid" && sleep 5 &&
ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid" && ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid" &&
ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED" && ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED" &&
[ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ] && [ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq -n "[inputs] | length")" ] &&
cluster_kill && sleep 5 && cluster_kill && sleep 5 &&
ipfs-cluster-service --config "test-config" state cleanup -f && ipfs-cluster-service --config "test-config" state cleanup -f &&
cluster_start && sleep 5 && cluster_start && sleep 5 &&
[ 0 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ] [ 0 -eq "$(ipfs-cluster-ctl --enc=json status | jq -n "[inputs] | length")" ]
' '
test_expect_success IPFS,CLUSTER "export + cleanup + import == noop (raft)" ' test_expect_success IPFS,CLUSTER "export + cleanup + import == noop (raft)" '
cid=`docker exec ipfs sh -c "echo test_54 | ipfs add -q"` && cid=`docker exec ipfs sh -c "echo test_54 | ipfs add -q"` &&
ipfs-cluster-ctl pin add "$cid" && sleep 5 && ipfs-cluster-ctl pin add "$cid" && sleep 5 &&
[ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ] && [ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq -n "[inputs] | length")" ] &&
cluster_kill && sleep 5 && cluster_kill && sleep 5 &&
ipfs-cluster-service --config "test-config" state export -f import.json && ipfs-cluster-service --config "test-config" state export -f import.json &&
ipfs-cluster-service --config "test-config" state cleanup -f && ipfs-cluster-service --config "test-config" state cleanup -f &&
@ -60,7 +60,7 @@ test_expect_success IPFS,CLUSTER "export + cleanup + import == noop (raft)" '
cluster_start && sleep 5 && cluster_start && sleep 5 &&
ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid" && ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid" &&
ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED" && ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED" &&
[ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq ". | length")" ] [ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq -n "[inputs] | length")" ]
' '
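(The jq change above is the user-visible side of streaming: ipfs-cluster-ctl --enc=json status now emits newline-delimited JSON objects rather than a single array, so counting results means slurping the stream with jq -n "[inputs] | length" instead of taking the length of one document.)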
View File
@ -4,6 +4,7 @@ package dsstate
import ( import (
"context" "context"
"fmt"
"io" "io"
"github.com/ipfs/ipfs-cluster/api" "github.com/ipfs/ipfs-cluster/api"
@ -122,9 +123,11 @@ func (st *State) Has(ctx context.Context, c cid.Cid) (bool, error) {
return ok, nil return ok, nil
} }
// List returns the unsorted list of all Pins that have been added to the // List sends all the pins in the pinset on the given channel.
// datastore. // It returns and closes the channel when done.
func (st *State) List(ctx context.Context) (<-chan api.Pin, error) { func (st *State) List(ctx context.Context, out chan<- api.Pin) error {
defer close(out)
_, span := trace.StartSpan(ctx, "state/dsstate/List") _, span := trace.StartSpan(ctx, "state/dsstate/List")
defer span.End() defer span.End()
@ -134,52 +137,49 @@ func (st *State) List(ctx context.Context) (<-chan api.Pin, error) {
results, err := st.dsRead.Query(ctx, q) results, err := st.dsRead.Query(ctx, q)
if err != nil { if err != nil {
return nil, err return err
} }
pinsCh := make(chan api.Pin, 1024) defer results.Close()
go func() {
defer close(pinsCh)
defer results.Close() total := 0
for r := range results.Next() {
total := 0 // Abort if we shutdown.
for r := range results.Next() { select {
// Abort if we shutdown. case <-ctx.Done():
select { err = fmt.Errorf("full pinset listing aborted: %w", ctx.Err())
case <-ctx.Done(): logger.Warning(err)
logger.Warningf("Full pinset listing aborted: %s", ctx.Err()) return err
return default:
default:
}
if r.Error != nil {
logger.Errorf("error in query result: %s", r.Error)
return
}
k := ds.NewKey(r.Key)
ci, err := st.unkey(k)
if err != nil {
logger.Warn("bad key (ignoring). key: ", k, "error: ", err)
continue
}
p, err := st.deserializePin(ci, r.Value)
if err != nil {
logger.Errorf("error deserializing pin (%s): %s", r.Key, err)
continue
}
pinsCh <- p
if total > 0 && total%500000 == 0 {
logger.Infof("Full pinset listing in progress: %d pins so far", total)
}
total++
} }
if total >= 500000 { if r.Error != nil {
logger.Infof("Full pinset listing finished: %d pins", total) err := fmt.Errorf("error in query result: %w", r.Error)
logger.Error(err)
return err
}
k := ds.NewKey(r.Key)
ci, err := st.unkey(k)
if err != nil {
logger.Warn("bad key (ignoring). key: ", k, "error: ", err)
continue
} }
}()
return pinsCh, nil p, err := st.deserializePin(ci, r.Value)
if err != nil {
logger.Errorf("error deserializing pin (%s): %s", r.Key, err)
continue
}
out <- p
if total > 0 && total%500000 == 0 {
logger.Infof("Full pinset listing in progress: %d pins so far", total)
}
total++
}
if total >= 500000 {
logger.Infof("Full pinset listing finished: %d pins", total)
}
return nil
} }
// Migrate migrates an older state version to the current one. // Migrate migrates an older state version to the current one.
View File
@ -93,10 +93,13 @@ func TestList(t *testing.T) {
}() }()
st := newState(t) st := newState(t)
st.Add(ctx, c) st.Add(ctx, c)
pinCh, err := st.List(ctx) out := make(chan api.Pin)
if err != nil { go func() {
t.Fatal(err) err := st.List(ctx, out)
} if err != nil {
t.Error(err)
}
}()
ctx, cancel := context.WithTimeout(context.Background(), time.Second) ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel() defer cancel()
@ -104,7 +107,7 @@ func TestList(t *testing.T) {
var list0 api.Pin var list0 api.Pin
for { for {
select { select {
case p, ok := <-pinCh: case p, ok := <-out:
if !ok && !list0.Cid.Defined() { if !ok && !list0.Cid.Defined() {
t.Fatal("should have read list0 first") t.Fatal("should have read list0 first")
} }
View File
@ -10,10 +10,9 @@ import (
type empty struct{} type empty struct{}
func (e *empty) List(ctx context.Context) (<-chan api.Pin, error) { func (e *empty) List(ctx context.Context, out chan<- api.Pin) error {
ch := make(chan api.Pin) close(out)
close(ch) return nil
return ch, nil
} }
func (e *empty) Has(ctx context.Context, c cid.Cid) (bool, error) { func (e *empty) Has(ctx context.Context, c cid.Cid) (bool, error) {
View File
@ -34,7 +34,7 @@ type State interface {
// ReadOnly represents the read side of a State. // ReadOnly represents the read side of a State.
type ReadOnly interface { type ReadOnly interface {
// List lists all the pins in the state. // List lists all the pins in the state.
List(context.Context) (<-chan api.Pin, error) List(context.Context, chan<- api.Pin) error
// Has returns true if the state is holding information for a Cid. // Has returns true if the state is holding information for a Cid.
Has(context.Context, cid.Cid) (bool, error) Has(context.Context, cid.Cid) (bool, error)
// Get returns the information attached to this pin, if any. If the // Get returns the information attached to this pin, if any. If the
View File
@ -58,7 +58,7 @@ type mockPinType struct {
Type string Type string
} }
type mockPinLsResp struct { type mockPinLsAllResp struct {
Keys map[string]mockPinType Keys map[string]mockPinType
} }
@ -268,19 +268,35 @@ func (m *IpfsMock) handler(w http.ResponseWriter, r *http.Request) {
j, _ := json.Marshal(resp) j, _ := json.Marshal(resp)
w.Write(j) w.Write(j)
case "pin/ls": case "pin/ls":
query := r.URL.Query()
stream := query.Get("stream") == "true"
arg, ok := extractCid(r.URL) arg, ok := extractCid(r.URL)
if !ok { if !ok {
rMap := make(map[string]mockPinType) pins := make(chan api.Pin, 10)
pins, err := m.pinMap.List(ctx)
if err != nil { go func() {
goto ERROR m.pinMap.List(ctx, pins)
}()
if stream {
for p := range pins {
j, _ := json.Marshal(api.IPFSPinInfo{
Cid: api.Cid(p.Cid),
Type: p.Mode.ToIPFSPinStatus(),
})
w.Write(j)
}
break
} else {
rMap := make(map[string]mockPinType)
for p := range pins {
rMap[p.Cid.String()] = mockPinType{p.Mode.String()}
}
j, _ := json.Marshal(mockPinLsAllResp{rMap})
w.Write(j)
break
} }
for p := range pins {
rMap[p.Cid.String()] = mockPinType{p.Mode.String()}
}
j, _ := json.Marshal(mockPinLsResp{rMap})
w.Write(j)
break
} }
cidStr := arg cidStr := arg
@ -301,16 +317,28 @@ func (m *IpfsMock) handler(w http.ResponseWriter, r *http.Request) {
return return
} }
if c.Equals(Cid4) { if stream {
// this is a v1 cid. Do not return default-base32 but base58btc encoding of it if c.Equals(Cid4) {
w.Write([]byte(`{ "Keys": { "zCT5htkdztJi3x4zBNHo8TRvGHPLTdHUdCLKgTGMgQcRKSLoWxK1": { "Type": "recursive" }}}`)) // this is a v1 cid. Do not return default-base32 but base58btc encoding of it
return w.Write([]byte(`{ "Cid": "zCT5htkdztJi3x4zBNHo8TRvGHPLTdHUdCLKgTGMgQcRKSLoWxK1", "Type": "recursive" }`))
break
}
j, _ := json.Marshal(api.IPFSPinInfo{
Cid: api.Cid(pinObj.Cid),
Type: pinObj.Mode.ToIPFSPinStatus(),
})
w.Write(j)
} else {
if c.Equals(Cid4) {
// this is a v1 cid. Do not return default-base32 but base58btc encoding of it
w.Write([]byte(`{ "Keys": { "zCT5htkdztJi3x4zBNHo8TRvGHPLTdHUdCLKgTGMgQcRKSLoWxK1": { "Type": "recursive" }}}`))
break
}
rMap := make(map[string]mockPinType)
rMap[cidStr] = mockPinType{pinObj.Mode.String()}
j, _ := json.Marshal(mockPinLsAllResp{rMap})
w.Write(j)
} }
rMap := make(map[string]mockPinType)
rMap[cidStr] = mockPinType{pinObj.Mode.String()}
j, _ := json.Marshal(mockPinLsResp{rMap})
w.Write(j)
case "swarm/connect": case "swarm/connect":
arg, ok := extractCid(r.URL) arg, ok := extractCid(r.URL)
if !ok { if !ok {
@ -424,10 +452,10 @@ func (m *IpfsMock) handler(w http.ResponseWriter, r *http.Request) {
case "repo/stat": case "repo/stat":
sizeOnly := r.URL.Query().Get("size-only") sizeOnly := r.URL.Query().Get("size-only")
pinsCh, err := m.pinMap.List(ctx) pinsCh := make(chan api.Pin, 10)
if err != nil { go func() {
goto ERROR m.pinMap.List(ctx, pinsCh)
} }()
var pins []api.Pin var pins []api.Pin
for p := range pinsCh { for p := range pinsCh {
View File
@ -34,8 +34,8 @@ func NewMockRPCClient(t testing.TB) *rpc.Client {
// NewMockRPCClientWithHost returns a mock ipfs-cluster RPC server // NewMockRPCClientWithHost returns a mock ipfs-cluster RPC server
// initialized with a given host. // initialized with a given host.
func NewMockRPCClientWithHost(t testing.TB, h host.Host) *rpc.Client { func NewMockRPCClientWithHost(t testing.TB, h host.Host) *rpc.Client {
s := rpc.NewServer(h, "mock") s := rpc.NewServer(h, "mock", rpc.WithStreamBufferSize(1024))
c := rpc.NewClientWithServer(h, "mock", s) c := rpc.NewClientWithServer(h, "mock", s, rpc.WithMultiStreamBufferSize(1024))
err := s.RegisterName("Cluster", &mockCluster{}) err := s.RegisterName("Cluster", &mockCluster{})
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@ -230,7 +230,10 @@ func (mock *mockCluster) ConnectGraph(ctx context.Context, in struct{}, out *api
return nil return nil
} }
func (mock *mockCluster) StatusAll(ctx context.Context, in api.TrackerStatus, out *[]api.GlobalPinInfo) error { func (mock *mockCluster) StatusAll(ctx context.Context, in <-chan api.TrackerStatus, out chan<- api.GlobalPinInfo) error {
defer close(out)
filter := <-in
pid := peer.Encode(PeerID1) pid := peer.Encode(PeerID1)
gPinInfos := []api.GlobalPinInfo{ gPinInfos := []api.GlobalPinInfo{
{ {
@ -272,23 +275,21 @@ func (mock *mockCluster) StatusAll(ctx context.Context, in api.TrackerStatus, ou
// a single peer, we will not have an entry for the cid at all. // a single peer, we will not have an entry for the cid at all.
for _, gpi := range gPinInfos { for _, gpi := range gPinInfos {
for id, pi := range gpi.PeerMap { for id, pi := range gpi.PeerMap {
if !in.Match(pi.Status) { if !filter.Match(pi.Status) {
delete(gpi.PeerMap, id) delete(gpi.PeerMap, id)
} }
} }
} }
filtered := make([]api.GlobalPinInfo, 0, len(gPinInfos))
for _, gpi := range gPinInfos { for _, gpi := range gPinInfos {
if len(gpi.PeerMap) > 0 { if len(gpi.PeerMap) > 0 {
filtered = append(filtered, gpi) out <- gpi
} }
} }
*out = filtered
return nil return nil
} }
func (mock *mockCluster) StatusAllLocal(ctx context.Context, in api.TrackerStatus, out *[]api.PinInfo) error { func (mock *mockCluster) StatusAllLocal(ctx context.Context, in <-chan api.TrackerStatus, out chan<- api.PinInfo) error {
return (&mockPinTracker{}).StatusAll(ctx, in, out) return (&mockPinTracker{}).StatusAll(ctx, in, out)
} }
@ -324,11 +325,14 @@ func (mock *mockCluster) StatusLocal(ctx context.Context, in cid.Cid, out *api.P
return (&mockPinTracker{}).Status(ctx, in, out) return (&mockPinTracker{}).Status(ctx, in, out)
} }
func (mock *mockCluster) RecoverAll(ctx context.Context, in struct{}, out *[]api.GlobalPinInfo) error { func (mock *mockCluster) RecoverAll(ctx context.Context, in <-chan struct{}, out chan<- api.GlobalPinInfo) error {
return mock.StatusAll(ctx, api.TrackerStatusUndefined, out) f := make(chan api.TrackerStatus, 1)
f <- api.TrackerStatusUndefined
close(f)
return mock.StatusAll(ctx, f, out)
} }
func (mock *mockCluster) RecoverAllLocal(ctx context.Context, in struct{}, out *[]api.PinInfo) error { func (mock *mockCluster) RecoverAllLocal(ctx context.Context, in <-chan struct{}, out chan<- api.PinInfo) error {
return (&mockPinTracker{}).RecoverAll(ctx, in, out) return (&mockPinTracker{}).RecoverAll(ctx, in, out)
} }
@ -421,7 +425,10 @@ func (mock *mockPinTracker) Untrack(ctx context.Context, in api.Pin, out *struct
return nil return nil
} }
func (mock *mockPinTracker) StatusAll(ctx context.Context, in api.TrackerStatus, out *[]api.PinInfo) error { func (mock *mockPinTracker) StatusAll(ctx context.Context, in <-chan api.TrackerStatus, out chan<- api.PinInfo) error {
defer close(out)
filter := <-in
pinInfos := []api.PinInfo{ pinInfos := []api.PinInfo{
{ {
Cid: Cid1, Cid: Cid1,
@ -440,14 +447,11 @@ func (mock *mockPinTracker) StatusAll(ctx context.Context, in api.TrackerStatus,
}, },
}, },
} }
filtered := make([]api.PinInfo, 0, len(pinInfos))
for _, pi := range pinInfos { for _, pi := range pinInfos {
if in.Match(pi.Status) { if filter.Match(pi.Status) {
filtered = append(filtered, pi) out <- pi
} }
} }
*out = filtered
return nil return nil
} }
@ -467,8 +471,8 @@ func (mock *mockPinTracker) Status(ctx context.Context, in cid.Cid, out *api.Pin
return nil return nil
} }
func (mock *mockPinTracker) RecoverAll(ctx context.Context, in struct{}, out *[]api.PinInfo) error { func (mock *mockPinTracker) RecoverAll(ctx context.Context, in <-chan struct{}, out chan<- api.PinInfo) error {
*out = make([]api.PinInfo, 0) close(out)
return nil return nil
} }
@ -534,12 +538,10 @@ func (mock *mockIPFSConnector) PinLsCid(ctx context.Context, in api.Pin, out *ap
return nil return nil
} }
func (mock *mockIPFSConnector) PinLs(ctx context.Context, in string, out *map[string]api.IPFSPinStatus) error { func (mock *mockIPFSConnector) PinLs(ctx context.Context, in <-chan []string, out chan<- api.IPFSPinInfo) error {
m := map[string]api.IPFSPinStatus{ out <- api.IPFSPinInfo{Cid: api.Cid(Cid1), Type: api.IPFSPinStatusRecursive}
Cid1.String(): api.IPFSPinStatusRecursive, out <- api.IPFSPinInfo{Cid: api.Cid(Cid3), Type: api.IPFSPinStatusRecursive}
Cid3.String(): api.IPFSPinStatusRecursive, close(out)
}
*out = m
return nil return nil
} }
View File
@ -300,7 +300,7 @@ func (d *MockDAGService) Get(ctx context.Context, cid cid.Cid) (format.Node, err
if n, ok := d.Nodes[cid]; ok { if n, ok := d.Nodes[cid]; ok {
return n, nil return n, nil
} }
return nil, format.ErrNotFound return nil, format.ErrNotFound{Cid: cid}
} }
// GetMany reads many nodes. // GetMany reads many nodes.
@ -312,7 +312,7 @@ func (d *MockDAGService) GetMany(ctx context.Context, cids []cid.Cid) <-chan *fo
if n, ok := d.Nodes[c]; ok { if n, ok := d.Nodes[c]; ok {
out <- &format.NodeOption{Node: n} out <- &format.NodeOption{Node: n}
} else { } else {
out <- &format.NodeOption{Err: format.ErrNotFound} out <- &format.NodeOption{Err: format.ErrNotFound{Cid: c}}
} }
} }
close(out) close(out)