Merge pull request #1646 from ipfs/fix/leaks-on-ipfs-restart

Fix: bad behaviour when adding while ipfs is down
This commit is contained in:
Hector Sanjuan 2022-04-29 22:57:26 +02:00 committed by GitHub
commit d89e62117b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

View File

@ -18,7 +18,6 @@ import (
"time" "time"
"github.com/ipfs/ipfs-cluster/api" "github.com/ipfs/ipfs-cluster/api"
"go.uber.org/multierr"
cid "github.com/ipfs/go-cid" cid "github.com/ipfs/go-cid"
files "github.com/ipfs/go-ipfs-files" files "github.com/ipfs/go-ipfs-files"
@ -617,6 +616,7 @@ func checkResponse(path string, res *http.Response) ([]byte, error) {
} }
body, err := ioutil.ReadAll(res.Body) body, err := ioutil.ReadAll(res.Body)
res.Body.Close()
if err == nil { if err == nil {
var ipfsErr ipfsError var ipfsErr ipfsError
if err := json.Unmarshal(body, &ipfsErr); err == nil { if err := json.Unmarshal(body, &ipfsErr); err == nil {
@ -1046,8 +1046,6 @@ func (ipfs *Connector) BlockStream(ctx context.Context, blocks <-chan api.NodeWi
logger.Debug("streaming blocks to IPFS") logger.Debug("streaming blocks to IPFS")
defer ipfs.updateInformerMetric(ctx) defer ipfs.updateInformerMetric(ctx)
var errs error
it := &chanIterator{ it := &chanIterator{
ctx: ctx, ctx: ctx,
blocks: blocks, blocks: blocks,
@ -1072,46 +1070,41 @@ func (ipfs *Connector) BlockStream(ctx context.Context, blocks <-chan api.NodeWi
} }
url := "block/put?" + q.Encode() url := "block/put?" + q.Encode()
// We essentially keep going on any request errors and keep putting // Now we stream the blocks to ipfs. In case of error, we return
// blocks until we are done. We will, however, return a final error if // directly, but leave a goroutine draining the channel until it is
// there were errors along the way, but we do not abort the blocks // closed, which should be soon after returning.
// stream because we could not block/put. multiFileR := files.NewMultiFileReader(dir, true)
for !it.Done() { contentType := "multipart/form-data; boundary=" + multiFileR.Boundary()
select { body, err := ipfs.postCtxStreamResponse(ctx, url, contentType, multiFileR)
case <-ctx.Done(): if err != nil {
logger.Error("BlockStream aborted: %s", ctx.Err()) return err
return ctx.Err() }
default: defer body.Close()
}
multiFileR := files.NewMultiFileReader(dir, true) dec := json.NewDecoder(body)
contentType := "multipart/form-data; boundary=" + multiFileR.Boundary() for {
body, err := ipfs.postCtxStreamResponse(ctx, url, contentType, multiFileR) var res ipfsBlockPutResp
err = dec.Decode(&res)
if err == io.EOF {
return nil
}
if err != nil { if err != nil {
errs = multierr.Append(errs, err) logger.Error(err)
continue break
} }
dec := json.NewDecoder(body) logger.Debugf("response block: %s", res.Key)
for { if !it.Seen(res.Key) {
var res ipfsBlockPutResp logger.Warningf("blockPut response CID (%s) does not match the multihash of any blocks sent", res.Key)
err := dec.Decode(&res)
if err == io.EOF {
break
}
if err != nil {
logger.Error(err)
errs = multierr.Append(errs, err)
break
}
logger.Debugf("response block: %s", res.Key)
if !it.Seen(res.Key) {
logger.Warningf("blockPut response CID (%s) does not match the multihash of any blocks sent", res.Key)
}
} }
// continue until it.Done()
} }
return errs // keep draining blocks channel until closed.
go func() {
for range blocks {
}
}()
return err
} }
// BlockGet retrieves an ipfs block with the given cid // BlockGet retrieves an ipfs block with the given cid