2018-08-07 18:01:02 +00:00
|
|
|
package adder
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"errors"
|
Adders: stream blocks to destinations
This commit fixes #810 and adds block streaming to the final destinations when
adding. This should add major performance gains when adding data to clusters.
Before, everytime cluster issued a block, it was broadcasted individually to
all destinations (new libp2p stream), where it was block/put to IPFS (a single
block/put http roundtrip per block).
Now, blocks are streamed all the way from the adder module to the ipfs daemon,
by making every block as it arrives a single part in a multipart block/put
request.
Before, block-broadcast needed to wait for all destinations to finish in order
to process the next block. Now, buffers allow some destinations to be faster
than others while sending and receiving blocks.
Before, if a block put request failed to be broadcasted everywhere, an error
would happen at that moment.
Now, we keep streaming until the end and only then report any errors. The
operation succeeds as long as at least one stream finished successfully.
Errors block/putting to IPFS will not abort streams. Instead, subsequent
blocks are retried with a new request, although the method will return an
error when the stream finishes if there were errors at any point.
2022-03-24 01:17:10 +00:00
|
|
|
"sync"
|
2018-08-07 18:01:02 +00:00
|
|
|
|
2022-06-15 09:19:17 +00:00
|
|
|
"github.com/ipfs-cluster/ipfs-cluster/api"
|
Adders: stream blocks to destinations
This commit fixes #810 and adds block streaming to the final destinations when
adding. This should add major performance gains when adding data to clusters.
Before, everytime cluster issued a block, it was broadcasted individually to
all destinations (new libp2p stream), where it was block/put to IPFS (a single
block/put http roundtrip per block).
Now, blocks are streamed all the way from the adder module to the ipfs daemon,
by making every block as it arrives a single part in a multipart block/put
request.
Before, block-broadcast needed to wait for all destinations to finish in order
to process the next block. Now, buffers allow some destinations to be faster
than others while sending and receiving blocks.
Before, if a block put request failed to be broadcasted everywhere, an error
would happen at that moment.
Now, we keep streaming until the end and only then report any errors. The
operation succeeds as long as at least one stream finished successfully.
Errors block/putting to IPFS will not abort streams. Instead, subsequent
blocks are retried with a new request, although the method will return an
error when the stream finishes if there were errors at any point.
2022-03-24 01:17:10 +00:00
|
|
|
"go.uber.org/multierr"
|
2018-08-07 18:01:02 +00:00
|
|
|
|
|
|
|
cid "github.com/ipfs/go-cid"
|
|
|
|
ipld "github.com/ipfs/go-ipld-format"
|
2019-06-14 10:41:11 +00:00
|
|
|
peer "github.com/libp2p/go-libp2p-core/peer"
|
2018-10-17 13:28:03 +00:00
|
|
|
rpc "github.com/libp2p/go-libp2p-gorpc"
|
2018-08-07 18:01:02 +00:00
|
|
|
)
|
|
|
|
|
2019-08-13 14:02:43 +00:00
|
|
|
// ErrBlockAdder is returned when adding a block to multiple destinations
// fails on all of them.
var ErrBlockAdder = errors.New("failed to put block on all destinations")
|
|
|
|
|
Adders: stream blocks to destinations
This commit fixes #810 and adds block streaming to the final destinations when
adding. This should add major performance gains when adding data to clusters.
Before, everytime cluster issued a block, it was broadcasted individually to
all destinations (new libp2p stream), where it was block/put to IPFS (a single
block/put http roundtrip per block).
Now, blocks are streamed all the way from the adder module to the ipfs daemon,
by making every block as it arrives a single part in a multipart block/put
request.
Before, block-broadcast needed to wait for all destinations to finish in order
to process the next block. Now, buffers allow some destinations to be faster
than others while sending and receiving blocks.
Before, if a block put request failed to be broadcasted everywhere, an error
would happen at that moment.
Now, we keep streaming until the end and only then report any errors. The
operation succeeds as long as at least one stream finished successfully.
Errors block/putting to IPFS will not abort streams. Instead, subsequent
blocks are retried with a new request, although the method will return an
error when the stream finishes if there were errors at any point.
2022-03-24 01:17:10 +00:00
|
|
|
// BlockStreamer helps streaming nodes to multiple destinations, as long as
// one of them is still working.
type BlockStreamer struct {
	// Destination peers the blocks are streamed to.
	dests []peer.ID
	// RPC client used to open the multi-destination stream.
	rpcClient *rpc.Client
	// Source of blocks to stream.
	blocks <-chan api.NodeWithMeta

	// ctx is cancelled when streaming finishes; see Done().
	ctx    context.Context
	cancel context.CancelFunc
	// errMu guards err, which records the final streaming error (if any).
	errMu sync.Mutex
	err   error
}
|
|
|
|
|
Adders: stream blocks to destinations
This commit fixes #810 and adds block streaming to the final destinations when
adding. This should add major performance gains when adding data to clusters.
Before, everytime cluster issued a block, it was broadcasted individually to
all destinations (new libp2p stream), where it was block/put to IPFS (a single
block/put http roundtrip per block).
Now, blocks are streamed all the way from the adder module to the ipfs daemon,
by making every block as it arrives a single part in a multipart block/put
request.
Before, block-broadcast needed to wait for all destinations to finish in order
to process the next block. Now, buffers allow some destinations to be faster
than others while sending and receiving blocks.
Before, if a block put request failed to be broadcasted everywhere, an error
would happen at that moment.
Now, we keep streaming until the end and only then report any errors. The
operation succeeds as long as at least one stream finished successfully.
Errors block/putting to IPFS will not abort streams. Instead, subsequent
blocks are retried with a new request, although the method will return an
error when the stream finishes if there were errors at any point.
2022-03-24 01:17:10 +00:00
|
|
|
// NewBlockStreamer creates a BlockStreamer given an rpc client, allocated
|
|
|
|
// peers and a channel on which the blocks to stream are received.
|
|
|
|
func NewBlockStreamer(ctx context.Context, rpcClient *rpc.Client, dests []peer.ID, blocks <-chan api.NodeWithMeta) *BlockStreamer {
|
|
|
|
bsCtx, cancel := context.WithCancel(ctx)
|
|
|
|
|
|
|
|
bs := BlockStreamer{
|
|
|
|
ctx: bsCtx,
|
|
|
|
cancel: cancel,
|
2019-08-05 05:01:07 +00:00
|
|
|
dests: dests,
|
|
|
|
rpcClient: rpcClient,
|
Adders: stream blocks to destinations
This commit fixes #810 and adds block streaming to the final destinations when
adding. This should add major performance gains when adding data to clusters.
Before, everytime cluster issued a block, it was broadcasted individually to
all destinations (new libp2p stream), where it was block/put to IPFS (a single
block/put http roundtrip per block).
Now, blocks are streamed all the way from the adder module to the ipfs daemon,
by making every block as it arrives a single part in a multipart block/put
request.
Before, block-broadcast needed to wait for all destinations to finish in order
to process the next block. Now, buffers allow some destinations to be faster
than others while sending and receiving blocks.
Before, if a block put request failed to be broadcasted everywhere, an error
would happen at that moment.
Now, we keep streaming until the end and only then report any errors. The
operation succeeds as long as at least one stream finished successfully.
Errors block/putting to IPFS will not abort streams. Instead, subsequent
blocks are retried with a new request, although the method will return an
error when the stream finishes if there were errors at any point.
2022-03-24 01:17:10 +00:00
|
|
|
blocks: blocks,
|
|
|
|
err: nil,
|
2019-08-05 05:01:07 +00:00
|
|
|
}
|
Adders: stream blocks to destinations
This commit fixes #810 and adds block streaming to the final destinations when
adding. This should add major performance gains when adding data to clusters.
Before, everytime cluster issued a block, it was broadcasted individually to
all destinations (new libp2p stream), where it was block/put to IPFS (a single
block/put http roundtrip per block).
Now, blocks are streamed all the way from the adder module to the ipfs daemon,
by making every block as it arrives a single part in a multipart block/put
request.
Before, block-broadcast needed to wait for all destinations to finish in order
to process the next block. Now, buffers allow some destinations to be faster
than others while sending and receiving blocks.
Before, if a block put request failed to be broadcasted everywhere, an error
would happen at that moment.
Now, we keep streaming until the end and only then report any errors. The
operation succeeds as long as at least one stream finished successfully.
Errors block/putting to IPFS will not abort streams. Instead, subsequent
blocks are retried with a new request, although the method will return an
error when the stream finishes if there were errors at any point.
2022-03-24 01:17:10 +00:00
|
|
|
|
|
|
|
go bs.streamBlocks()
|
|
|
|
return &bs
|
2019-08-05 05:01:07 +00:00
|
|
|
}
|
|
|
|
|
Adders: stream blocks to destinations
This commit fixes #810 and adds block streaming to the final destinations when
adding. This should add major performance gains when adding data to clusters.
Before, everytime cluster issued a block, it was broadcasted individually to
all destinations (new libp2p stream), where it was block/put to IPFS (a single
block/put http roundtrip per block).
Now, blocks are streamed all the way from the adder module to the ipfs daemon,
by making every block as it arrives a single part in a multipart block/put
request.
Before, block-broadcast needed to wait for all destinations to finish in order
to process the next block. Now, buffers allow some destinations to be faster
than others while sending and receiving blocks.
Before, if a block put request failed to be broadcasted everywhere, an error
would happen at that moment.
Now, we keep streaming until the end and only then report any errors. The
operation succeeds as long as at least one stream finished successfully.
Errors block/putting to IPFS will not abort streams. Instead, subsequent
blocks are retried with a new request, although the method will return an
error when the stream finishes if there were errors at any point.
2022-03-24 01:17:10 +00:00
|
|
|
// Done returns a channel which gets closed when the BlockStreamer has
// finished (either because all blocks were streamed or because the
// parent context was cancelled).
func (bs *BlockStreamer) Done() <-chan struct{} {
	return bs.ctx.Done()
}
|
2019-08-05 05:01:07 +00:00
|
|
|
|
Adders: stream blocks to destinations
This commit fixes #810 and adds block streaming to the final destinations when
adding. This should add major performance gains when adding data to clusters.
Before, everytime cluster issued a block, it was broadcasted individually to
all destinations (new libp2p stream), where it was block/put to IPFS (a single
block/put http roundtrip per block).
Now, blocks are streamed all the way from the adder module to the ipfs daemon,
by making every block as it arrives a single part in a multipart block/put
request.
Before, block-broadcast needed to wait for all destinations to finish in order
to process the next block. Now, buffers allow some destinations to be faster
than others while sending and receiving blocks.
Before, if a block put request failed to be broadcasted everywhere, an error
would happen at that moment.
Now, we keep streaming until the end and only then report any errors. The
operation succeeds as long as at least one stream finished successfully.
Errors block/putting to IPFS will not abort streams. Instead, subsequent
blocks are retried with a new request, although the method will return an
error when the stream finishes if there were errors at any point.
2022-03-24 01:17:10 +00:00
|
|
|
func (bs *BlockStreamer) setErr(err error) {
|
|
|
|
bs.errMu.Lock()
|
|
|
|
bs.err = err
|
|
|
|
bs.errMu.Unlock()
|
|
|
|
}
|
2018-08-07 18:01:02 +00:00
|
|
|
|
Adders: stream blocks to destinations
This commit fixes #810 and adds block streaming to the final destinations when
adding. This should add major performance gains when adding data to clusters.
Before, everytime cluster issued a block, it was broadcasted individually to
all destinations (new libp2p stream), where it was block/put to IPFS (a single
block/put http roundtrip per block).
Now, blocks are streamed all the way from the adder module to the ipfs daemon,
by making every block as it arrives a single part in a multipart block/put
request.
Before, block-broadcast needed to wait for all destinations to finish in order
to process the next block. Now, buffers allow some destinations to be faster
than others while sending and receiving blocks.
Before, if a block put request failed to be broadcasted everywhere, an error
would happen at that moment.
Now, we keep streaming until the end and only then report any errors. The
operation succeeds as long as at least one stream finished successfully.
Errors block/putting to IPFS will not abort streams. Instead, subsequent
blocks are retried with a new request, although the method will return an
error when the stream finishes if there were errors at any point.
2022-03-24 01:17:10 +00:00
|
|
|
// Err returns any errors that happened after the operation of the
|
|
|
|
// BlockStreamer, for example when blocks could not be put to all nodes.
|
|
|
|
func (bs *BlockStreamer) Err() error {
|
|
|
|
bs.errMu.Lock()
|
|
|
|
defer bs.errMu.Unlock()
|
|
|
|
return bs.err
|
|
|
|
}
|
2019-08-05 05:01:07 +00:00
|
|
|
|
Adders: stream blocks to destinations
This commit fixes #810 and adds block streaming to the final destinations when
adding. This should add major performance gains when adding data to clusters.
Before, everytime cluster issued a block, it was broadcasted individually to
all destinations (new libp2p stream), where it was block/put to IPFS (a single
block/put http roundtrip per block).
Now, blocks are streamed all the way from the adder module to the ipfs daemon,
by making every block as it arrives a single part in a multipart block/put
request.
Before, block-broadcast needed to wait for all destinations to finish in order
to process the next block. Now, buffers allow some destinations to be faster
than others while sending and receiving blocks.
Before, if a block put request failed to be broadcasted everywhere, an error
would happen at that moment.
Now, we keep streaming until the end and only then report any errors. The
operation succeeds as long as at least one stream finished successfully.
Errors block/putting to IPFS will not abort streams. Instead, subsequent
blocks are retried with a new request, although the method will return an
error when the stream finishes if there were errors at any point.
2022-03-24 01:17:10 +00:00
|
|
|
func (bs *BlockStreamer) streamBlocks() {
|
|
|
|
defer bs.cancel()
|
2019-08-13 14:02:43 +00:00
|
|
|
|
Adders: stream blocks to destinations
This commit fixes #810 and adds block streaming to the final destinations when
adding. This should add major performance gains when adding data to clusters.
Before, everytime cluster issued a block, it was broadcasted individually to
all destinations (new libp2p stream), where it was block/put to IPFS (a single
block/put http roundtrip per block).
Now, blocks are streamed all the way from the adder module to the ipfs daemon,
by making every block as it arrives a single part in a multipart block/put
request.
Before, block-broadcast needed to wait for all destinations to finish in order
to process the next block. Now, buffers allow some destinations to be faster
than others while sending and receiving blocks.
Before, if a block put request failed to be broadcasted everywhere, an error
would happen at that moment.
Now, we keep streaming until the end and only then report any errors. The
operation succeeds as long as at least one stream finished successfully.
Errors block/putting to IPFS will not abort streams. Instead, subsequent
blocks are retried with a new request, although the method will return an
error when the stream finishes if there were errors at any point.
2022-03-24 01:17:10 +00:00
|
|
|
// Nothing should be sent on out.
|
|
|
|
// We drain though
|
|
|
|
out := make(chan struct{})
|
|
|
|
go func() {
|
|
|
|
for range out {
|
2019-08-05 05:01:07 +00:00
|
|
|
}
|
Adders: stream blocks to destinations
This commit fixes #810 and adds block streaming to the final destinations when
adding. This should add major performance gains when adding data to clusters.
Before, everytime cluster issued a block, it was broadcasted individually to
all destinations (new libp2p stream), where it was block/put to IPFS (a single
block/put http roundtrip per block).
Now, blocks are streamed all the way from the adder module to the ipfs daemon,
by making every block as it arrives a single part in a multipart block/put
request.
Before, block-broadcast needed to wait for all destinations to finish in order
to process the next block. Now, buffers allow some destinations to be faster
than others while sending and receiving blocks.
Before, if a block put request failed to be broadcasted everywhere, an error
would happen at that moment.
Now, we keep streaming until the end and only then report any errors. The
operation succeeds as long as at least one stream finished successfully.
Errors block/putting to IPFS will not abort streams. Instead, subsequent
blocks are retried with a new request, although the method will return an
error when the stream finishes if there were errors at any point.
2022-03-24 01:17:10 +00:00
|
|
|
}()
|
2019-08-05 05:01:07 +00:00
|
|
|
|
Adders: stream blocks to destinations
This commit fixes #810 and adds block streaming to the final destinations when
adding. This should add major performance gains when adding data to clusters.
Before, everytime cluster issued a block, it was broadcasted individually to
all destinations (new libp2p stream), where it was block/put to IPFS (a single
block/put http roundtrip per block).
Now, blocks are streamed all the way from the adder module to the ipfs daemon,
by making every block as it arrives a single part in a multipart block/put
request.
Before, block-broadcast needed to wait for all destinations to finish in order
to process the next block. Now, buffers allow some destinations to be faster
than others while sending and receiving blocks.
Before, if a block put request failed to be broadcasted everywhere, an error
would happen at that moment.
Now, we keep streaming until the end and only then report any errors. The
operation succeeds as long as at least one stream finished successfully.
Errors block/putting to IPFS will not abort streams. Instead, subsequent
blocks are retried with a new request, although the method will return an
error when the stream finishes if there were errors at any point.
2022-03-24 01:17:10 +00:00
|
|
|
errs := bs.rpcClient.MultiStream(
|
|
|
|
bs.ctx,
|
|
|
|
bs.dests,
|
|
|
|
"IPFSConnector",
|
|
|
|
"BlockStream",
|
|
|
|
bs.blocks,
|
|
|
|
out,
|
|
|
|
)
|
2019-08-05 05:01:07 +00:00
|
|
|
|
Adders: stream blocks to destinations
This commit fixes #810 and adds block streaming to the final destinations when
adding. This should add major performance gains when adding data to clusters.
Before, everytime cluster issued a block, it was broadcasted individually to
all destinations (new libp2p stream), where it was block/put to IPFS (a single
block/put http roundtrip per block).
Now, blocks are streamed all the way from the adder module to the ipfs daemon,
by making every block as it arrives a single part in a multipart block/put
request.
Before, block-broadcast needed to wait for all destinations to finish in order
to process the next block. Now, buffers allow some destinations to be faster
than others while sending and receiving blocks.
Before, if a block put request failed to be broadcasted everywhere, an error
would happen at that moment.
Now, we keep streaming until the end and only then report any errors. The
operation succeeds as long as at least one stream finished successfully.
Errors block/putting to IPFS will not abort streams. Instead, subsequent
blocks are retried with a new request, although the method will return an
error when the stream finishes if there were errors at any point.
2022-03-24 01:17:10 +00:00
|
|
|
combinedErrors := multierr.Combine(errs...)
|
2022-03-28 11:59:09 +00:00
|
|
|
|
|
|
|
// FIXME: replicate everywhere.
|
Adders: stream blocks to destinations
This commit fixes #810 and adds block streaming to the final destinations when
adding. This should add major performance gains when adding data to clusters.
Before, everytime cluster issued a block, it was broadcasted individually to
all destinations (new libp2p stream), where it was block/put to IPFS (a single
block/put http roundtrip per block).
Now, blocks are streamed all the way from the adder module to the ipfs daemon,
by making every block as it arrives a single part in a multipart block/put
request.
Before, block-broadcast needed to wait for all destinations to finish in order
to process the next block. Now, buffers allow some destinations to be faster
than others while sending and receiving blocks.
Before, if a block put request failed to be broadcasted everywhere, an error
would happen at that moment.
Now, we keep streaming until the end and only then report any errors. The
operation succeeds as long as at least one stream finished successfully.
Errors block/putting to IPFS will not abort streams. Instead, subsequent
blocks are retried with a new request, although the method will return an
error when the stream finishes if there were errors at any point.
2022-03-24 01:17:10 +00:00
|
|
|
if len(multierr.Errors(combinedErrors)) == len(bs.dests) {
|
|
|
|
logger.Error(combinedErrors)
|
|
|
|
bs.setErr(ErrBlockAdder)
|
2022-03-28 11:59:09 +00:00
|
|
|
} else if combinedErrors != nil {
|
Adders: stream blocks to destinations
This commit fixes #810 and adds block streaming to the final destinations when
adding. This should add major performance gains when adding data to clusters.
Before, everytime cluster issued a block, it was broadcasted individually to
all destinations (new libp2p stream), where it was block/put to IPFS (a single
block/put http roundtrip per block).
Now, blocks are streamed all the way from the adder module to the ipfs daemon,
by making every block as it arrives a single part in a multipart block/put
request.
Before, block-broadcast needed to wait for all destinations to finish in order
to process the next block. Now, buffers allow some destinations to be faster
than others while sending and receiving blocks.
Before, if a block put request failed to be broadcasted everywhere, an error
would happen at that moment.
Now, we keep streaming until the end and only then report any errors. The
operation succeeds as long as at least one stream finished successfully.
Errors block/putting to IPFS will not abort streams. Instead, subsequent
blocks are retried with a new request, although the method will return an
error when the stream finishes if there were errors at any point.
2022-03-24 01:17:10 +00:00
|
|
|
logger.Warning("there were errors streaming blocks, but at least one destination succeeded")
|
|
|
|
logger.Warning(combinedErrors)
|
2019-08-05 05:01:07 +00:00
|
|
|
}
|
2018-08-07 18:01:02 +00:00
|
|
|
}
|
|
|
|
|
Adders: stream blocks to destinations
This commit fixes #810 and adds block streaming to the final destinations when
adding. This should add major performance gains when adding data to clusters.
Before, everytime cluster issued a block, it was broadcasted individually to
all destinations (new libp2p stream), where it was block/put to IPFS (a single
block/put http roundtrip per block).
Now, blocks are streamed all the way from the adder module to the ipfs daemon,
by making every block as it arrives a single part in a multipart block/put
request.
Before, block-broadcast needed to wait for all destinations to finish in order
to process the next block. Now, buffers allow some destinations to be faster
than others while sending and receiving blocks.
Before, if a block put request failed to be broadcasted everywhere, an error
would happen at that moment.
Now, we keep streaming until the end and only then report any errors. The
operation succeeds as long as at least one stream finished successfully.
Errors block/putting to IPFS will not abort streams. Instead, subsequent
blocks are retried with a new request, although the method will return an
error when the stream finishes if there were errors at any point.
2022-03-24 01:17:10 +00:00
|
|
|
// IpldNodeToNodeWithMeta converts an ipld.Node to api.NodeWithMeta.
|
|
|
|
func IpldNodeToNodeWithMeta(n ipld.Node) api.NodeWithMeta {
|
2019-08-13 14:02:43 +00:00
|
|
|
size, err := n.Size()
|
|
|
|
if err != nil {
|
2020-03-13 20:40:02 +00:00
|
|
|
logger.Warn(err)
|
2019-08-13 14:02:43 +00:00
|
|
|
}
|
|
|
|
|
2022-03-19 01:52:46 +00:00
|
|
|
return api.NodeWithMeta{
|
2022-04-07 11:53:30 +00:00
|
|
|
Cid: api.NewCid(n.Cid()),
|
2019-08-13 14:02:43 +00:00
|
|
|
Data: n.RawData(),
|
|
|
|
CumSize: size,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-08-08 19:10:42 +00:00
|
|
|
// BlockAllocate helps allocating blocks to peers.
|
|
|
|
func BlockAllocate(ctx context.Context, rpc *rpc.Client, pinOpts api.PinOptions) ([]peer.ID, error) {
|
|
|
|
// Find where to allocate this file
|
2019-02-27 17:04:35 +00:00
|
|
|
var allocsStr []peer.ID
|
2018-08-08 19:10:42 +00:00
|
|
|
err := rpc.CallContext(
|
|
|
|
ctx,
|
|
|
|
"",
|
|
|
|
"Cluster",
|
|
|
|
"BlockAllocate",
|
2022-04-07 11:53:30 +00:00
|
|
|
api.PinWithOpts(api.CidUndef, pinOpts),
|
2018-08-08 19:10:42 +00:00
|
|
|
&allocsStr,
|
|
|
|
)
|
2019-02-27 17:04:35 +00:00
|
|
|
return allocsStr, err
|
2018-08-08 19:10:42 +00:00
|
|
|
}
|
|
|
|
|
2018-08-08 19:29:21 +00:00
|
|
|
// Pin helps sending local RPC pin requests.
|
2022-03-19 01:52:46 +00:00
|
|
|
func Pin(ctx context.Context, rpc *rpc.Client, pin api.Pin) error {
|
2018-08-08 19:10:42 +00:00
|
|
|
if pin.ReplicationFactorMin < 0 {
|
|
|
|
pin.Allocations = []peer.ID{}
|
|
|
|
}
|
|
|
|
logger.Debugf("adder pinning %+v", pin)
|
Improve pin/unpin method signatures (#843)
* Improve pin/unpin method signatures:
These changes the following Cluster Go API methods:
* -> Cluster.Pin(ctx, cid, options) (pin, error)
* -> Cluster.Unpin(ctx, cid) (pin, error)
* -> Cluster.PinPath(ctx, path, opts) (pin,error)
Pin and Unpin now return the pinned object.
The signature of the methods now matches that of the API Client, is clearer as
to what options the user can set and is aligned with PinPath, UnpinPath, which
returned pin methods.
The REST API now returns the Pinned/Unpinned object rather than 204-Accepted.
This was necessary for a cleaner pin/update approach, which I'm working on in
another branch.
Most of the changes here are updating tests to the new signatures
* Adapt load-balancing client to new Pin/Unpin signatures
* cluster.go: Fix typo
Co-Authored-By: Kishan Sagathiya <kishansagathiya@gmail.com>
* cluster.go: Fix typo
Co-Authored-By: Kishan Sagathiya <kishansagathiya@gmail.com>
2019-07-22 13:39:11 +00:00
|
|
|
var pinResp api.Pin
|
2018-08-08 19:10:42 +00:00
|
|
|
return rpc.CallContext(
|
|
|
|
ctx,
|
|
|
|
"", // use ourself to pin
|
|
|
|
"Cluster",
|
|
|
|
"Pin",
|
2019-02-27 17:04:35 +00:00
|
|
|
pin,
|
Improve pin/unpin method signatures (#843)
* Improve pin/unpin method signatures:
These changes the following Cluster Go API methods:
* -> Cluster.Pin(ctx, cid, options) (pin, error)
* -> Cluster.Unpin(ctx, cid) (pin, error)
* -> Cluster.PinPath(ctx, path, opts) (pin,error)
Pin and Unpin now return the pinned object.
The signature of the methods now matches that of the API Client, is clearer as
to what options the user can set and is aligned with PinPath, UnpinPath, which
returned pin methods.
The REST API now returns the Pinned/Unpinned object rather than 204-Accepted.
This was necessary for a cleaner pin/update approach, which I'm working on in
another branch.
Most of the changes here are updating tests to the new signatures
* Adapt load-balancing client to new Pin/Unpin signatures
* cluster.go: Fix typo
Co-Authored-By: Kishan Sagathiya <kishansagathiya@gmail.com>
* cluster.go: Fix typo
Co-Authored-By: Kishan Sagathiya <kishansagathiya@gmail.com>
2019-07-22 13:39:11 +00:00
|
|
|
&pinResp,
|
2018-08-08 19:10:42 +00:00
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2018-08-07 18:01:02 +00:00
|
|
|
// ErrDAGNotFound is returned whenever we try to get a block from the
// DAGService.
var ErrDAGNotFound = errors.New("dagservice: a Get operation was attempted while cluster-adding (this is likely a bug)")
|
2018-08-07 18:01:02 +00:00
|
|
|
|
|
|
|
// BaseDAGService partially implements an ipld.DAGService.
// It provides the methods which are not needed by ClusterDAGServices
// (Get*, Remove*) so that they can save adding this code.
type BaseDAGService struct {
}
|
|
|
|
|
|
|
|
// Get always returns ErrDAGNotFound.
func (dag BaseDAGService) Get(ctx context.Context, key cid.Cid) (ipld.Node, error) {
	return nil, ErrDAGNotFound
}
|
|
|
|
|
|
|
|
// GetMany returns an output channel that always emits an error
|
2018-09-22 01:00:10 +00:00
|
|
|
func (dag BaseDAGService) GetMany(ctx context.Context, keys []cid.Cid) <-chan *ipld.NodeOption {
|
2018-08-07 18:01:02 +00:00
|
|
|
out := make(chan *ipld.NodeOption, 1)
|
2022-06-16 10:25:07 +00:00
|
|
|
out <- &ipld.NodeOption{Err: ErrDAGNotFound}
|
2018-08-07 18:01:02 +00:00
|
|
|
close(out)
|
|
|
|
return out
|
|
|
|
}
|
|
|
|
|
|
|
|
// Remove is a no-op.
func (dag BaseDAGService) Remove(ctx context.Context, key cid.Cid) error {
	return nil
}
|
|
|
|
|
|
|
|
// RemoveMany is a no-op.
func (dag BaseDAGService) RemoveMany(ctx context.Context, keys []cid.Cid) error {
	return nil
}
|