2018-07-04 16:30:24 +00:00
|
|
|
package sharding
|
|
|
|
|
|
|
|
import (
	"context"
	"fmt"
	"strconv"
	"sync"

	"github.com/ipfs-cluster/ipfs-cluster/adder"
	"github.com/ipfs-cluster/ipfs-cluster/api"

	humanize "github.com/dustin/go-humanize"
	cid "github.com/ipfs/go-cid"
	ipld "github.com/ipfs/go-ipld-format"
	rpc "github.com/libp2p/go-libp2p-gorpc"
	peer "github.com/libp2p/go-libp2p/core/peer"
)
|
|
|
|
|
|
|
|
// a shard represents a set of blocks (or bucket) which have been assigned
// a peer to be block-put and will be part of the same shard in the
// cluster DAG.
type shard struct {
	// Long-lived context (the adder's global context) governing block
	// streaming for this shard.
	ctx context.Context
	// RPC client used for allocation and for pinning the shard.
	rpc *rpc.Client
	// Peers to which this shard's blocks are put.
	allocations []peer.ID
	// Options used when pinning the shard's root.
	pinOptions api.PinOptions
	// Streams everything sent on the blocks channel to the allocations.
	bs *adder.BlockStreamer
	// Feed channel for bs; closed via Close().
	blocks chan api.NodeWithMeta
	// Guarantees blocks is closed exactly once.
	closeBlocksOnce sync.Once
	// dagNode represents a node with links and will be converted
	// to Cbor. Keys are stringified link indexes ("0", "1", ...).
	dagNode map[string]cid.Cid
	// Sum of the sizes passed to AddLink so far.
	currentSize uint64
	// Maximum size for this shard (opts.ShardSize).
	sizeLimit uint64
}
|
|
|
|
|
Adders: stream blocks to destinations
This commit fixes #810 and adds block streaming to the final destinations when
adding. This should add major performance gains when adding data to clusters.
Before, everytime cluster issued a block, it was broadcasted individually to
all destinations (new libp2p stream), where it was block/put to IPFS (a single
block/put http roundtrip per block).
Now, blocks are streamed all the way from the adder module to the ipfs daemon,
by making every block as it arrives a single part in a multipart block/put
request.
Before, block-broadcast needed to wait for all destinations to finish in order
to process the next block. Now, buffers allow some destinations to be faster
than others while sending and receiving blocks.
Before, if a block put request failed to be broadcasted everywhere, an error
would happen at that moment.
Now, we keep streaming until the end and only then report any errors. The
operation succeeds as long as at least one stream finished successfully.
Errors block/putting to IPFS will not abort streams. Instead, subsequent
blocks are retried with a new request, although the method will return an
error when the stream finishes if there were errors at any point.
2022-03-24 01:17:10 +00:00
|
|
|
func newShard(globalCtx context.Context, ctx context.Context, rpc *rpc.Client, opts api.PinOptions) (*shard, error) {
|
2018-08-08 19:10:42 +00:00
|
|
|
allocs, err := adder.BlockAllocate(ctx, rpc, opts)
|
2018-07-04 16:30:24 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2020-04-14 17:58:00 +00:00
|
|
|
if opts.ReplicationFactorMin > 0 && len(allocs) == 0 {
|
2018-07-19 13:17:27 +00:00
|
|
|
// This would mean that the empty cid is part of the shared state somehow.
|
|
|
|
panic("allocations for new shard cannot be empty without error")
|
2018-07-04 16:30:24 +00:00
|
|
|
}
|
|
|
|
|
2018-07-23 13:56:46 +00:00
|
|
|
if opts.ReplicationFactorMin < 0 {
|
2020-03-13 20:40:02 +00:00
|
|
|
logger.Warn("Shard is set to replicate everywhere ,which doesn't make sense for sharding")
|
2018-07-23 13:56:46 +00:00
|
|
|
}
|
|
|
|
|
2018-07-19 13:17:27 +00:00
|
|
|
// TODO (hector): get latest metrics for allocations, adjust sizeLimit
|
2020-02-03 09:30:04 +00:00
|
|
|
// to minimum. This can be done later.
|
2018-07-19 13:17:27 +00:00
|
|
|
|
Adders: stream blocks to destinations
This commit fixes #810 and adds block streaming to the final destinations when
adding. This should add major performance gains when adding data to clusters.
Before, everytime cluster issued a block, it was broadcasted individually to
all destinations (new libp2p stream), where it was block/put to IPFS (a single
block/put http roundtrip per block).
Now, blocks are streamed all the way from the adder module to the ipfs daemon,
by making every block as it arrives a single part in a multipart block/put
request.
Before, block-broadcast needed to wait for all destinations to finish in order
to process the next block. Now, buffers allow some destinations to be faster
than others while sending and receiving blocks.
Before, if a block put request failed to be broadcasted everywhere, an error
would happen at that moment.
Now, we keep streaming until the end and only then report any errors. The
operation succeeds as long as at least one stream finished successfully.
Errors block/putting to IPFS will not abort streams. Instead, subsequent
blocks are retried with a new request, although the method will return an
error when the stream finishes if there were errors at any point.
2022-03-24 01:17:10 +00:00
|
|
|
blocks := make(chan api.NodeWithMeta, 256)
|
|
|
|
|
2018-07-04 16:30:24 +00:00
|
|
|
return &shard{
|
Adders: stream blocks to destinations
This commit fixes #810 and adds block streaming to the final destinations when
adding. This should add major performance gains when adding data to clusters.
Before, everytime cluster issued a block, it was broadcasted individually to
all destinations (new libp2p stream), where it was block/put to IPFS (a single
block/put http roundtrip per block).
Now, blocks are streamed all the way from the adder module to the ipfs daemon,
by making every block as it arrives a single part in a multipart block/put
request.
Before, block-broadcast needed to wait for all destinations to finish in order
to process the next block. Now, buffers allow some destinations to be faster
than others while sending and receiving blocks.
Before, if a block put request failed to be broadcasted everywhere, an error
would happen at that moment.
Now, we keep streaming until the end and only then report any errors. The
operation succeeds as long as at least one stream finished successfully.
Errors block/putting to IPFS will not abort streams. Instead, subsequent
blocks are retried with a new request, although the method will return an
error when the stream finishes if there were errors at any point.
2022-03-24 01:17:10 +00:00
|
|
|
ctx: globalCtx,
|
2018-07-04 16:30:24 +00:00
|
|
|
rpc: rpc,
|
2018-08-08 19:10:42 +00:00
|
|
|
allocations: allocs,
|
2018-07-19 13:17:27 +00:00
|
|
|
pinOptions: opts,
|
Adders: stream blocks to destinations
This commit fixes #810 and adds block streaming to the final destinations when
adding. This should add major performance gains when adding data to clusters.
Before, everytime cluster issued a block, it was broadcasted individually to
all destinations (new libp2p stream), where it was block/put to IPFS (a single
block/put http roundtrip per block).
Now, blocks are streamed all the way from the adder module to the ipfs daemon,
by making every block as it arrives a single part in a multipart block/put
request.
Before, block-broadcast needed to wait for all destinations to finish in order
to process the next block. Now, buffers allow some destinations to be faster
than others while sending and receiving blocks.
Before, if a block put request failed to be broadcasted everywhere, an error
would happen at that moment.
Now, we keep streaming until the end and only then report any errors. The
operation succeeds as long as at least one stream finished successfully.
Errors block/putting to IPFS will not abort streams. Instead, subsequent
blocks are retried with a new request, although the method will return an
error when the stream finishes if there were errors at any point.
2022-03-24 01:17:10 +00:00
|
|
|
bs: adder.NewBlockStreamer(globalCtx, rpc, allocs, blocks),
|
|
|
|
blocks: blocks,
|
2018-09-22 01:00:10 +00:00
|
|
|
dagNode: make(map[string]cid.Cid),
|
2018-07-04 16:30:24 +00:00
|
|
|
currentSize: 0,
|
2018-07-19 13:17:27 +00:00
|
|
|
sizeLimit: opts.ShardSize,
|
2018-07-04 16:30:24 +00:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// AddLink tries to add a new block to this shard if it's not full.
|
|
|
|
// Returns true if the block was added
|
2018-09-22 01:00:10 +00:00
|
|
|
func (sh *shard) AddLink(ctx context.Context, c cid.Cid, s uint64) {
|
2018-07-04 16:30:24 +00:00
|
|
|
linkN := len(sh.dagNode)
|
|
|
|
linkName := fmt.Sprintf("%d", linkN)
|
2018-07-19 13:17:27 +00:00
|
|
|
logger.Debugf("shard: add link: %s", linkName)
|
|
|
|
|
2018-07-04 16:30:24 +00:00
|
|
|
sh.dagNode[linkName] = c
|
|
|
|
sh.currentSize += s
|
|
|
|
}
|
|
|
|
|
2018-07-19 13:17:27 +00:00
|
|
|
// Allocations returns the peer IDs on which blocks are put for this shard.
|
|
|
|
func (sh *shard) Allocations() []peer.ID {
|
2022-03-15 10:32:12 +00:00
|
|
|
if len(sh.allocations) == 1 && sh.allocations[0] == "" {
|
|
|
|
return nil
|
|
|
|
}
|
2018-07-19 13:17:27 +00:00
|
|
|
return sh.allocations
|
2018-07-04 16:30:24 +00:00
|
|
|
}
|
|
|
|
|
// sendBlock queues a block for streaming to this shard's allocations via
// the BlockStreamer, converting it to a NodeWithMeta first. It returns the
// context's error if ctx is cancelled before the block can be queued.
func (sh *shard) sendBlock(ctx context.Context, n ipld.Node) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case sh.blocks <- adder.IpldNodeToNodeWithMeta(n):
		return nil
	}
}
|
|
|
|
|
2022-07-08 14:15:49 +00:00
|
|
|
// Close stops any ongoing block streaming by closing the blocks channel.
// It is safe to call multiple times: sync.Once guarantees the channel is
// closed only once. It always returns nil.
func (sh *shard) Close() error {
	sh.closeBlocksOnce.Do(func() {
		close(sh.blocks)
	})
	return nil
}
|
|
|
|
|
2018-07-04 16:30:24 +00:00
|
|
|
// Flush completes the allocation of this shard by building a CBOR node
|
|
|
|
// and adding it to IPFS, then pinning it in cluster. It returns the Cid of the
|
|
|
|
// shard.
|
2018-09-22 01:00:10 +00:00
|
|
|
func (sh *shard) Flush(ctx context.Context, shardN int, prev cid.Cid) (cid.Cid, error) {
|
2018-07-19 13:17:27 +00:00
|
|
|
logger.Debugf("shard %d: flush", shardN)
|
2018-08-09 11:22:47 +00:00
|
|
|
nodes, err := makeDAG(ctx, sh.dagNode)
|
2018-07-04 16:30:24 +00:00
|
|
|
if err != nil {
|
2018-09-22 01:00:10 +00:00
|
|
|
return cid.Undef, err
|
2018-07-04 16:30:24 +00:00
|
|
|
}
|
|
|
|
|
Adders: stream blocks to destinations
This commit fixes #810 and adds block streaming to the final destinations when
adding. This should add major performance gains when adding data to clusters.
Before, everytime cluster issued a block, it was broadcasted individually to
all destinations (new libp2p stream), where it was block/put to IPFS (a single
block/put http roundtrip per block).
Now, blocks are streamed all the way from the adder module to the ipfs daemon,
by making every block as it arrives a single part in a multipart block/put
request.
Before, block-broadcast needed to wait for all destinations to finish in order
to process the next block. Now, buffers allow some destinations to be faster
than others while sending and receiving blocks.
Before, if a block put request failed to be broadcasted everywhere, an error
would happen at that moment.
Now, we keep streaming until the end and only then report any errors. The
operation succeeds as long as at least one stream finished successfully.
Errors block/putting to IPFS will not abort streams. Instead, subsequent
blocks are retried with a new request, although the method will return an
error when the stream finishes if there were errors at any point.
2022-03-24 01:17:10 +00:00
|
|
|
for _, n := range nodes {
|
|
|
|
err = sh.sendBlock(ctx, n)
|
|
|
|
if err != nil {
|
|
|
|
close(sh.blocks)
|
|
|
|
return cid.Undef, err
|
|
|
|
}
|
|
|
|
}
|
2022-07-08 14:15:49 +00:00
|
|
|
|
|
|
|
sh.Close()
|
|
|
|
|
Adders: stream blocks to destinations
This commit fixes #810 and adds block streaming to the final destinations when
adding. This should add major performance gains when adding data to clusters.
Before, everytime cluster issued a block, it was broadcasted individually to
all destinations (new libp2p stream), where it was block/put to IPFS (a single
block/put http roundtrip per block).
Now, blocks are streamed all the way from the adder module to the ipfs daemon,
by making every block as it arrives a single part in a multipart block/put
request.
Before, block-broadcast needed to wait for all destinations to finish in order
to process the next block. Now, buffers allow some destinations to be faster
than others while sending and receiving blocks.
Before, if a block put request failed to be broadcasted everywhere, an error
would happen at that moment.
Now, we keep streaming until the end and only then report any errors. The
operation succeeds as long as at least one stream finished successfully.
Errors block/putting to IPFS will not abort streams. Instead, subsequent
blocks are retried with a new request, although the method will return an
error when the stream finishes if there were errors at any point.
2022-03-24 01:17:10 +00:00
|
|
|
select {
|
|
|
|
case <-ctx.Done():
|
|
|
|
return cid.Undef, ctx.Err()
|
|
|
|
case <-sh.bs.Done():
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := sh.bs.Err(); err != nil {
|
2018-09-22 01:00:10 +00:00
|
|
|
return cid.Undef, err
|
2018-07-19 13:17:27 +00:00
|
|
|
}
|
2018-07-04 16:30:24 +00:00
|
|
|
|
|
|
|
rootCid := nodes[0].Cid()
|
2022-04-07 11:53:30 +00:00
|
|
|
pin := api.PinWithOpts(api.NewCid(rootCid), sh.pinOptions)
|
2018-07-19 13:17:27 +00:00
|
|
|
pin.Name = fmt.Sprintf("%s-shard-%d", sh.pinOptions.Name, shardN)
|
|
|
|
// this sets allocations as priority allocation
|
|
|
|
pin.Allocations = sh.allocations
|
2018-07-04 16:30:24 +00:00
|
|
|
pin.Type = api.ShardType
|
2022-04-07 11:53:30 +00:00
|
|
|
ref := api.NewCid(prev)
|
|
|
|
pin.Reference = &ref
|
2018-07-04 16:30:24 +00:00
|
|
|
pin.MaxDepth = 1
|
2018-07-19 13:17:27 +00:00
|
|
|
pin.ShardSize = sh.Size() // use current size, not the limit
|
2018-07-04 16:30:24 +00:00
|
|
|
if len(nodes) > len(sh.dagNode)+1 { // using an indirect graph
|
|
|
|
pin.MaxDepth = 2
|
|
|
|
}
|
|
|
|
|
2018-08-08 19:10:42 +00:00
|
|
|
logger.Infof("shard #%d (%s) completed. Total size: %s. Links: %d",
|
2018-07-19 13:17:27 +00:00
|
|
|
shardN,
|
|
|
|
rootCid,
|
|
|
|
humanize.Bytes(sh.Size()),
|
|
|
|
len(sh.dagNode),
|
|
|
|
)
|
|
|
|
|
2018-08-08 19:10:42 +00:00
|
|
|
return rootCid, adder.Pin(ctx, sh.rpc, pin)
|
2018-07-04 16:30:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Size returns this shard's current size: the sum of the block sizes
// registered so far via AddLink.
func (sh *shard) Size() uint64 {
	return sh.currentSize
}
|
|
|
|
|
|
|
|
// Limit returns this shard's size limit (the ShardSize pin option it was
// created with).
func (sh *shard) Limit() uint64 {
	return sh.sizeLimit
}
|
|
|
|
|
|
|
|
// Last returns the last added link. When finishing sharding,
|
|
|
|
// the last link of the last shard is the data root for the
|
|
|
|
// full sharded DAG (the CID that would have resulted from
|
|
|
|
// adding the content to a single IPFS daemon).
|
2018-09-22 01:00:10 +00:00
|
|
|
func (sh *shard) LastLink() cid.Cid {
|
2018-07-04 16:30:24 +00:00
|
|
|
l := len(sh.dagNode)
|
|
|
|
lastLink := fmt.Sprintf("%d", l-1)
|
|
|
|
return sh.dagNode[lastLink]
|
|
|
|
}
|