ipfs-cluster/consensus/crdt/consensus.go

package crdt
import (
"context"
"errors"
"sync"
ipfslite "github.com/hsanjuan/ipfs-lite"
dshelp "github.com/ipfs/go-ipfs-ds-help"
"github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/state"
"github.com/ipfs/ipfs-cluster/state/dsstate"
multihash "github.com/multiformats/go-multihash"
ds "github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/namespace"
query "github.com/ipfs/go-datastore/query"
crdt "github.com/ipfs/go-ds-crdt"
logging "github.com/ipfs/go-log"
rpc "github.com/libp2p/go-libp2p-gorpc"
host "github.com/libp2p/go-libp2p-host"
dht "github.com/libp2p/go-libp2p-kad-dht"
peer "github.com/libp2p/go-libp2p-peer"
pubsub "github.com/libp2p/go-libp2p-pubsub"
)
var logger = logging.Logger("crdt")
var (
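// All consensus data is stored under the prefix given by
// cfg.DatastoreNamespace; the CRDT DAG blocks are additionally kept under
// this blockstore sub-namespace (see setup()).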
blocksNs = "b" // blockstore namespace
)
// Common variables for the module.
var (
ErrNoLeader = errors.New("crdt consensus component does not provide a leader")
ErrRmPeer = errors.New("crdt consensus component cannot remove peers")
)
// Consensus implements ipfscluster.Consensus and provides the facility to add
// and remove pins from the Cluster shared state. It uses a CRDT-backed
// implementation of go-datastore (go-ds-crdt).
type Consensus struct {
ctx context.Context
cancel context.CancelFunc
config *Config
trustedPeers sync.Map
host host.Host
store ds.Datastore
namespace ds.Key
state state.State
crdt *crdt.Datastore
dht *dht.IpfsDHT
pubsub *pubsub.PubSub
rpcClient *rpc.Client
rpcReady chan struct{}
readyCh chan struct{}
shutdownLock sync.RWMutex
shutdown bool
}
// New creates a new crdt Consensus component. The given PubSub will be used to
// broadcast new heads. The given thread-safe datastore will be used to persist
// data and all will be prefixed with cfg.DatastoreNamespace.
func New(
host host.Host,
dht *dht.IpfsDHT,
pubsub *pubsub.PubSub,
cfg *Config,
store ds.Datastore,
) (*Consensus, error) {
err := cfg.Validate()
if err != nil {
return nil, err
}
ctx, cancel := context.WithCancel(context.Background())
css := &Consensus{
ctx: ctx,
cancel: cancel,
config: cfg,
host: host,
dht: dht,
store: store,
namespace: ds.NewKey(cfg.DatastoreNamespace),
pubsub: pubsub,
rpcReady: make(chan struct{}, 1),
readyCh: make(chan struct{}, 1),
}
// Set up a fast-lookup trusted peers cache.
for _, p := range css.config.TrustedPeers {
css.Trust(ctx, p)
}
go css.setup()
return css, nil
}
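// A minimal wiring sketch (assumes the caller has already created a libp2p
// host, DHT, PubSub instance and a thread-safe datastore; variable names are
// illustrative only):
//
//   css, err := crdt.New(h, idht, psub, cfg, store)
//   if err != nil {
//       return err
//   }
//   css.SetClient(rpcClient) // unblocks setup()
//   select {
//   case <-css.Ready(ctx): // CRDT store and cluster state are initialized
//   case <-ctx.Done():
//   }
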
func (css *Consensus) setup() {
select {
case <-css.ctx.Done():
return
case <-css.rpcReady:
}
// Hash the cluster name and derive the pubsub topic name from it,
// so as to avoid topic collisions with other pubsub applications
// that may also use simple names like "test".
topicName := css.config.ClusterName
topicHash, err := multihash.Sum([]byte(css.config.ClusterName), multihash.MD5, -1)
if err != nil {
logger.Errorf("error hashing topic: %s", err)
} else {
topicName = topicHash.B58String()
}
// Validate pubsub messages for our topic (only accept
// from trusted sources)
err = css.pubsub.RegisterTopicValidator(
topicName,
func(ctx context.Context, p peer.ID, msg *pubsub.Message) bool {
return css.IsTrustedPeer(ctx, p)
},
)
if err != nil {
logger.Errorf("error registering topic validator: %s", err)
}
var blocksDatastore ds.Batching
blocksDatastore = namespace.Wrap(css.store, css.namespace.ChildString(blocksNs))
ipfs, err := ipfslite.New(
css.ctx,
blocksDatastore,
css.host,
css.dht,
&ipfslite.Config{
Offline: false,
},
)
if err != nil {
logger.Errorf("error creating ipfs-lite: %s", err)
return
}
dagSyncer := newLiteDAGSyncer(css.ctx, ipfs)
broadcaster, err := crdt.NewPubSubBroadcaster(
css.ctx,
css.pubsub,
topicName, // subscription name
)
if err != nil {
logger.Errorf("error creating broadcaster: %s", err)
return
}
opts := crdt.DefaultOptions()
opts.RebroadcastInterval = css.config.RebroadcastInterval
opts.Logger = logger
opts.PutHook = func(k ds.Key, v []byte) {
pin := &api.Pin{}
err := pin.ProtoUnmarshal(v)
if err != nil {
logger.Error(err)
return
}
// TODO: tracing for this context
err = css.rpcClient.CallContext(
css.ctx,
"",
"PinTracker",
"Track",
pin,
&struct{}{},
)
if err != nil {
logger.Error(err)
}
logger.Infof("new pin added: %s", pin.Cid)
}
opts.DeleteHook = func(k ds.Key) {
c, err := dshelp.DsKeyToCid(k)
if err != nil {
logger.Error(err, k)
return
}
pin := api.PinCid(c)
err = css.rpcClient.CallContext(
css.ctx,
"",
"PinTracker",
"Untrack",
pin,
&struct{}{},
)
if err != nil {
logger.Error(err)
}
logger.Infof("pin removed: %s", c)
}
crdt, err := crdt.New(
css.store,
css.namespace,
dagSyncer,
broadcaster,
opts,
)
if err != nil {
logger.Error(err)
return
}
css.crdt = crdt
clusterState, err := dsstate.New(
css.crdt,
// The crdt datastore is already namespaced, so we pass an empty
// namespace here; adding another prefix would only complicate the
// keys.
"",
dsstate.DefaultHandle(),
)
if err != nil {
logger.Errorf("error creating cluster state datastore: %s", err)
return
}
css.state = clusterState
css.readyCh <- struct{}{}
}
// Shutdown closes this component, cancelling the pubsub subscription.
func (css *Consensus) Shutdown(ctx context.Context) error {
css.shutdownLock.Lock()
defer css.shutdownLock.Unlock()
if css.shutdown {
logger.Debug("already shutdown")
return nil
}
logger.Info("stopping Consensus component")
css.cancel()
// Only close crdt after cancelling the context, otherwise
// the pubsub broadcaster stays on and locks it.
if crdt := css.crdt; crdt != nil {
crdt.Close()
}
if css.config.hostShutdown {
css.host.Close()
}
css.shutdown = true
close(css.rpcReady)
return nil
}
// SetClient gives the component the ability to communicate and
// leaves it ready to use.
func (css *Consensus) SetClient(c *rpc.Client) {
css.rpcClient = c
css.rpcReady <- struct{}{}
}
// Ready returns a channel which is signalled when the component
// is ready to use.
func (css *Consensus) Ready(ctx context.Context) <-chan struct{} {
return css.readyCh
}
// IsTrustedPeer returns whether the given peer is taken into account
// when submitting updates to the consensus state.
func (css *Consensus) IsTrustedPeer(ctx context.Context, pid peer.ID) bool {
if pid == css.host.ID() {
return true
}
_, ok := css.trustedPeers.Load(pid)
return ok
}
// Trust marks a peer as "trusted".
func (css *Consensus) Trust(ctx context.Context, pid peer.ID) error {
css.trustedPeers.Store(pid, struct{}{})
return nil
}
// Distrust removes a peer from the "trusted" set.
func (css *Consensus) Distrust(ctx context.Context, pid peer.ID) error {
css.trustedPeers.Delete(pid)
return nil
}
// LogPin adds a new pin to the shared state.
func (css *Consensus) LogPin(ctx context.Context, pin *api.Pin) error {
return css.state.Add(ctx, pin)
}
// LogUnpin removes a pin from the shared state.
func (css *Consensus) LogUnpin(ctx context.Context, pin *api.Pin) error {
return css.state.Rm(ctx, pin.Cid)
}
// Peers returns the current known peerset. It uses
// the monitor component and considers every peer with
// valid known metrics a member.
func (css *Consensus) Peers(ctx context.Context) ([]peer.ID, error) {
var metrics []*api.Metric
err := css.rpcClient.CallContext(
ctx,
"",
"PeerMonitor",
"LatestMetrics",
css.config.PeersetMetric,
&metrics,
)
if err != nil {
return nil, err
}
var peers []peer.ID
selfIncluded := false
for _, m := range metrics {
peers = append(peers, m.Peer)
if m.Peer == css.host.ID() {
selfIncluded = true
}
}
// Always include self
if !selfIncluded {
peers = append(peers, css.host.ID())
}
return peers, nil
}
// WaitForSync is a no-op as it is not necessary to be fully synced for the
// component to be usable.
func (css *Consensus) WaitForSync(ctx context.Context) error { return nil }
// AddPeer is a no-op: Merkle-CRDTs do not require explicit peerset
// management, so adding a peer to the peerset does nothing.
func (css *Consensus) AddPeer(ctx context.Context, pid peer.ID) error {
return nil
}
// RmPeer always returns an error: since we do not perform peerset
// management, we have no way to remove a peer from it.
func (css *Consensus) RmPeer(ctx context.Context, pid peer.ID) error {
return ErrRmPeer
}
// State returns the cluster shared state.
func (css *Consensus) State(ctx context.Context) (state.ReadOnly, error) { return css.state, nil }
// Clean deletes all crdt-consensus data from the datastore.
func (css *Consensus) Clean(ctx context.Context) error {
return Clean(ctx, css.config, css.store)
}
// Clean deletes all crdt-consensus data from the given datastore.
func Clean(ctx context.Context, cfg *Config, store ds.Datastore) error {
logger.Info("cleaning all CRDT data from datastore")
q := query.Query{
Prefix: cfg.DatastoreNamespace,
KeysOnly: true,
}
results, err := store.Query(q)
if err != nil {
return err
}
defer results.Close()
for r := range results.Next() {
if r.Error != nil {
return r.Error
}
k := ds.NewKey(r.Key)
err := store.Delete(k)
if err != nil {
// do not die, continue cleaning
logger.Error(err)
}
}
return nil
}
// Leader returns ErrNoLeader.
func (css *Consensus) Leader(ctx context.Context) (peer.ID, error) {
return "", ErrNoLeader
}
// OfflineState returns an offline, batching state using the given
// datastore. It allows inspecting and modifying the shared pinset
// without a running cluster peer (hence offline).
func OfflineState(cfg *Config, store ds.Datastore) (state.BatchingState, error) {
batching, ok := store.(ds.Batching)
if !ok {
return nil, errors.New("must provide a Bathing datastore")
}
opts := crdt.DefaultOptions()
opts.Logger = logger
var blocksDatastore ds.Batching
blocksDatastore = namespace.Wrap(
batching,
ds.NewKey(cfg.DatastoreNamespace).ChildString(blocksNs),
)
ipfs, err := ipfslite.New(
context.Background(),
blocksDatastore,
nil,
nil,
&ipfslite.Config{
Offline: true,
},
)
if err != nil {
return nil, err
}
dags := newLiteDAGSyncer(context.Background(), ipfs)
crdt, err := crdt.New(
batching,
ds.NewKey(cfg.DatastoreNamespace),
dags,
nil,
opts,
)
if err != nil {
return nil, err
}
return dsstate.NewBatching(crdt, "", dsstate.DefaultHandle())
}
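// Offline inspection sketch (assumes a loaded *Config and a datastore opened
// from the peer's storage folder; the pin-listing call follows the
// state.ReadOnly interface and is shown for illustration only):
//
//   st, err := crdt.OfflineState(cfg, store)
//   if err != nil {
//       return err
//   }
//   pins, err := st.List(context.Background())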