Support a LevelDB backend for cluster

Badger can take up to 1000x the needed space if it is not GC'ed or compacted
(#1320), even under light usage. Cluster has no provisions to run datastore
GC operations and, while they could be added, they are not guaranteed to
help. Improvements in Badger v3 might help, but GC would still need to be
triggered explicitly.

Cluster was, however, designed to support any go-datastore as a backend.

This commit adds LevelDB support. The LevelDB go-datastore wrapper is mature,
does not need GC and should work well for most cluster use cases, which are
not overly demanding.

A new `--datastore` flag has been added to init. The store backend is selected
based on the value in the configuration, similar to how raft/crdt is chosen.
The default is leveldb. From now on it should be easier to add additional
backends, e.g. badgerv3.
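
For context, backend selection after this commit boils down to the condensed
sketch below, mirroring the state-manager change further down (the standalone
openStore helper name is illustrative, not part of the commit). On a fresh
peer the choice is made with e.g. `ipfs-cluster-service init --datastore
leveldb`.

package example

import (
	"errors"

	ds "github.com/ipfs/go-datastore"
	"github.com/ipfs/ipfs-cluster/cmdutils"
	"github.com/ipfs/ipfs-cluster/datastore/badger"
	"github.com/ipfs/ipfs-cluster/datastore/leveldb"
)

// openStore mirrors crdtStateManager.GetStore as changed in this commit:
// the backend is picked by matching the configured datastore string
// against each registered component's ConfigKey().
func openStore(datastore string, cfgs *cmdutils.Configs) (ds.Datastore, error) {
	switch datastore {
	case cfgs.Badger.ConfigKey(): // "badger"
		return badger.New(cfgs.Badger)
	case cfgs.LevelDB.ConfigKey(): // "leveldb"
		return leveldb.New(cfgs.LevelDB)
	default:
		return nil, errors.New("unknown datastore")
	}
}
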
Hector Sanjuan 2021-06-09 19:40:36 +02:00
parent bfe179e943
commit 0eef0ede89
11 changed files with 493 additions and 47 deletions

View File

@ -17,6 +17,8 @@ import (
"github.com/ipfs/ipfs-cluster/cmdutils"
"github.com/ipfs/ipfs-cluster/config"
"github.com/ipfs/ipfs-cluster/consensus/crdt"
"github.com/ipfs/ipfs-cluster/datastore/badger"
"github.com/ipfs/ipfs-cluster/datastore/leveldb"
"github.com/ipfs/ipfs-cluster/informer/disk"
"github.com/ipfs/ipfs-cluster/ipfsconn/ipfshttp"
"github.com/ipfs/ipfs-cluster/monitor/pubsubmon"
@ -202,7 +204,10 @@ func initCluster(c *cli.Context, ignoreReinit bool, cfgURL string) error {
cfgURL = fmt.Sprintf("http://%s/ipns/%s", gw, cfgURL)
}
cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath, "crdt")
// Setting the datastore here is useless, as we initialize with remote
// config and we will have an empty service.json with the source only.
// That source will decide which datastore is actually used.
cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath, "crdt", "")
cfgHelper.Manager().Shutdown()
cfgHelper.Manager().Source = cfgURL
err := cfgHelper.Manager().Default()
@ -281,7 +286,7 @@ func runCmd(c *cli.Context) error {
cfgHelper.Manager().Shutdown()
cfgs := cfgHelper.Configs()
stmgr, err := cmdutils.NewStateManager(cfgHelper.GetConsensus(), cfgHelper.Identity(), cfgs)
stmgr, err := cmdutils.NewStateManager(cfgHelper.GetConsensus(), cfgHelper.GetDatastore(), cfgHelper.Identity(), cfgs)
if err != nil {
return cli.Exit(errors.Wrap(err, "creating state manager"), 1)
}
@ -407,32 +412,71 @@ func listCmd(c *cli.Context) error {
clusterName := c.String("clusterName")
absPath, configPath, identityPath := buildPaths(c, clusterName)
err := printStatusOnline(absPath, clusterName)
if err != nil {
apiErr, ok := err.(*api.Error)
if ok && apiErr.Code != 0 {
return cli.Exit(
errors.Wrapf(
err,
"The Peer API seems to be running but returned with code %d",
apiErr.Code,
), 1)
}
// Generate a default config just for the purpose of having
// a badger configuration that the state manager can use to
// open and read the database.
cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath, "crdt")
cfgHelper.Manager().Shutdown() // not needed
cfgHelper.Manager().Default() // we have a default crdt/Badger config
cfgHelper.Configs().Badger.SetBaseDir(absPath)
cfgHelper.Manager().ApplyEnvVars()
err := printStatusOffline(cfgHelper)
if err != nil {
return cli.Exit(errors.Wrap(err, "error obtaining the pinset"), 1)
}
if !isInitialized(absPath) {
printNotInitialized(clusterName)
return cli.Exit("", 1)
}
err := printStatusOnline(absPath, clusterName)
if err == nil {
return nil
}
// There was an error. Try offline status
apiErr, ok := err.(*api.Error)
if ok && apiErr.Code != 0 {
return cli.Exit(
errors.Wrapf(
err,
"The Peer API seems to be running but returned with code %d",
apiErr.Code,
), 1)
}
// We are in offline mode, so we cannot rely on IPFS running and,
// most probably, our configuration is remote and meant to be loaded
// from IPFS. Thus we need a different way to decide whether to load
// badger or leveldb and, once we know, do it with the default
// settings.
hasLevelDB := false
lDBCfg := &leveldb.Config{}
lDBCfg.SetBaseDir(absPath)
lDBCfg.Default()
levelDBInfo, err := os.Stat(lDBCfg.GetFolder())
if err == nil && levelDBInfo.IsDir() {
hasLevelDB = true
}
hasBadger := false
badgerCfg := &badger.Config{}
badgerCfg.SetBaseDir(absPath)
badgerCfg.Default()
badgerInfo, err := os.Stat(badgerCfg.GetFolder())
if err == nil && badgerInfo.IsDir() {
hasBadger = true
}
if hasLevelDB && hasBadger {
return cli.Exit(errors.Wrapf(err, "found both leveldb (%s) and badger (%s) folders: cannot determine which to use in offline mode", lDBCfg.GetFolder(), badgerCfg.GetFolder()), 1)
}
// Since the peer was initialized, assume at least one of them exists.
dstoreType := "leveldb"
if hasBadger {
dstoreType = "badger"
}
cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath, "crdt", dstoreType)
cfgHelper.Manager().Shutdown() // not needed
cfgHelper.Configs().Badger.SetBaseDir(absPath)
cfgHelper.Configs().LevelDB.SetBaseDir(absPath)
cfgHelper.Manager().Default() // we have a default crdt config with either leveldb or badger registered.
cfgHelper.Manager().ApplyEnvVars()
err = printStatusOffline(cfgHelper)
if err != nil {
return cli.Exit(errors.Wrap(err, "error obtaining the pinset"), 1)
}
return nil
}

View File

@ -224,7 +224,7 @@ func bootstrap(ctx context.Context, cluster *ipfscluster.Cluster, bootstraps []m
}
func setupDatastore(cfgHelper *cmdutils.ConfigHelper) ds.Datastore {
stmgr, err := cmdutils.NewStateManager(cfgHelper.GetConsensus(), cfgHelper.Identity(), cfgHelper.Configs())
stmgr, err := cmdutils.NewStateManager(cfgHelper.GetConsensus(), cfgHelper.GetDatastore(), cfgHelper.Identity(), cfgHelper.Configs())
checkErr("creating state manager", err)
store, err := stmgr.GetStore()
checkErr("creating datastore", err)

View File

@ -29,7 +29,7 @@ func (l *lock) lock() {
}
// we should have a config folder whenever we try to lock
cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath, "")
cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath, "", "")
cfgHelper.MakeConfigFolder()
// set the lock file within this function

View File

@ -31,6 +31,7 @@ const programName = "ipfs-cluster-service"
const (
defaultLogLevel = "info"
defaultConsensus = "crdt"
defaultDatastore = "leveldb"
)
const (
@ -60,17 +61,17 @@ using LibP2P. This is a simplified view of the components:
| HTTP(s)
ipfs-cluster-service | HTTP
+----------+--------+--v--+----------------------+ +-------------+
| RPC/Raft | Peer 1 | API | IPFS Connector/Proxy +------> IPFS daemon |
| RPC | Peer 1 | API | IPFS Connector/Proxy +------> IPFS daemon |
+----^-----+--------+-----+----------------------+ +-------------+
| libp2p
|
+----v-----+--------+-----+----------------------+ +-------------+
| RPC/Raft | Peer 2 | API | IPFS Connector/Proxy +------> IPFS daemon |
| RPC | Peer 2 | API | IPFS Connector/Proxy +------> IPFS daemon |
+----^-----+--------+-----+----------------------+ +-------------+
|
|
+----v-----+--------+-----+----------------------+ +-------------+
| RPC/Raft | Peer 3 | API | IPFS Connector/Proxy +------> IPFS daemon |
| RPC | Peer 3 | API | IPFS Connector/Proxy +------> IPFS daemon |
+----------+--------+-----+----------------------+ +-------------+
@ -265,6 +266,11 @@ the peer IDs in the given multiaddresses.
Usage: "select consensus component: 'crdt' or 'raft'",
Value: defaultConsensus,
},
cli.StringFlag{
Name: "datastore",
Usage: "select datastore component: 'leveldb' or 'badger'",
Value: defaultDatastore,
},
cli.BoolFlag{
Name: "custom-secret, s",
Usage: "prompt for the cluster secret (when no source specified)",
@ -284,11 +290,20 @@ the peer IDs in the given multiaddresses.
},
Action: func(c *cli.Context) error {
consensus := c.String("consensus")
if consensus != "raft" && consensus != "crdt" {
switch consensus {
case "raft", "crdt":
default:
checkErr("choosing consensus", errors.New("flag value must be set to 'raft' or 'crdt'"))
}
cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath, consensus)
datastore := c.String("datastore")
switch datastore {
case "leveldb", "badger":
default:
checkErr("choosing datastore", errors.New("flag value must be set to 'leveldb' or 'badger'"))
}
cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath, consensus, datastore)
defer cfgHelper.Manager().Shutdown() // wait for saves
configExists := false

View File

@ -14,6 +14,7 @@ import (
"github.com/ipfs/ipfs-cluster/consensus/crdt"
"github.com/ipfs/ipfs-cluster/consensus/raft"
"github.com/ipfs/ipfs-cluster/datastore/badger"
"github.com/ipfs/ipfs-cluster/datastore/leveldb"
"github.com/ipfs/ipfs-cluster/informer/disk"
"github.com/ipfs/ipfs-cluster/informer/numpin"
"github.com/ipfs/ipfs-cluster/ipfsconn/ipfshttp"
@ -37,6 +38,7 @@ type Configs struct {
Metrics *observations.MetricsConfig
Tracing *observations.TracingConfig
Badger *badger.Config
LevelDB *leveldb.Config
}
// ConfigHelper helps managing the configuration and identity files with the
@ -49,16 +51,18 @@ type ConfigHelper struct {
configPath string
identityPath string
consensus string
datastore string
}
// NewConfigHelper creates a config helper given the paths to the
// configuration and identity files.
// Remember to Shutdown() the ConfigHelper.Manager() after use.
func NewConfigHelper(configPath, identityPath, consensus string) *ConfigHelper {
func NewConfigHelper(configPath, identityPath, consensus, datastore string) *ConfigHelper {
ch := &ConfigHelper{
configPath: configPath,
identityPath: identityPath,
consensus: consensus,
datastore: datastore,
}
ch.init()
return ch
@ -68,7 +72,7 @@ func NewConfigHelper(configPath, identityPath, consensus string) *ConfigHelper {
// configuration and identity files and loads the configurations from disk.
// Remember to Shutdown() the ConfigHelper.Manager() after use.
func NewLoadedConfigHelper(configPath, identityPath string) (*ConfigHelper, error) {
cfgHelper := NewConfigHelper(configPath, identityPath, "")
cfgHelper := NewConfigHelper(configPath, identityPath, "", "")
err := cfgHelper.LoadFromDisk()
return cfgHelper, err
}
@ -153,7 +157,7 @@ func (ch *ConfigHelper) Configs() *Configs {
// then it returns that.
//
// Otherwise it checks whether one of the consensus configurations
// has been loaded. If both or non have been loaded, it returns
// has been loaded. If both or none have been loaded, it returns
// an empty string.
func (ch *ConfigHelper) GetConsensus() string {
if ch.consensus != "" {
@ -171,6 +175,39 @@ func (ch *ConfigHelper) GetConsensus() string {
return ch.configs.Raft.ConfigKey()
}
// GetDatastore attempts to return the configured datastore. If the
// ConfigHelper was initialized with a datastore string, then it returns that.
//
// Otherwise it checks whether one of the datastore configurations has been
// loaded. If none or more than one have been loaded, it returns an empty
// string. Otherwise it returns the key of the loaded configuration.
func (ch *ConfigHelper) GetDatastore() string {
if ch.datastore != "" {
return ch.datastore
}
badgerLoaded := ch.manager.IsLoadedFromJSON(config.Datastore, ch.configs.Badger.ConfigKey())
levelDBLoaded := ch.manager.IsLoadedFromJSON(config.Datastore, ch.configs.LevelDB.ConfigKey())
nLoaded := 0
for _, v := range []bool{badgerLoaded, levelDBLoaded} {
if v {
nLoaded++
}
}
if nLoaded == 0 || nLoaded > 1 {
return ""
}
switch {
case badgerLoaded:
return ch.configs.Badger.ConfigKey()
case levelDBLoaded:
return ch.configs.LevelDB.ConfigKey()
default:
return ""
}
}
// register all current cluster components
func (ch *ConfigHelper) init() {
man := config.NewManager()
@ -187,6 +224,7 @@ func (ch *ConfigHelper) init() {
Metrics: &observations.MetricsConfig{},
Tracing: &observations.TracingConfig{},
Badger: &badger.Config{},
LevelDB: &leveldb.Config{},
}
man.RegisterComponent(config.Cluster, cfgs.Cluster)
man.RegisterComponent(config.API, cfgs.Restapi)
@ -198,16 +236,31 @@ func (ch *ConfigHelper) init() {
man.RegisterComponent(config.Observations, cfgs.Metrics)
man.RegisterComponent(config.Observations, cfgs.Tracing)
registerDatastores := false
switch ch.consensus {
case cfgs.Raft.ConfigKey():
man.RegisterComponent(config.Consensus, cfgs.Raft)
case cfgs.Crdt.ConfigKey():
man.RegisterComponent(config.Consensus, cfgs.Crdt)
man.RegisterComponent(config.Datastore, cfgs.Badger)
registerDatastores = true
default:
man.RegisterComponent(config.Consensus, cfgs.Raft)
man.RegisterComponent(config.Consensus, cfgs.Crdt)
man.RegisterComponent(config.Datastore, cfgs.Badger)
registerDatastores = true
}
if registerDatastores {
switch ch.datastore {
case cfgs.Badger.ConfigKey():
man.RegisterComponent(config.Datastore, cfgs.Badger)
case cfgs.LevelDB.ConfigKey():
man.RegisterComponent(config.Datastore, cfgs.LevelDB)
default:
man.RegisterComponent(config.Datastore, cfgs.LevelDB)
man.RegisterComponent(config.Datastore, cfgs.Badger)
}
}
ch.identity = &config.Identity{}

View File

@ -14,6 +14,7 @@ import (
"github.com/ipfs/ipfs-cluster/consensus/raft"
"github.com/ipfs/ipfs-cluster/datastore/badger"
"github.com/ipfs/ipfs-cluster/datastore/inmem"
"github.com/ipfs/ipfs-cluster/datastore/leveldb"
"github.com/ipfs/ipfs-cluster/pstoremgr"
"github.com/ipfs/ipfs-cluster/state"
@ -32,12 +33,15 @@ type StateManager interface {
// NewStateManager returns a state manager implementation for the given
// consensus ("raft" or "crdt"). It will need initialized configs.
func NewStateManager(consensus string, ident *config.Identity, cfgs *Configs) (StateManager, error) {
func NewStateManager(consensus string, datastore string, ident *config.Identity, cfgs *Configs) (StateManager, error) {
switch consensus {
case cfgs.Raft.ConfigKey():
return &raftStateManager{ident, cfgs}, nil
case cfgs.Crdt.ConfigKey():
return &crdtStateManager{cfgs}, nil
return &crdtStateManager{
cfgs: cfgs,
datastore: datastore,
}, nil
case "":
return nil, errors.New("could not determine the consensus component")
default:
@ -50,6 +54,7 @@ func NewStateManager(consensus string, ident *config.Identity, cfgs *Configs) (S
func NewStateManagerWithHelper(cfgHelper *ConfigHelper) (StateManager, error) {
return NewStateManager(
cfgHelper.GetConsensus(),
cfgHelper.GetDatastore(),
cfgHelper.Identity(),
cfgHelper.Configs(),
)
@ -113,15 +118,20 @@ func (raftsm *raftStateManager) Clean() error {
}
type crdtStateManager struct {
cfgs *Configs
cfgs *Configs
datastore string
}
func (crdtsm *crdtStateManager) GetStore() (ds.Datastore, error) {
bds, err := badger.New(crdtsm.cfgs.Badger)
if err != nil {
return nil, err
switch crdtsm.datastore {
case crdtsm.cfgs.Badger.ConfigKey():
return badger.New(crdtsm.cfgs.Badger)
case crdtsm.cfgs.LevelDB.ConfigKey():
return leveldb.New(crdtsm.cfgs.LevelDB)
default:
return nil, errors.New("unknown datastore")
}
return bds, nil
}
func (crdtsm *crdtStateManager) GetOfflineState(store ds.Datastore) (state.State, error) {

View File

@ -12,7 +12,7 @@ var cfgJSON = []byte(`
"folder": "test",
"badger_options": {
"max_levels": 4,
"value_log_loading_mode": 0
"value_log_loading_mode": 0
}
}
`)

datastore/leveldb/config.go (new file, 243 additions)
View File

@ -0,0 +1,243 @@
package leveldb
import (
"encoding/json"
"errors"
"path/filepath"
"github.com/imdario/mergo"
"github.com/ipfs/ipfs-cluster/config"
"github.com/kelseyhightower/envconfig"
goleveldb "github.com/syndtr/goleveldb/leveldb/opt"
)
const configKey = "leveldb"
const envConfigKey = "cluster_leveldb"
// Default values for LevelDB Config
const (
DefaultSubFolder = "leveldb"
)
var (
// DefaultLevelDBOptions carries default options. Values are customized during init().
DefaultLevelDBOptions goleveldb.Options
)
func init() {
// go-ipfs uses the defaults and only allows configuring compression, but
// it otherwise stores a small number of values in LevelDB.
// We keep the defaults.
// Example:
DefaultLevelDBOptions.NoSync = false
}
// Config is used to initialize a LevelDB datastore. It implements the
// ComponentConfig interface.
type Config struct {
config.Saver
// The folder for this datastore. Non-absolute paths are relative to
// the base configuration folder.
Folder string
LevelDBOptions goleveldb.Options
}
// levelDBOptions allows JSON serialization of the goleveldb Options
// in our configuration.
type levelDBOptions struct {
BlockCacheCapacity int `json:"block_cache_capacity"`
BlockCacheEvictRemoved bool `json:"block_cache_evict_removed"`
BlockRestartInterval int `json:"block_restart_interval"`
BlockSize int `json:"block_size"`
CompactionExpandLimitFactor int `json:"compaction_expand_limit_factor"`
CompactionGPOverlapsFactor int `json:"compaction_gp_overlaps_factor"`
CompactionL0Trigger int `json:"compaction_l0_trigger"`
CompactionSourceLimitFactor int `json:"compaction_source_limit_factor"`
CompactionTableSize int `json:"compaction_table_size"`
CompactionTableSizeMultiplier float64 `json:"compaction_table_size_multiplier"`
CompactionTableSizeMultiplierPerLevel []float64 `json:"compaction_table_size_multiplier_per_level"`
CompactionTotalSize int `json:"compaction_total_size"`
CompactionTotalSizeMultiplier float64 `json:"compaction_total_size_multiplier"`
CompactionTotalSizeMultiplierPerLevel []float64 `json:"compaction_total_size_multiplier_per_level"`
Compression uint `json:"compression"`
DisableBufferPool bool `json:"disable_buffer_pool"`
DisableBlockCache bool `json:"disable_block_cache"`
DisableCompactionBackoff bool `json:"disable_compaction_backoff"`
DisableLargeBatchTransaction bool `json:"disable_large_batch_transaction"`
IteratorSamplingRate int `json:"iterator_sampling_rate"`
NoSync bool `json:"no_sync"`
NoWriteMerge bool `json:"no_write_merge"`
OpenFilesCacheCapacity int `json:"open_files_cache_capacity"`
ReadOnly bool `json:"read_only"`
Strict uint `json:"strict"`
WriteBuffer int `json:"write_buffer"`
WriteL0PauseTrigger int `json:"write_l0_pause_trigger"`
WriteL0SlowdownTrigger int `json:"write_l0_slowdown_trigger"`
}
func (ldbo *levelDBOptions) Unmarshal() *goleveldb.Options {
goldbo := &goleveldb.Options{}
goldbo.BlockCacheCapacity = ldbo.BlockCacheCapacity
goldbo.BlockCacheEvictRemoved = ldbo.BlockCacheEvictRemoved
goldbo.BlockRestartInterval = ldbo.BlockRestartInterval
goldbo.BlockSize = ldbo.BlockSize
goldbo.CompactionExpandLimitFactor = ldbo.CompactionExpandLimitFactor
goldbo.CompactionGPOverlapsFactor = ldbo.CompactionGPOverlapsFactor
goldbo.CompactionL0Trigger = ldbo.CompactionL0Trigger
goldbo.CompactionSourceLimitFactor = ldbo.CompactionSourceLimitFactor
goldbo.CompactionTableSize = ldbo.CompactionTableSize
goldbo.CompactionTableSizeMultiplier = ldbo.CompactionTableSizeMultiplier
goldbo.CompactionTableSizeMultiplierPerLevel = ldbo.CompactionTableSizeMultiplierPerLevel
goldbo.CompactionTotalSize = ldbo.CompactionTotalSize
goldbo.CompactionTotalSizeMultiplier = ldbo.CompactionTotalSizeMultiplier
goldbo.CompactionTotalSizeMultiplierPerLevel = ldbo.CompactionTotalSizeMultiplierPerLevel
goldbo.Compression = goleveldb.Compression(ldbo.Compression)
goldbo.DisableBufferPool = ldbo.DisableBufferPool
goldbo.DisableBlockCache = ldbo.DisableBlockCache
goldbo.DisableCompactionBackoff = ldbo.DisableCompactionBackoff
goldbo.DisableLargeBatchTransaction = ldbo.DisableLargeBatchTransaction
goldbo.IteratorSamplingRate = ldbo.IteratorSamplingRate
goldbo.NoSync = ldbo.NoSync
goldbo.NoWriteMerge = ldbo.NoWriteMerge
goldbo.OpenFilesCacheCapacity = ldbo.OpenFilesCacheCapacity
goldbo.ReadOnly = ldbo.ReadOnly
goldbo.Strict = goleveldb.Strict(ldbo.Strict)
goldbo.WriteBuffer = ldbo.WriteBuffer
goldbo.WriteL0PauseTrigger = ldbo.WriteL0PauseTrigger
goldbo.WriteL0SlowdownTrigger = ldbo.WriteL0SlowdownTrigger
return goldbo
}
func (ldbo *levelDBOptions) Marshal(goldbo *goleveldb.Options) {
ldbo.BlockCacheCapacity = goldbo.BlockCacheCapacity
ldbo.BlockCacheEvictRemoved = goldbo.BlockCacheEvictRemoved
ldbo.BlockRestartInterval = goldbo.BlockRestartInterval
ldbo.BlockSize = goldbo.BlockSize
ldbo.CompactionExpandLimitFactor = goldbo.CompactionExpandLimitFactor
ldbo.CompactionGPOverlapsFactor = goldbo.CompactionGPOverlapsFactor
ldbo.CompactionL0Trigger = goldbo.CompactionL0Trigger
ldbo.CompactionSourceLimitFactor = goldbo.CompactionSourceLimitFactor
ldbo.CompactionTableSize = goldbo.CompactionTableSize
ldbo.CompactionTableSizeMultiplier = goldbo.CompactionTableSizeMultiplier
ldbo.CompactionTableSizeMultiplierPerLevel = goldbo.CompactionTableSizeMultiplierPerLevel
ldbo.CompactionTotalSize = goldbo.CompactionTotalSize
ldbo.CompactionTotalSizeMultiplier = goldbo.CompactionTotalSizeMultiplier
ldbo.CompactionTotalSizeMultiplierPerLevel = goldbo.CompactionTotalSizeMultiplierPerLevel
ldbo.Compression = uint(goldbo.Compression)
ldbo.DisableBufferPool = goldbo.DisableBufferPool
ldbo.DisableBlockCache = goldbo.DisableBlockCache
ldbo.DisableCompactionBackoff = goldbo.DisableCompactionBackoff
ldbo.DisableLargeBatchTransaction = goldbo.DisableLargeBatchTransaction
ldbo.IteratorSamplingRate = goldbo.IteratorSamplingRate
ldbo.NoSync = goldbo.NoSync
ldbo.NoWriteMerge = goldbo.NoWriteMerge
ldbo.OpenFilesCacheCapacity = goldbo.OpenFilesCacheCapacity
ldbo.ReadOnly = goldbo.ReadOnly
ldbo.Strict = uint(goldbo.Strict)
ldbo.WriteBuffer = goldbo.WriteBuffer
ldbo.WriteL0PauseTrigger = goldbo.WriteL0PauseTrigger
ldbo.WriteL0SlowdownTrigger = goldbo.WriteL0SlowdownTrigger
}
type jsonConfig struct {
Folder string `json:"folder,omitempty"`
LevelDBOptions levelDBOptions `json:"leveldb_options,omitempty"`
}
// ConfigKey returns a human-friendly identifier for this type of Datastore.
func (cfg *Config) ConfigKey() string {
return configKey
}
// Default initializes this Config with sensible values.
func (cfg *Config) Default() error {
cfg.Folder = DefaultSubFolder
cfg.LevelDBOptions = DefaultLevelDBOptions
return nil
}
// ApplyEnvVars fills in any Config fields found as environment variables.
func (cfg *Config) ApplyEnvVars() error {
jcfg := cfg.toJSONConfig()
err := envconfig.Process(envConfigKey, jcfg)
if err != nil {
return err
}
return cfg.applyJSONConfig(jcfg)
}
// Validate checks that the fields of this Config have working values,
// at least in appearance.
func (cfg *Config) Validate() error {
if cfg.Folder == "" {
return errors.New("folder is unset")
}
return nil
}
// LoadJSON reads the fields of this Config from a JSON byteslice as
// generated by ToJSON.
func (cfg *Config) LoadJSON(raw []byte) error {
jcfg := &jsonConfig{}
err := json.Unmarshal(raw, jcfg)
if err != nil {
return err
}
cfg.Default()
return cfg.applyJSONConfig(jcfg)
}
func (cfg *Config) applyJSONConfig(jcfg *jsonConfig) error {
config.SetIfNotDefault(jcfg.Folder, &cfg.Folder)
ldbOpts := jcfg.LevelDBOptions.Unmarshal()
if err := mergo.Merge(&cfg.LevelDBOptions, ldbOpts, mergo.WithOverride); err != nil {
return err
}
return cfg.Validate()
}
// ToJSON generates a JSON-formatted human-friendly representation of this
// Config.
func (cfg *Config) ToJSON() (raw []byte, err error) {
jcfg := cfg.toJSONConfig()
raw, err = config.DefaultJSONMarshal(jcfg)
return
}
func (cfg *Config) toJSONConfig() *jsonConfig {
jCfg := &jsonConfig{}
if cfg.Folder != DefaultSubFolder {
jCfg.Folder = cfg.Folder
}
bo := &levelDBOptions{}
bo.Marshal(&cfg.LevelDBOptions)
jCfg.LevelDBOptions = *bo
return jCfg
}
// GetFolder returns the LevelDB folder.
func (cfg *Config) GetFolder() string {
if filepath.IsAbs(cfg.Folder) {
return cfg.Folder
}
return filepath.Join(cfg.BaseDir, cfg.Folder)
}
// ToDisplayJSON returns JSON config as a string.
func (cfg *Config) ToDisplayJSON() ([]byte, error) {
return config.DisplayJSON(cfg.toJSONConfig())
}

View File

@ -0,0 +1,47 @@
package leveldb
import (
"testing"
)
var cfgJSON = []byte(`
{
"folder": "test",
"leveldb_options": {
"no_sync": true,
"compaction_total_size_multiplier": 1.5
}
}
`)
func TestLoadJSON(t *testing.T) {
cfg := &Config{}
err := cfg.LoadJSON(cfgJSON)
if err != nil {
t.Fatal(err)
}
}
func TestToJSON(t *testing.T) {
cfg := &Config{}
cfg.LoadJSON(cfgJSON)
if !cfg.LevelDBOptions.NoSync {
t.Fatalf("NoSync should be true")
}
if cfg.LevelDBOptions.CompactionTotalSizeMultiplier != 1.5 {
t.Fatal("TotalSizeMultiplier should be 1.5")
}
newjson, err := cfg.ToJSON()
if err != nil {
t.Fatal(err)
}
cfg = &Config{}
err = cfg.LoadJSON(newjson)
if err != nil {
t.Fatal(err)
}
}

View File

@ -0,0 +1,32 @@
// Package leveldb provides a configurable LevelDB go-datastore for use with
// IPFS Cluster.
package leveldb
import (
"os"
ds "github.com/ipfs/go-datastore"
leveldbds "github.com/ipfs/go-ds-leveldb"
"github.com/pkg/errors"
)
// New returns a LevelDB datastore configured with the given
// configuration.
func New(cfg *Config) (ds.Datastore, error) {
folder := cfg.GetFolder()
err := os.MkdirAll(folder, 0700)
if err != nil {
return nil, errors.Wrap(err, "creating leveldb folder")
}
return leveldbds.NewDatastore(folder, (*leveldbds.Options)(&cfg.LevelDBOptions))
}
// Cleanup deletes the leveldb datastore.
func Cleanup(cfg *Config) error {
folder := cfg.GetFolder()
if _, err := os.Stat(folder); os.IsNotExist(err) {
return nil
}
return os.RemoveAll(cfg.GetFolder())
}
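
As a usage illustration only (a minimal standalone sketch, not part of this
commit; the path, key and value are assumptions), the new package can be
exercised like this:

package main

import (
	"log"

	ds "github.com/ipfs/go-datastore"
	"github.com/ipfs/ipfs-cluster/datastore/leveldb"
)

func main() {
	// Build a default LevelDB config rooted at an arbitrary base directory.
	cfg := &leveldb.Config{}
	cfg.SetBaseDir("/tmp/cluster-example") // assumption: any writable path
	cfg.Default()

	store, err := leveldb.New(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer store.Close()

	// The result is a regular go-datastore.
	if err := store.Put(ds.NewKey("/hello"), []byte("world")); err != nil {
		log.Fatal(err)
	}
	log.Println("leveldb datastore ready at", cfg.GetFolder())
}
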

go.mod (2 additions)
View File

@ -20,6 +20,7 @@ require (
github.com/ipfs/go-datastore v0.4.5
github.com/ipfs/go-ds-badger v0.2.6
github.com/ipfs/go-ds-crdt v0.1.20
github.com/ipfs/go-ds-leveldb v0.4.2
github.com/ipfs/go-fs-lock v0.0.6
github.com/ipfs/go-ipfs-api v0.2.0
github.com/ipfs/go-ipfs-chunker v0.0.5
@ -65,6 +66,7 @@ require (
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.10.0
github.com/rs/cors v1.7.0
github.com/syndtr/goleveldb v1.0.0
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926
github.com/ugorji/go/codec v1.2.5
github.com/urfave/cli v1.22.5