ipfs-cluster/informer/disk/disk.go

// Package disk implements an ipfs-cluster informer which can provide different
// disk-related metrics from the IPFS daemon as an api.Metric.
package disk

import (
	"context"
	"fmt"
"sync"
"github.com/ipfs-cluster/ipfs-cluster/api"
"github.com/ipfs-cluster/ipfs-cluster/observations"
logging "github.com/ipfs/go-log/v2"
rpc "github.com/libp2p/go-libp2p-gorpc"
"go.opencensus.io/stats"
"go.opencensus.io/trace"
)
// MetricType identifies the type of metric to fetch from the IPFS daemon.
type MetricType int
const (
	// MetricFreeSpace provides the available space reported by IPFS
	MetricFreeSpace MetricType = iota
	// MetricRepoSize provides the used space reported by IPFS
	MetricRepoSize
)
// String returns a string representation for MetricType.
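// The string form is also what selects the metric in the "informer"
// section of the service.json configuration, e.g.:
//
//	"informer": {
//	  "disk": {
//	    "metric_ttl": "30s",
//	    "metric_type": "freespace"
//	  }
//	}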
func (t MetricType) String() string {
	switch t {
	case MetricFreeSpace:
		return "freespace"
	case MetricRepoSize:
		return "reposize"
	}
	return ""
}
var logger = logging.Logger("diskinfo")
// Informer is a simple object to implement the ipfscluster.Informer
// and Component interfaces.
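// Informer methods may be called from multiple goroutines: the cluster
// pushes metrics in the background while Shutdown can run concurrently,
// so access to the rpc client is guarded by a mutex.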
type Informer struct {
	config *Config // set when created, readonly

	mu        sync.Mutex // guards access to following fields
	rpcClient *rpc.Client
}
// NewInformer returns an initialized informer using the given Config.
func NewInformer(cfg *Config) (*Informer, error) {
	err := cfg.Validate()
	if err != nil {
		return nil, err
	}
	return &Informer{
		config: cfg,
	}, nil
}
// Name returns the name of the metric issued by this informer.
func (disk *Informer) Name() string {
	return disk.config.MetricType.String()
}
// SetClient provides us with an rpc.Client which allows
// contacting other components in the cluster.
func (disk *Informer) SetClient(c *rpc.Client) {
	disk.mu.Lock()
	defer disk.mu.Unlock()
	disk.rpcClient = c
}
// Shutdown is called on cluster shutdown. We just invalidate
// any metrics from this point.
func (disk *Informer) Shutdown(ctx context.Context) error {
	_, span := trace.StartSpan(ctx, "informer/disk/Shutdown")
	defer span.End()
	disk.mu.Lock()
	defer disk.mu.Unlock()
	disk.rpcClient = nil
	return nil
}
// GetMetrics returns the metric obtained by this Informer. It must always
// return at least one metric.
func (disk *Informer) GetMetrics(ctx context.Context) []api.Metric {
	ctx, span := trace.StartSpan(ctx, "informer/disk/GetMetric")
	defer span.End()
	disk.mu.Lock()
	rpcClient := disk.rpcClient
	disk.mu.Unlock()

	if rpcClient == nil {
		return []api.Metric{
			{
				Name:  disk.Name(),
				Valid: false,
			},
		}
	}

	var repoStat api.IPFSRepoStat
	var weight uint64
	var value string
	valid := true
	err := rpcClient.CallContext(
		ctx,
		"",
		"IPFSConnector",
		"RepoStat",
		struct{}{},
		&repoStat,
	)
	if err != nil {
		logger.Error(err)
		valid = false
	} else {
		switch disk.config.MetricType {
		case MetricFreeSpace:
			size := repoStat.RepoSize
			total := repoStat.StorageMax
			if size < total {
				weight = total - size
			} else {
				// Make sure we don't underflow and stop
				// sending this metric when space is exhausted.
				weight = 0
				valid = false
				logger.Warn("reported freespace is 0")
			}
			value = fmt.Sprintf("%d", weight)
		case MetricRepoSize:
			// Smaller repositories have more priority. Negating the
			// uint64 wraps around; the int64 conversion below turns
			// it into the intended negative weight.
			weight = -repoStat.RepoSize
			value = fmt.Sprintf("%d", repoStat.RepoSize)
		}
	}
	m := api.Metric{
		Name:          disk.Name(),
		Value:         value,
		Valid:         valid,
		Weight:        int64(weight),
		Partitionable: false,
	}
	m.SetTTL(disk.config.MetricTTL)

	stats.Record(ctx, observations.InformerDisk.M(m.Weight))

	return []api.Metric{m}
}
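
// exampleLifecycle is an illustrative sketch (not part of the informer's
// API) of how a caller drives this component: build a Config, create the
// Informer, optionally inject an RPC client, poll metrics, and shut down.
// It assumes Config exposes Default() as one of the common
// config.ComponentConfig operations.
func exampleLifecycle() {
	cfg := &Config{}
	if err := cfg.Default(); err != nil { // assumed ComponentConfig method
		panic(err)
	}
	inf, err := NewInformer(cfg)
	if err != nil {
		panic(err)
	}
	// The cluster would normally call SetClient with a live *rpc.Client.
	// With no client set, GetMetrics returns a single invalid metric.
	metrics := inf.GetMetrics(context.Background())
	fmt.Println(metrics[0].Name, metrics[0].Valid) // "freespace false" with defaults
	_ = inf.Shutdown(context.Background())
}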