Feat: ipfs-cluster-follow

This adds a new cluster command: ipfs-cluster-follow.

This command allows initializing and running follower peers configured from a
remote configuration source. The command can list the configured peers and
obtain information for each of them.

Peers are launched with the REST API listening on a local unix socket. The
command can list the items in the cluster pinset using this
endpoint. Alternatively, if no socket is present, the peer is assumed to
be offline and the pin list is read directly from the datastore.
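
As an illustration of this behavior, here is a hypothetical Go program (the home path and cluster name are made up for the example) that talks to a running follower through its socket using this repository's REST API client, with the same StatusAll call the list command makes:

```
package main

import (
	"context"
	"fmt"

	"github.com/ipfs/ipfs-cluster/api/rest/client"
	ma "github.com/multiformats/go-multiaddr"
)

func main() {
	// The follower writes its socket as "api-socket" inside the cluster's
	// config folder (see the socketAddress() helper further below). The
	// path and cluster name here are assumptions for the example.
	socket := fmt.Sprintf("/unix/%s", "/home/user/.ipfs-cluster-follow/myCluster/api-socket")
	addr, err := ma.NewMultiaddr(socket)
	if err != nil {
		panic(err)
	}
	c, err := client.NewDefaultClient(&client.Config{APIAddr: addr})
	if err != nil {
		panic(err)
	}
	// StatusAll is the same call the "list" subcommand makes when the
	// peer is online.
	statuses, err := c.StatusAll(context.Background(), 0, true)
	if err != nil {
		// No reachable socket: the peer is likely offline, in which case
		// "list" reads the pinset from the on-disk datastore instead.
		fmt.Println("peer appears offline:", err)
		return
	}
	for _, gpi := range statuses {
		fmt.Println(gpi.Cid)
	}
}
```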

Cluster peers launched with this command (and their configurations) are
compatible with ipfs-cluster-ctl and ipfs-cluster-service. We purposely do not
support most configuration options here. Using ipfs-cluster-ctl, or launching
the peers with ipfs-cluster-service, is always an option when the use case
deviates from what ipfs-cluster-follow supports.

Examples:

$ ipfs-cluster-follow -> list configured peers
$ ipfs-cluster-follow --help
$ ipfs-cluster-follow <clusterName> init <url>
$ ipfs-cluster-follow <clusterName> info
$ ipfs-cluster-follow <clusterName> run
$ ipfs-cluster-follow <clusterName> list
Author: Hector Sanjuan
Date:   2019-11-29 20:46:32 -06:00
Parent: ce3c50187e
Commit: 4ea830f74e

13 changed files with 817 additions and 6 deletions

.gitignore

@@ -2,6 +2,7 @@ tag_annotation
 coverage.out
 cmd/ipfs-cluster-service/ipfs-cluster-service
 cmd/ipfs-cluster-ctl/ipfs-cluster-ctl
+cmd/ipfs-cluster-follow/ipfs-cluster-follow
 sharness/lib/sharness
 sharness/test-results
 sharness/trash*

Makefile

@@ -6,21 +6,26 @@ all: build
 clean: rwundo clean_sharness
 	$(MAKE) -C cmd/ipfs-cluster-service clean
 	$(MAKE) -C cmd/ipfs-cluster-ctl clean
+	$(MAKE) -C cmd/ipfs-cluster-follow clean
 	@rm -rf ./test/testingData
 	@rm -rf ./compose

 install:
 	$(MAKE) -C cmd/ipfs-cluster-service install
 	$(MAKE) -C cmd/ipfs-cluster-ctl install
+	$(MAKE) -C cmd/ipfs-cluster-follow install

 build:
 	$(MAKE) -C cmd/ipfs-cluster-service build
 	$(MAKE) -C cmd/ipfs-cluster-ctl build
+	$(MAKE) -C cmd/ipfs-cluster-follow build

 service:
 	$(MAKE) -C cmd/ipfs-cluster-service ipfs-cluster-service

 ctl:
 	$(MAKE) -C cmd/ipfs-cluster-ctl ipfs-cluster-ctl

+follow:
+	$(MAKE) -C cmd/ipfs-cluster-follow ipfs-cluster-follow
+
 check:
 	go vet ./...

@@ -53,13 +58,13 @@ docker:
 	docker exec tmp-make-cluster sh -c "ipfs-cluster-ctl version"
 	docker exec tmp-make-cluster sh -c "ipfs-cluster-service -v"
 	docker kill tmp-make-cluster
 	docker build -t cluster-image-test -f Dockerfile-test .
-	docker run --name tmp-make-cluster-test -d --rm cluster-image && sleep 8
+	docker run --name tmp-make-cluster-test -d --rm cluster-image && sleep 4
 	docker exec tmp-make-cluster-test sh -c "ipfs-cluster-ctl version"
 	docker exec tmp-make-cluster-test sh -c "ipfs-cluster-service -v"
 	docker kill tmp-make-cluster-test

@@ -69,6 +74,6 @@ docker-compose:
 	docker exec cluster1 ipfs-cluster-ctl peers ls | grep -o "Sees 2 other peers" | uniq -c | grep 3
 	docker-compose down

-prcheck: check service ctl test
+prcheck: check service ctl follow test

 .PHONY: all test test_sharness clean_sharness rw rwundo publish service ctl install clean docker

cmd/ipfs-cluster-follow/Makefile (new file)

@@ -0,0 +1,17 @@
# go source files
SRC := $(shell find .. -type f -name '*.go')

all: ipfs-cluster-follow

ipfs-cluster-follow: $(SRC)
	go build -mod=readonly -ldflags "-X main.commit=$(shell git rev-parse HEAD)"

build: ipfs-cluster-follow

install:
	go install -ldflags "-X main.commit=$(shell git rev-parse HEAD)"

clean:
	rm -f ipfs-cluster-follow

.PHONY: clean install build

cmd/ipfs-cluster-follow/commands.go (new file)

@@ -0,0 +1,434 @@
package main
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/ipfs/go-cid"
ipfscluster "github.com/ipfs/ipfs-cluster"
"github.com/ipfs/ipfs-cluster/allocator/descendalloc"
"github.com/ipfs/ipfs-cluster/api/rest"
"github.com/ipfs/ipfs-cluster/cmdutils"
"github.com/ipfs/ipfs-cluster/config"
"github.com/ipfs/ipfs-cluster/consensus/crdt"
"github.com/ipfs/ipfs-cluster/informer/disk"
"github.com/ipfs/ipfs-cluster/ipfsconn/ipfshttp"
"github.com/ipfs/ipfs-cluster/monitor/pubsubmon"
"github.com/ipfs/ipfs-cluster/observations"
"github.com/ipfs/ipfs-cluster/pintracker/stateless"
"github.com/pkg/errors"
cli "github.com/urfave/cli/v2"
)
func printFirstStart() {
fmt.Printf(`
No clusters configured yet!
If this is the first time you are running %s,
be sure to check out the usage documentation. Here are some
examples to get you going:
$ %s --help - general description and usage help
$ %s <clusterName> --help - Help and subcommands for the <clusterName>'s follower peer
$ %s <clusterName> info --help - Help for the "info" subcommand (same for others).
`, programName, programName, programName, programName)
}
func printNotInitialized(clusterName string, err error) {
fmt.Printf(`
This cluster peer has not been initialized or configurations cannot be read.
In the former case, try running "%s %s init <config-url>" first.
In the latter case, find more information in the error message below.
(Error message was: %s)
`, programName, clusterName, err)
}
func listCmd(c *cli.Context) error {
absPath, _, _ := buildPaths(c, "")
f, err := os.Open(absPath)
if os.IsNotExist(err) {
printFirstStart()
return nil
}
if err != nil {
return cli.Exit(err, 1)
}
dirs, err := f.Readdir(-1)
if err != nil {
return cli.Exit(errors.Wrapf(err, "reading %s", absPath), 1)
}
var filteredDirs []string
for _, d := range dirs {
if d.IsDir() {
configPath := filepath.Join(absPath, d.Name(), DefaultConfigFile)
if _, err := os.Stat(configPath); err == nil {
filteredDirs = append(filteredDirs, d.Name())
}
}
}
if len(filteredDirs) == 0 {
printFirstStart()
return nil
}
fmt.Printf("Configurations found for %d follower peers. For info and help, try running:\n\n", len(filteredDirs))
for _, d := range filteredDirs {
fmt.Printf("%s \"%s\"\n", programName, d)
}
fmt.Printf("\nTip: \"%s --help\" for help and examples.", programName)
return nil
}
func infoCmd(c *cli.Context) error {
clusterName := c.String("clusterName")
for f := range ipfscluster.LoggingFacilities {
ipfscluster.SetFacilityLogLevel(f, "critical")
}
absPath, configPath, identityPath := buildPaths(c, clusterName)
cfgHelper, err := cmdutils.NewLoadedConfigHelper(configPath, identityPath)
var url string
if err != nil {
if config.IsErrFetchingSource(err) {
url = fmt.Sprintf(
"failed retrieving configuration source: %s",
cfgHelper.Manager().Source,
)
} else {
printNotInitialized(clusterName, err)
return cli.Exit("", 1)
}
} else {
url = fmt.Sprintf("Available (%s)", cfgHelper.Manager().Source)
}
cfgHelper.Manager().Shutdown()
fmt.Printf("Information about follower peer for Cluster \"%s\":\n\n", clusterName)
fmt.Printf("Config folder: %s\n", absPath)
fmt.Printf("Config source URL: %s\n", url)
ctx := context.Background()
client, err := getClient(absPath, clusterName)
if err != nil {
return cli.Exit(errors.Wrap(err, "error creating client"), 1)
}
_, err = client.Version(ctx)
fmt.Printf("Cluster Peer online: %t\n", err == nil)
connector, err := ipfshttp.NewConnector(cfgHelper.Configs().Ipfshttp)
if err == nil {
_, err = connector.ID(ctx)
}
fmt.Printf("IPFS peer online: %t\n", err == nil)
fmt.Printf("Additional help:\n\n")
fmt.Printf("-------------------------------------------------\n\n")
cli.ShowAppHelp(c)
return nil
}
func initCmd(c *cli.Context) error {
if !c.Args().Present() {
return cli.Exit("configuration URL not provided", 1)
}
cfgURL := c.Args().First()
return initCluster(c, false, cfgURL)
}
func initCluster(c *cli.Context, ignoreReinit bool, cfgURL string) error {
clusterName := c.String(clusterNameFlag)
absPath, configPath, identityPath := buildPaths(c, clusterName)
if _, err := os.Stat(absPath); !os.IsNotExist(err) {
if ignoreReinit {
fmt.Println("Configuration for this cluster already exists. Skipping initialization.")
fmt.Printf("If you wish to re-initialize, simply delete %s\n\n", absPath)
return nil
}
cmdutils.ErrorOut("Configuration for this cluster already exists.\n")
cmdutils.ErrorOut("Please delete %s if you wish to re-initialize.", absPath)
return cli.Exit("", 1)
}
if !strings.HasPrefix(cfgURL, "http://") && !strings.HasPrefix(cfgURL, "https://") {
fmt.Printf("%s will be assumed to be an DNSLink-powered address: /ipns/%s\n", cfgURL, cfgURL)
fmt.Println("It will be resolved using the local IPFS daemon's gateway (localhost:8080)")
fmt.Println("if this is not the case, specify the full url starting with http:// or https://")
cfgURL = fmt.Sprintf("http://127.0.0.1:8080/ipns/%s", cfgURL)
}
cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath, "crdt")
cfgHelper.Manager().Shutdown()
cfgHelper.Manager().Source = cfgURL
cfgHelper.Manager().Default()
ident := cfgHelper.Identity()
err := ident.Default()
if err != nil {
return cli.Exit(errors.Wrap(err, "error generating identity"), 1)
}
err = ident.ApplyEnvVars()
if err != nil {
return cli.Exit(errors.Wrap(err, "error applying environment variables to the identity"), 1)
}
err = cfgHelper.SaveIdentityToDisk()
if err != nil {
return cli.Exit(errors.Wrapf(err, "error saving %s", identityPath), 1)
}
fmt.Printf("Identity written to %s.\n", identityPath)
err = cfgHelper.SaveConfigToDisk()
if err != nil {
return cli.Exit(errors.Wrapf(err, "saving %s", configPath), 1)
}
fmt.Printf("Configuration written to %s.\n", configPath)
fmt.Printf("Cluster \"%s\" follower peer initialized.\n\n", clusterName)
fmt.Printf(
"You can now use \"%s %s run\" to start a follower peer for this cluster.\n",
programName,
clusterName,
)
fmt.Println("(Remember to start your IPFS daemon before)")
return nil
}
func runCmd(c *cli.Context) error {
clusterName := c.String(clusterNameFlag)
if cfgURL := c.String("init"); cfgURL != "" {
err := initCluster(c, true, cfgURL)
if err != nil {
return err
}
}
fmt.Printf("Starting the IPFS Cluster follower peer for \"%s\".\nCTRL-C to stop it.\n", clusterName)
for f := range ipfscluster.LoggingFacilities {
ipfscluster.SetFacilityLogLevel(f, logLevel)
}
ipfscluster.SetFacilityLogLevel("restapilog", "error")
absPath, configPath, identityPath := buildPaths(c, clusterName)
cfgHelper, err := cmdutils.NewLoadedConfigHelper(configPath, identityPath)
if err != nil {
printNotInitialized(clusterName, err)
return cli.Exit("", 1)
}
cfgHelper.Manager().Shutdown()
cfgs := cfgHelper.Configs()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
host, pubsub, dht, err := ipfscluster.NewClusterHost(ctx, cfgHelper.Identity(), cfgs.Cluster)
if err != nil {
return cli.Exit(errors.Wrap(err, "error creating libp2p components"), 1)
}
// Always run followers in follower mode.
cfgs.Cluster.FollowerMode = true
// Discard API configurations and create our own
apiCfg := rest.Config{}
cfgs.Restapi = &apiCfg
apiCfg.Default()
listenSocket, err := socketAddress(absPath, clusterName)
if err != nil {
return cli.Exit(err, 1)
}
apiCfg.HTTPListenAddr = listenSocket
rest, err := rest.NewAPI(ctx, &apiCfg)
if err != nil {
return cli.Exit(errors.Wrap(err, "creating REST API component"), 1)
}
connector, err := ipfshttp.NewConnector(cfgs.Ipfshttp)
if err != nil {
return cli.Exit(errors.Wrap(err, "creating IPFS Connector component"), 1)
}
tracker := stateless.New(cfgs.Statelesstracker, host.ID(), cfgs.Cluster.Peername)
informer, err := disk.NewInformer(cfgs.Diskinf)
if err != nil {
return cli.Exit(errors.Wrap(err, "creating disk informer"), 1)
}
alloc := descendalloc.NewAllocator()
stmgr, err := cmdutils.NewStateManager(cfgHelper.GetConsensus(), cfgHelper.Identity(), cfgs)
if err != nil {
return cli.Exit(errors.Wrap(err, "creating state manager"), 1)
}
store, err := stmgr.GetStore()
if err != nil {
return cli.Exit(errors.Wrap(err, "creating datastore"), 1)
}
crdtcons, err := crdt.New(
host,
dht,
pubsub,
cfgs.Crdt,
store,
)
if err != nil {
store.Close()
return cli.Exit(errors.Wrap(err, "creating CRDT component"), 1)
}
mon, err := pubsubmon.New(ctx, cfgs.Pubsubmon, pubsub, nil)
if err != nil {
store.Close()
return cli.Exit(errors.Wrap(err, "setting up PeerMonitor"), 1)
}
// Hardcode disabled tracing and metrics to avoid mistakenly
// exposing any user data.
tracerCfg := observations.TracingConfig{}
tracerCfg.Default()
tracerCfg.EnableTracing = false
cfgs.Tracing = &tracerCfg
cfgHelper.SetupTracing(false)
tracer, err := observations.SetupTracing(&tracerCfg)
if err != nil {
return cli.Exit(errors.Wrap(err, "error setting up tracer"), 1)
}
metricsCfg := observations.MetricsConfig{}
metricsCfg.Default()
metricsCfg.EnableStats = false
cfgs.Metrics = &metricsCfg
err = observations.SetupMetrics(cfgs.Metrics)
if err != nil {
return cli.Exit(errors.Wrap(err, "error setting up metrics"), 1)
}
cluster, err := ipfscluster.NewCluster(
ctx,
host,
dht,
cfgs.Cluster,
store,
crdtcons,
[]ipfscluster.API{rest},
connector,
tracker,
mon,
alloc,
[]ipfscluster.Informer{informer},
tracer,
)
if err != nil {
store.Close()
return cli.Exit(errors.Wrap(err, "error creating cluster peer"), 1)
}
return cmdutils.HandleSignals(ctx, cancel, cluster, host, dht)
}
// List
func pinsetCmd(c *cli.Context) error {
clusterName := c.String("clusterName")
absPath, configPath, identityPath := buildPaths(c, clusterName)
cfgHelper, err := cmdutils.NewLoadedConfigHelper(configPath, identityPath)
if err != nil {
return cli.Exit(errors.Wrap(err, "error loading configurations"), 1)
}
cfgHelper.Manager().Shutdown()
err = printStatusOnline(absPath, clusterName)
if err != nil {
err := printStatusOffline(cfgHelper)
if err != nil {
logger.Error(err)
return cli.Exit("error obtaining the pinset", 1)
}
}
return nil
}
func printStatusOnline(absPath, clusterName string) error {
ctx := context.Background()
client, err := getClient(absPath, clusterName)
if err != nil {
return cli.Exit(errors.Wrap(err, "error creating client"), 1)
}
gpis, err := client.StatusAll(ctx, 0, true)
if err != nil {
return err
}
// do not return errors after this.
var pid string
for _, gpi := range gpis {
if pid == "" { // do this once
// PeerMap will only have one key
for k := range gpi.PeerMap {
pid = k
break
}
}
pinInfo := gpi.PeerMap[pid]
// Get pin name
var name string
pin, err := client.Allocation(ctx, gpi.Cid)
if err != nil {
name = "(" + err.Error() + ")"
} else {
name = pin.Name
}
printPin(gpi.Cid, pinInfo.Status.String(), name, pinInfo.Error)
}
return nil
}
func printStatusOffline(cfgHelper *cmdutils.ConfigHelper) error {
mgr, err := cmdutils.NewStateManagerWithHelper(cfgHelper)
if err != nil {
return err
}
store, err := mgr.GetStore()
if err != nil {
return err
}
defer store.Close()
st, err := mgr.GetOfflineState(store)
if err != nil {
return err
}
pins, err := st.List(context.Background())
if err != nil {
return err
}
for _, pin := range pins {
printPin(pin.Cid, "offline", pin.Name, "")
}
return nil
}
func printPin(c cid.Cid, status, name, err string) {
if err != "" {
name = name + " (" + err + ")"
}
fmt.Printf("%-20s %s %s\n", status, c, name)
}

cmd/ipfs-cluster-follow/dist/LICENSE-APACHE (new file)

@@ -0,0 +1,13 @@
Copyright 2019. Protocol Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

cmd/ipfs-cluster-follow/dist/LICENSE-MIT (new file)

@@ -0,0 +1,19 @@
Copyright 2019. Protocol Labs, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

cmd/ipfs-cluster-follow/dist/README.md (new file)

@@ -0,0 +1,33 @@
# `ipfs-cluster-follow`
> A tool to run IPFS Cluster follower peers
`ipfs-cluster-follow` allows you to set up and run IPFS Cluster follower peers.
Follower peers can join collaborative clusters to track content in the
cluster. Follower peers do not have permissions to modify the cluster pinset
or access endpoints from other follower peers.
`ipfs-cluster-follow` allows running several peers at the same time (each
joining a different cluster) and is intended to be a very easy-to-use
application with a minimal feature set. In order to run a fully-featured peer
(follower or not), use `ipfs-cluster-service`.
### Usage
The `ipfs-cluster-follow` command is always followed by the cluster name
that we wish to work with. Full usage information can be obtained by running:
```
$ ipfs-cluster-follow --help
$ ipfs-cluster-follow <clusterName> --help
$ ipfs-cluster-follow <clusterName> info --help
$ ipfs-cluster-follow <clusterName> init --help
$ ipfs-cluster-follow <clusterName> run --help
$ ipfs-cluster-follow <clusterName> list --help
```
For more information, please check the [Documentation](https://cluster.ipfs.io/documentation), in particular the [`ipfs-cluster-follow` section](https://cluster.ipfs.io/documentation/ipfs-cluster-follow).

cmd/ipfs-cluster-follow/main.go (new file)

@@ -0,0 +1,286 @@
// The ipfs-cluster-follow application.
package main
import (
"fmt"
"os"
"os/user"
"path/filepath"
"github.com/ipfs/ipfs-cluster/api/rest/client"
"github.com/ipfs/ipfs-cluster/cmdutils"
"github.com/ipfs/ipfs-cluster/version"
"github.com/multiformats/go-multiaddr"
"github.com/pkg/errors"
semver "github.com/blang/semver"
logging "github.com/ipfs/go-log"
cli "github.com/urfave/cli/v2"
)
const (
// programName of this application
programName = "ipfs-cluster-follow"
clusterNameFlag = "clusterName"
logLevel = "info"
)
// We store a commit id here
var commit string
// Description provides a short summary of the functionality of this tool
var Description = fmt.Sprintf(`
%s helps run IPFS Cluster follower peers.
Follower peers subscribe to a Cluster controlled by a set of "trusted
peers". They collaborate in pinning items as dictated by the trusted peers and
do not have the power to make Cluster-wide modifications to the pinset.
Follower peers cannot access information nor trigger actions in other peers.
%s can be used to follow different clusters by launching it
with different options. Each Cluster has an identity, a configuration
and a datastore associated with it, which are kept under
"~/%s/<cluster_name>".
For feedback, bug reports or any additional information, visit
https://github.com/ipfs/ipfs-cluster.
EXAMPLES:
List configured follower peers:
$ %s
Display information for a follower peer:
$ %s <clusterName> info
Initialize a follower peer:
$ %s <clusterName> init <example.url>
Launch a follower peer (will stay running):
$ %s <clusterName> run
List items in the pinset for a given cluster:
$ %s <clusterName> list
Getting help and usage info:
$ %s --help
$ %s <clusterName> --help
$ %s <clusterName> info --help
$ %s <clusterName> init --help
$ %s <clusterName> run --help
$ %s <clusterName> list --help
`,
programName,
programName,
DefaultFolder,
programName,
programName,
programName,
programName,
programName,
programName,
programName,
programName,
programName,
programName,
programName,
)
var logger = logging.Logger("clusterfollow")
// Default location for the configurations and data
var (
// DefaultFolder is the name of the cluster folder
DefaultFolder = ".ipfs-cluster-follow"
// DefaultPath is set on init() to $HOME/DefaultFolder
// and holds all the ipfs-cluster data
DefaultPath string
// The name of the configuration file inside DefaultPath
DefaultConfigFile = "service.json"
// The name of the identity file inside DefaultPath
DefaultIdentityFile = "identity.json"
)
var (
configPath string
identityPath string
)
func init() {
// Set build information.
if build, err := semver.NewBuildVersion(commit); err == nil {
version.Version.Build = []string{"git" + build}
}
// We try guessing the user's home folder from the HOME variable. This
// allows HOME hacks for things like Snapcraft builds. HOME
// should be set by the OS on all UNIX systems. Alternatively, we fall
// back to usr.HomeDir (which should work on Windows etc.).
home := os.Getenv("HOME")
if home == "" {
usr, err := user.Current()
if err != nil {
panic(fmt.Sprintf("cannot get current user: %s", err))
}
home = usr.HomeDir
}
DefaultPath = filepath.Join(home, DefaultFolder)
}
func main() {
app := cli.NewApp()
app.Name = programName
app.Usage = "IPFS Cluster Follower"
app.UsageText = fmt.Sprintf("%s [global options] <clusterName> [subcommand]...", programName)
app.Description = Description
//app.Copyright = "© Protocol Labs, Inc."
app.Version = version.Version.String()
app.Flags = []cli.Flag{
&cli.StringFlag{
Name: "config, c",
Value: DefaultPath,
Usage: "path to the followers configuration and data `FOLDER`",
EnvVars: []string{"IPFS_CLUSTER_PATH"},
},
}
app.Action = func(c *cli.Context) error {
if !c.Args().Present() {
return listCmd(c)
}
clusterName := c.Args().Get(0)
clusterApp := cli.NewApp()
clusterApp.Name = fmt.Sprintf("%s %s", programName, clusterName)
clusterApp.HelpName = clusterApp.Name
clusterApp.Usage = fmt.Sprintf("Follower peer management for \"%s\"", clusterName)
clusterApp.UsageText = fmt.Sprintf("%s %s [subcommand]", programName, clusterName)
clusterApp.Action = infoCmd
clusterApp.HideVersion = true
clusterApp.Flags = []cli.Flag{
&cli.StringFlag{ // pass clusterName to subcommands
Name: clusterNameFlag,
Value: clusterName,
Hidden: true,
},
}
clusterApp.Commands = []*cli.Command{
{
Name: "info",
Usage: "displays information for this peer",
ArgsUsage: "",
Description: fmt.Sprintf(`
This command displays useful information for "%s"'s follower peer.
`, clusterName),
Action: infoCmd,
},
{
Name: "init",
Usage: "initializes the follower peer",
ArgsUsage: "<template_URL>",
Description: fmt.Sprintf(`
This command initializes a follower peer for the cluster named "%s". You
will need to pass the peer configuration URL. The command will generate a new
peer identity and leave things ready to run "%s %s run".
An error will be returned if a configuration folder for a cluster peer with
this name already exists. If you wish to re-initialize from scratch, delete
this folder first.
`, clusterName, programName, clusterName),
Action: initCmd,
},
{
Name: "run",
Usage: "runs the follower peer",
ArgsUsage: "",
Description: fmt.Sprintf(`
This command runs a "%s" cluster follower peer. The peer should have already
been initialized with "init"; alternatively, the --init flag needs to be
passed.
Before running, ensure that you have connectivity and that the IPFS daemon is
running.
You can obtain more information about this follower peer by running
"%s %s" (without any arguments).
The peer will stay running in the foreground until manually stopped.
`, clusterName, programName, clusterName),
Action: runCmd,
Flags: []cli.Flag{
&cli.StringFlag{
Name: "init",
Usage: "initialize cluster peer with the given URL before running",
},
},
},
{
Name: "list",
Usage: "list items in the peer's pinset",
ArgsUsage: "",
Description: `
This command lists all the items pinned by this follower cluster peer on IPFS.
If the peer is currently running, it will display status information for each
pin (such as PINNING). If not, it will just display the current list of pins
as obtained from the internal state on disk.
`,
Action: pinsetCmd,
},
}
return clusterApp.RunAsSubcommand(c)
}
app.Run(os.Args)
}
// buildPaths returns the path to the configuration folder, and the paths
// to the identity.json and service.json files.
func buildPaths(c *cli.Context, clusterName string) (string, string, string) {
absPath, err := filepath.Abs(c.String("config"))
if err != nil {
cmdutils.ErrorOut("error getting aboslute path for %s: %s", err, clusterName)
os.Exit(1)
}
// ~/.ipfs-cluster-follow/clusterName
absPath = filepath.Join(absPath, clusterName)
// ~/.ipfs-cluster-follow/clusterName/service.json
configPath = filepath.Join(absPath, DefaultConfigFile)
// ~/.ipfs-cluster-follow/clusterName/identity.json
identityPath = filepath.Join(absPath, DefaultIdentityFile)
return absPath, configPath, identityPath
}
func socketAddress(absPath, clusterName string) (multiaddr.Multiaddr, error) {
socket := fmt.Sprintf("/unix/%s", filepath.Join(absPath, "api-socket"))
ma, err := multiaddr.NewMultiaddr(socket)
if err != nil {
return nil, errors.Wrapf(err, "error parsing socket: %s", socket)
}
return ma, nil
}
func getClient(absPath, clusterName string) (client.Client, error) {
endp, err := socketAddress(absPath, clusterName)
if err != nil {
return nil, err
}
cfg := client.Config{
APIAddr: endp,
}
return client.NewDefaultClient(&cfg)
}

config/config.go

@@ -360,7 +360,7 @@ func (cfg *Manager) LoadJSONFromHTTPSource(url string) error {
 	cfg.Source = url
 	resp, err := http.Get(url)
 	if err != nil {
-		return errFetchingSource
+		return fmt.Errorf("%w: %s", errFetchingSource, url)
 	}
 	defer resp.Body.Close()
 	body, err := ioutil.ReadAll(resp.Body)
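
The point of switching to %w here is that callers (such as infoCmd above, via config.IsErrFetchingSource) can still match the sentinel error while the message now carries the offending URL. A minimal self-contained sketch of the mechanism, assuming IsErrFetchingSource boils down to errors.Is (the sentinel and helper names below are stand-ins, not the package's actual code):

```
package main

import (
	"errors"
	"fmt"
)

// Stand-in for the package-level sentinel used in the hunk above.
var errFetchingSource = errors.New("error fetching configuration source")

// isErrFetchingSource mimics what config.IsErrFetchingSource presumably
// does: match the sentinel through any number of %w wrappings.
func isErrFetchingSource(err error) bool {
	return errors.Is(err, errFetchingSource)
}

func fetchConfig(url string) error {
	// Simulates the failing http.Get branch above.
	return fmt.Errorf("%w: %s", errFetchingSource, url)
}

func main() {
	err := fetchConfig("https://example.org/service.json")
	fmt.Println(isErrFetchingSource(err)) // true: the added URL does not break matching
	fmt.Println(err)
}
```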

go.mod

@@ -72,6 +72,7 @@ require (
 	github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926
 	github.com/ugorji/go/codec v1.1.7
 	github.com/urfave/cli v1.22.1
+	github.com/urfave/cli/v2 v2.0.0
 	go.opencensus.io v0.22.1
 	golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd
 	gonum.org/v1/gonum v0.0.0-20190926113837-94b2bbd8ac13

go.sum

@@ -844,6 +844,8 @@ github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs
 github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
 github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY=
 github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli/v2 v2.0.0 h1:+HU9SCbu8GnEUFtIBfuUNXN39ofWViIEJIp6SURMpCg=
+github.com/urfave/cli/v2 v2.0.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ=
 github.com/vishvananda/netlink v1.0.0 h1:bqNY2lgheFIu1meHUFSH3d7vG93AFyqg3oGbJCOJgSM=
 github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
 github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f h1:nBX3nTcmxEtHSERBJaIo1Qa26VwRaopnZmfDQUXsF4I=

observations/config.go

@@ -21,7 +21,7 @@ const tracingEnvConfigKey = "cluster_tracing"
 // Default values for this Config.
 const (
 	DefaultEnableStats        = false
-	DefaultPrometheusEndpoint = "/ip4/0.0.0.0/tcp/8888"
+	DefaultPrometheusEndpoint = "/ip4/127.0.0.1/tcp/8888"
 	DefaultReportingInterval  = 2 * time.Second
 	DefaultEnableTracing      = false

observations/setup.go

@@ -23,7 +23,7 @@ import (
 // if enabled.
 func SetupMetrics(cfg *MetricsConfig) error {
 	if cfg.EnableStats {
-		logger.Info("stats collection enabled...")
+		logger.Infof("stats collection enabled on %s", cfg.PrometheusEndpoint)
 		return setupMetrics(cfg)
 	}
 	return nil