Merge branch 'master' into feat/alerts

This commit is contained in:
Hector Sanjuan 2021-01-13 21:08:49 +01:00
commit 4bcb91ee2b
133 changed files with 3625 additions and 2214 deletions

View File

@ -4,7 +4,7 @@ coverage:
default:
# basic
target: auto
threshold: 5
threshold: 50
base: auto
# advanced
branches: null
@ -18,7 +18,7 @@ coverage:
default:
# basic
target: auto
threshold: 10
threshold: 50
base: auto
# advanced
branches: null
@ -27,4 +27,5 @@ coverage:
if_ci_failed: error
only_pulls: false
flags: null
paths: null
paths: null
comment: false

View File

@ -1,10 +0,0 @@
---
name: Ask a question
about: We prefer to answer quetsions at https://discuss.ipfs.io/ !
title: ''
labels: needs review, question
assignees: ''
---
We have a full section dedicated to IPFS Cluster in the IPFS Discuss forums: https://discuss.ipfs.io/c/help/help-ipfs-cluster and we would really appreciate if you ask your question there.

View File

@ -1,8 +1,8 @@
---
name: Bug report
about: Create a bug report for IPFS Cluster
about: Create a bug report for IPFS Cluster.
title: ''
labels: bug, needs review
labels: kind/bug, need/triage
assignees: ''
---
@ -10,10 +10,8 @@ assignees: ''
<!--
You are about to open an issue in the ipfs-cluster repository. Please verify that:
* This is not a IPFS Cluster website content issue: file those here: https://github.com/ipfs/ipfs-cluster-website/issues
* You read the troubleshooting docs (https://cluster.ipfs.io/documentation/troubleshooting/)
* You searched for similar issues in the repo without luck
* All peers are running the same cluster version
* All peers are configured using the same cluster `secret`
Thank you!

11
.github/ISSUE_TEMPLATE/config.yml vendored Normal file
View File

@ -0,0 +1,11 @@
blank_issues_enabled: false
contact_links:
- name: IPFS Cluster Website/Documentation issues
url: https://github.com/ipfs/ipfs-cluster-website
about: Report issues with the Docs or anything in the IPFS Cluster website.
- name: Official Documentation
url: https://cluster.ipfs.io/documentation
about: Extensive documentation is available at the IPFS Cluster website.
- name: IPFS Official Forums
url: https://discuss.ipfs.io
about: Please post general questions, support requests, and discussions here.

View File

@ -1,8 +1,8 @@
---
name: Feature request
about: Suggest an idea for IPFS Cluster
name: Feature/Enhancement request
about: Suggest an idea to improve IPFS Cluster.
title: ''
labels: enhancement/feature, needs review
labels: kind/feature, need/triage
assignees: ''
---
@ -13,4 +13,4 @@ Please describe in detail the feature you want to propose, your use-case and how
**Additional context**
Add any other context here, for example, your operating system or the method you use to install IPFS Cluster, if relevant.
Add any other context here, if relevant, that may help better understand your request.

68
.github/config.yml vendored Normal file
View File

@ -0,0 +1,68 @@
# Configuration for welcome - https://github.com/behaviorbot/welcome
# Configuration for new-issue-welcome - https://github.com/behaviorbot/new-issue-welcome
# Comment to be posted to on first time issues
newIssueWelcomeComment: >
Thank you for submitting your first issue to this repository! A maintainer
will be here shortly to triage and review.
In the meantime, please double-check that you have provided all the
necessary information to make this process easy! Any information that can
help save additional round trips is useful! We currently aim to give
initial feedback within **two business days**. If this does not happen, feel
free to leave a comment.
Please keep an eye on how this issue will be labeled, as labels give an
overview of priorities, assignments and additional actions requested by the
maintainers:
- "Priority" labels will show how urgent this is for the team.
- "Status" labels will show if this is ready to be worked on, blocked, or in progress.
- "Need" labels will indicate if additional input or analysis is required.
Finally, remember to use https://discuss.ipfs.io if you just need general
support.
# Configuration for new-pr-welcome - https://github.com/behaviorbot/new-pr-welcome
# Comment to be posted to on PRs from first time contributors in your repository
newPRWelcomeComment: >
Thank you for submitting this PR!
A maintainer will be here shortly to review it.
We are super grateful, but we are also overloaded! Help us by making sure
that:
* The context for this PR is clear, with relevant discussion, decisions
and stakeholders linked/mentioned.
* Your contribution itself is clear (code comments, self-review for the
rest) and in its best form. Follow the [code contribution
guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md#code-contribution-guidelines)
if they apply.
Getting other community members to do a review would be great help too on
complex PRs (you can ask in the chats/forums). If you are unsure about
something, just leave us a comment.
Next steps:
* A maintainer will triage and assign priority to this PR, commenting on
any missing things and potentially assigning a reviewer for high
priority items.
* The PR gets reviews, discussed and approvals as needed.
* The PR is merged by maintainers when it has been approved and comments addressed.
We currently aim to provide initial feedback/triaging within **two business
days**. Please keep an eye on any labelling actions, as these will indicate
priorities and status of your contribution.
We are very grateful for your contribution!
# Configuration for first-pr-merge - https://github.com/behaviorbot/first-pr-merge
# Comment to be posted to on pull requests merged by a first time user
# Currently disabled
#firstPRMergeComment: ""

View File

@ -3,7 +3,7 @@ os:
- linux
# - osx
go:
- '1.13.x'
- '1.15.x'
services:
- docker
@ -33,6 +33,7 @@ jobs:
- name: "Golint, go vet, binary builds"
script:
- go get -u golang.org/x/lint/golint
- go get honnef.co/go/tools/cmd/staticcheck
- make check
- make service
- make ctl

View File

@ -1,5 +1,131 @@
# IPFS Cluster Changelog
### v0.13.0 - 2020-05-19
IPFS Cluster v0.13.0 provides many improvements and bugfixes on multiple fronts.
First, this release takes advantage of all the major features that have
landed in libp2p and IPFS (via ipfs-lite) during the last few months,
including the dual-DHT and faster block exchange with Bitswap. On the
downside, **QUIC support for private networks has been temporarily dropped**,
which means we cannot use the transport for Cluster peers anymore. We have disabled
QUIC for the time being until private network support is re-added.
Secondly, `go-ds-crdt` has received major improvements since the last version,
resolving some bugs and increasing performance. Because of this, **cluster
peers in CRDT mode running older versions will be unable to process updates
sent by peers running the newer versions**. This means, for example, that
followers on v0.12.1 and earlier will be unable to receive updates from
trusted peers on v0.13.0 and later. However, peers running v0.13.0 will still
understand updates sent from older peers.
Finally, we have resolved some bugs and added a few very useful features,
which are detailed in the list below. We recommend that everyone upgrade as soon
as possible for a swifter experience with IPFS Cluster.
#### List of changes
##### Features
* Support multiple listen interfaces | [ipfs/ipfs-cluster#1000](https://github.com/ipfs/ipfs-cluster/issues/1000) | [ipfs/ipfs-cluster#1010](https://github.com/ipfs/ipfs-cluster/issues/1010) | [ipfs/ipfs-cluster#1002](https://github.com/ipfs/ipfs-cluster/issues/1002)
* Show expiration information in `ipfs-cluster-ctl pin ls` | [ipfs/ipfs-cluster#998](https://github.com/ipfs/ipfs-cluster/issues/998) | [ipfs/ipfs-cluster#1024](https://github.com/ipfs/ipfs-cluster/issues/1024) | [ipfs/ipfs-cluster#1066](https://github.com/ipfs/ipfs-cluster/issues/1066)
* Show pin names in `ipfs-cluster-ctl status` (and API endpoint) | [ipfs/ipfs-cluster#1129](https://github.com/ipfs/ipfs-cluster/issues/1129)
* Allow updating expiration when doing `pin update` | [ipfs/ipfs-cluster#996](https://github.com/ipfs/ipfs-cluster/issues/996) | [ipfs/ipfs-cluster#1065](https://github.com/ipfs/ipfs-cluster/issues/1065) | [ipfs/ipfs-cluster#1013](https://github.com/ipfs/ipfs-cluster/issues/1013)
* Add "direct" pin mode. Cluster supports direct pins | [ipfs/ipfs-cluster#1009](https://github.com/ipfs/ipfs-cluster/issues/1009) | [ipfs/ipfs-cluster#1083](https://github.com/ipfs/ipfs-cluster/issues/1083)
* Better badger defaults for less memory usage | [ipfs/ipfs-cluster#1027](https://github.com/ipfs/ipfs-cluster/issues/1027)
* Print configuration (without sensitive values) when enabling debug for `ipfs-cluster-service` | [ipfs/ipfs-cluster#937](https://github.com/ipfs/ipfs-cluster/issues/937) | [ipfs/ipfs-cluster#959](https://github.com/ipfs/ipfs-cluster/issues/959)
* `ipfs-cluster-follow <cluster> list` works fully offline (without needing IPFS to run) | [ipfs/ipfs-cluster#1129](https://github.com/ipfs/ipfs-cluster/issues/1129)
##### Bug fixes
* Fix adding when using CidV1 | [ipfs/ipfs-cluster#1016](https://github.com/ipfs/ipfs-cluster/issues/1016) | [ipfs/ipfs-cluster#1006](https://github.com/ipfs/ipfs-cluster/issues/1006)
* Fix too many requests error on `ipfs-cluster-follow <cluster> list` | [ipfs/ipfs-cluster#1013](https://github.com/ipfs/ipfs-cluster/issues/1013) | [ipfs/ipfs-cluster#1129](https://github.com/ipfs/ipfs-cluster/issues/1129)
* Fix repinning not working reliably on collaborative clusters with replication factors set | [ipfs/ipfs-cluster#1064](https://github.com/ipfs/ipfs-cluster/issues/1064) | [ipfs/ipfs-cluster#1127](https://github.com/ipfs/ipfs-cluster/issues/1127)
* Fix underflow in repo size metric | [ipfs/ipfs-cluster#1120](https://github.com/ipfs/ipfs-cluster/issues/1120) | [ipfs/ipfs-cluster#1121](https://github.com/ipfs/ipfs-cluster/issues/1121)
* Fix adding keeps going if all BlockPut failed | [ipfs/ipfs-cluster#1131](https://github.com/ipfs/ipfs-cluster/issues/1131)
##### Other changes
* Update license files | [ipfs/ipfs-cluster#1014](https://github.com/ipfs/ipfs-cluster/issues/1014)
* Fix typos | [ipfs/ipfs-cluster#999](https://github.com/ipfs/ipfs-cluster/issues/999) | [ipfs/ipfs-cluster#1001](https://github.com/ipfs/ipfs-cluster/issues/1001) | [ipfs/ipfs-cluster#1075](https://github.com/ipfs/ipfs-cluster/issues/1075)
* Lots of dependency upgrades | [ipfs/ipfs-cluster#1020](https://github.com/ipfs/ipfs-cluster/issues/1020) | [ipfs/ipfs-cluster#1051](https://github.com/ipfs/ipfs-cluster/issues/1051) | [ipfs/ipfs-cluster#1073](https://github.com/ipfs/ipfs-cluster/issues/1073) | [ipfs/ipfs-cluster#1074](https://github.com/ipfs/ipfs-cluster/issues/1074)
* Adjust codecov thresholds | [ipfs/ipfs-cluster#1022](https://github.com/ipfs/ipfs-cluster/issues/1022)
* Fix all staticcheck warnings | [ipfs/ipfs-cluster#1071](https://github.com/ipfs/ipfs-cluster/issues/1071) | [ipfs/ipfs-cluster#1128](https://github.com/ipfs/ipfs-cluster/issues/1128)
* Detach RPC protocol version from Cluster releases | [ipfs/ipfs-cluster#1093](https://github.com/ipfs/ipfs-cluster/issues/1093)
* Trim paths on Makefile build command | [ipfs/ipfs-cluster#1012](https://github.com/ipfs/ipfs-cluster/issues/1012) | [ipfs/ipfs-cluster#1015](https://github.com/ipfs/ipfs-cluster/issues/1015)
* Add contexts to HTTP requests in the client | [ipfs/ipfs-cluster#1019](https://github.com/ipfs/ipfs-cluster/issues/1019)
#### Upgrading notices
##### Configuration changes
* The default options in the `datastore/badger/badger_options` have changed
and should reduce memory usage significantly:
* `truncate` is set to `true`.
* `value_log_loading_mode` is set to `0` (FileIO).
* `max_table_size` is set to `16777216`.
* `api/ipfsproxy/listen_multiaddress`, `api/rest/http_listen_multiaddress` and
`api/rest/libp2p_listen_multiaddress` now support an array of multiaddresses
rather than a single one (a single one still works). This allows, for
example, listening on both IPv6 and IPv4 interfaces.
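For illustration, a hypothetical `service.json` fragment reflecting these notices (values taken from the list above, unrelated keys omitted; the IPv6 listener is just an example of a multi-address array) could look like:

```json
{
  "api": {
    "ipfsproxy": {
      "listen_multiaddress": [
        "/ip4/127.0.0.1/tcp/9095",
        "/ip6/::1/tcp/9095"
      ]
    }
  },
  "datastore": {
    "badger": {
      "badger_options": {
        "truncate": true,
        "value_log_loading_mode": 0,
        "max_table_size": 16777216
      }
    }
  }
}
```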
##### REST API
The `POST /pins/{hash}` endpoint (`pin add`) now supports a `mode` query
parameter that can be set to `recursive` or `direct`. Responses that include
Pin objects (`GET /allocations`, `pin ls`) now include a `mode` field set
accordingly.
The IPFS proxy `/pin/add` endpoint now supports `recursive=false` for direct pins.
The `/pins` endpoint now returns `GlobalPinInfo` objects that include a `name`
field for the pin name. These objects no longer embed redundant information
for each peer in the `peer_map`: `cid` and `peer` are omitted.
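As a quick illustration (a hypothetical sketch, not taken from the repository: it assumes the default REST API listen address `127.0.0.1:9094` and a placeholder CID), pinning in direct mode via the new `mode` query parameter could look like this in Go:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Placeholder CID; substitute a real one. "mode" may be "recursive" or "direct".
	cid := "QmExampleCID"
	url := "http://127.0.0.1:9094/pins/" + cid + "?mode=direct"

	resp, err := http.Post(url, "application/json", nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	// The returned Pin object should include the new "mode" field.
	fmt.Println(string(body))
}
```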
##### Go APIs
The `ipfscluster.IPFSConnector` component signature for `PinLsCid` has changed
and receives a full `api.Pin` object, rather than a Cid. The RPC endpoint has
changed accordingly, but since this is a private endpoint, it does not affect
interoperability between peers.
The `api.GlobalPinInfo` type now maps every peer to a new `api.PinInfoShort`
type, which does not include the redundant information (Cid, Peer) that the
`PinInfo` type carried. The `Cid` is available as a top-level field. The `Peer`
corresponds to the map key. A new `Name` top-level field contains the Pin
Name.
The `api.PinInfo` type also includes a new `Name` field.
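The following self-contained sketch mirrors the layout described above using simplified, hypothetical stand-in types (the real definitions live in the `api` package and differ in detail): per-peer entries no longer repeat the Cid and Peer, which now live at the top level together with the new Name field.

```go
package main

import "fmt"

// Hypothetical, simplified stand-ins for api.GlobalPinInfo / api.PinInfoShort,
// used only to illustrate the layout change described above.
type PinInfoShort struct {
	Status string // per-peer status; no Cid or Peer repeated here
}

type GlobalPinInfo struct {
	Cid     string                  // top-level, no longer duplicated per peer
	Name    string                  // new top-level Name field
	PeerMap map[string]PinInfoShort // keyed by peer ID
}

func main() {
	gpi := GlobalPinInfo{
		Cid:  "QmExampleCID",
		Name: "my-pin",
		PeerMap: map[string]PinInfoShort{
			"peerA": {Status: "pinned"},
			"peerB": {Status: "pinning"},
		},
	}
	for p, info := range gpi.PeerMap {
		fmt.Printf("%s: %s (%s) is %s\n", p, gpi.Name, gpi.Cid, info.Status)
	}
}
```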
##### Other
From this release, IPFS Cluster peers running different minor versions will
remain compatible at the RPC layer (before, all cluster peers had to be
running on precisely the same minor version to be able to communicate). This
means that v0.13.0 peers are still compatible with v0.12.x peers (with the
caveat for CRDT-peers mentioned at the top). `ipfs-cluster-ctl --enc=json id`
shows information about the RPC protocol used.
Since the QUIC libp2p transport does not support private networks at this
point, it has been disabled, even though we keep the QUIC endpoint among the
default listeners.
---
### v0.12.1 - 2019-12-24
IPFS Cluster v0.12.1 is a maintenance release fixing issues on `ipfs-cluster-follow`.
#### List of changes
##### Bug fixes
* follow: the `info` command panics when ipfs is offline | [ipfs/ipfs-cluster#991](https://github.com/ipfs/ipfs-cluster/issues/991) | [ipfs/ipfs-cluster#993](https://github.com/ipfs/ipfs-cluster/issues/993)
* follow: the gateway url is not set on Run&Init command | [ipfs/ipfs-cluster#992](https://github.com/ipfs/ipfs-cluster/issues/992) | [ipfs/ipfs-cluster#993](https://github.com/ipfs/ipfs-cluster/issues/993)
* follow: disallow trusted peers for RepoGCLocal operation | [ipfs/ipfs-cluster#993](https://github.com/ipfs/ipfs-cluster/issues/993)
---
### v0.12.0 - 2019-12-20
@ -96,7 +222,7 @@ The `ipfs-cluster-follow` application is an easy to use way to run one or severa
That said, the configuration layout and folder is the same for both `ipfs-cluster-service` and `ipfs-cluster-follow` and they can be run one in place of the other. In the same way, remote-source configurations usually used for `ipfs-cluster-follow` can be replaced with local ones usually used by `ipfs-cluster-service`.
The removal of the `map pintracker` has resulted in a simplification of some operations. `StateSync` (regularly run every `state_sync_interval`) does not trigger repinnings now, but only checks for pin expirations. `RecoverAllLocal` (reguarly run every `pin_recover_interval`) will now trigger repinnings when necessary (i.e. when things that were expected to be on IPFS are not). On very large pinsets, this operation can trigger a memory spike as the full recursive pinset from IPFS is requested and loaded on memory (before this happened on `StateSync`).
The removal of the `map pintracker` has resulted in a simplification of some operations. `StateSync` (regularly run every `state_sync_interval`) does not trigger repinnings now, but only checks for pin expirations. `RecoverAllLocal` (regularly run every `pin_recover_interval`) will now trigger repinnings when necessary (i.e. when things that were expected to be on IPFS are not). On very large pinsets, this operation can trigger a memory spike as the full recursive pinset from IPFS is requested and loaded on memory (before this happened on `StateSync`).
---
@ -712,7 +838,7 @@ adding a file, which is always the root hash.
* IPFS Proxy extraction to its own `API` component: `ipfsproxy` | [ipfs/ipfs-cluster#453](https://github.com/ipfs/ipfs-cluster/issues/453) | [ipfs/ipfs-cluster#576](https://github.com/ipfs/ipfs-cluster/issues/576) | [ipfs/ipfs-cluster#616](https://github.com/ipfs/ipfs-cluster/issues/616) | [ipfs/ipfs-cluster#617](https://github.com/ipfs/ipfs-cluster/issues/617)
* Add full CORS handling to `restapi` | [ipfs/ipfs-cluster#639](https://github.com/ipfs/ipfs-cluster/issues/639) | [ipfs/ipfs-cluster#640](https://github.com/ipfs/ipfs-cluster/issues/640)
* `restapi` configuration section entries can be overriden from environment variables | [ipfs/ipfs-cluster#609](https://github.com/ipfs/ipfs-cluster/issues/609)
* `restapi` configuration section entries can be overridden from environment variables | [ipfs/ipfs-cluster#609](https://github.com/ipfs/ipfs-cluster/issues/609)
* Update to `go-ipfs-files` 2.0 | [ipfs/ipfs-cluster#613](https://github.com/ipfs/ipfs-cluster/issues/613)
* Tests for the `/monitor/metrics` endpoint | [ipfs/ipfs-cluster#587](https://github.com/ipfs/ipfs-cluster/issues/587) | [ipfs/ipfs-cluster#622](https://github.com/ipfs/ipfs-cluster/issues/622)
* Support `stream-channels=false` query parameter in `/add` | [ipfs/ipfs-cluster#632](https://github.com/ipfs/ipfs-cluster/issues/632) | [ipfs/ipfs-cluster#633](https://github.com/ipfs/ipfs-cluster/issues/633)

View File

@ -1,4 +1,4 @@
FROM golang:1.13-stretch AS builder
FROM golang:1.15-buster AS builder
MAINTAINER Hector Sanjuan <hector@protocol.ai>
# This dockerfile builds and runs ipfs-cluster-service.

View File

@ -1,4 +1,4 @@
FROM golang:1.13-stretch AS builder
FROM golang:1.15-buster AS builder
MAINTAINER Hector Sanjuan <hector@protocol.ai>
# This dockerfile builds cluster and runs it along with go-ipfs.

View File

@ -1,4 +1,4 @@
FROM golang:1.13-stretch AS builder
FROM golang:1.15-buster AS builder
MAINTAINER Hector Sanjuan <hector@protocol.ai>
# This build state just builds the cluster binaries
@ -51,7 +51,7 @@ COPY --from=builder /tmp/jq-linux64 /usr/local/bin/jq
# Add bash
COPY --from=builder /bin/bash /bin/bash
COPY --from=builder /lib/x86_64-linux-gnu/libtinfo.so.5 /lib64/libtinfo.so.5
COPY --from=builder /lib/x86_64-linux-gnu/libtinfo.so* /lib64/
USER root

5
LICENSE Normal file
View File

@ -0,0 +1,5 @@
Dual-licensed under MIT and ASLv2, by way of the [Permissive License
Stack](https://protocol.ai/blog/announcing-the-permissive-license-stack/).
Apache-2.0: https://www.apache.org/licenses/license-2.0
MIT: https://www.opensource.org/licenses/mit

View File

@ -1,4 +1,4 @@
Copyright 2019. Protocol Labs, Inc.
Copyright 2020. Protocol Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
Copyright 2019. Protocol Labs, Inc.
Copyright 2020. Protocol Labs, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

View File

@ -30,6 +30,7 @@ follow:
check:
go vet ./...
golint -set_exit_status -min_confidence 0.3 ./...
staticcheck ./...
test:
go test -v ./...

View File

@ -178,7 +178,7 @@ func TestAddOnePeerFails(t *testing.T) {
defer wg.Done()
_, err := clusters[0].AddFile(r, params)
if err != nil {
t.Fatal(err)
t.Error(err)
}
}()
@ -236,7 +236,7 @@ func TestAddAllPeersFail(t *testing.T) {
defer wg.Done()
_, err := clusters[0].AddFile(r, params)
if err != adder.ErrBlockAdder {
t.Fatal("expected ErrBlockAdder. Got: ", err)
t.Error("expected ErrBlockAdder. Got: ", err)
}
}()

View File

@ -14,7 +14,7 @@ import (
cid "github.com/ipfs/go-cid"
files "github.com/ipfs/go-ipfs-files"
ipld "github.com/ipfs/go-ipld-format"
logging "github.com/ipfs/go-log"
logging "github.com/ipfs/go-log/v2"
merkledag "github.com/ipfs/go-merkledag"
multihash "github.com/multiformats/go-multihash"
)
@ -153,7 +153,7 @@ func (a *Adder) FromFiles(ctx context.Context, f files.Directory) (cid.Cid, erro
// followed suit, it no longer receives the name of the
// file/folder being added and does not emit AddedOutput
// events with the right names. We addressed this by adding
// OutputPrefix to our version. go-ipfs modifies emmited
// OutputPrefix to our version. go-ipfs modifies emitted
// events before sending to user).
ipfsAdder.OutputPrefix = it.Name()

View File

@ -14,7 +14,7 @@ import (
"github.com/ipfs/ipfs-cluster/api"
cid "github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log"
logging "github.com/ipfs/go-log/v2"
rpc "github.com/libp2p/go-libp2p-gorpc"
)

View File

@ -16,7 +16,7 @@ import (
files "github.com/ipfs/go-ipfs-files"
posinfo "github.com/ipfs/go-ipfs-posinfo"
ipld "github.com/ipfs/go-ipld-format"
logging "github.com/ipfs/go-log"
logging "github.com/ipfs/go-log/v2"
dag "github.com/ipfs/go-merkledag"
mfs "github.com/ipfs/go-mfs"
unixfs "github.com/ipfs/go-unixfs"
@ -61,7 +61,7 @@ type Adder struct {
liveNodes uint64
lastFile mfs.FSNode
// Cluster: ipfs does a hack in commands/add.go to set the filenames
// in emmited events correctly. We carry a root folder name (or a
// in emitted events correctly. We carry a root folder name (or a
// filename in the case of single files here and emit those events
// correctly from the beginning).
OutputPrefix string
@ -121,29 +121,30 @@ func (adder *Adder) add(reader io.Reader) (ipld.Node, error) {
return nd, nil
}
// RootNode returns the mfs root node
func (adder *Adder) curRootNode() (ipld.Node, error) {
mr, err := adder.mfsRoot()
if err != nil {
return nil, err
}
root, err := mr.GetDirectory().GetNode()
if err != nil {
return nil, err
}
// Cluster: commented as it is unused
// // RootNode returns the mfs root node
// func (adder *Adder) curRootNode() (ipld.Node, error) {
// mr, err := adder.mfsRoot()
// if err != nil {
// return nil, err
// }
// root, err := mr.GetDirectory().GetNode()
// if err != nil {
// return nil, err
// }
// if one root file, use that hash as root.
if len(root.Links()) == 1 {
nd, err := root.Links()[0].GetNode(adder.ctx, adder.dagService)
if err != nil {
return nil, err
}
// // if one root file, use that hash as root.
// if len(root.Links()) == 1 {
// nd, err := root.Links()[0].GetNode(adder.ctx, adder.dagService)
// if err != nil {
// return nil, err
// }
root = nd
}
// root = nd
// }
return root, err
}
// return root, err
// }
// PinRoot recursively pins the root node of Adder and
// writes the pin state to the backing datastore.

View File

@ -11,7 +11,7 @@ package sharding
// all of the links. Note that this limit is only reached at shard sizes 7
// times the size of the current default and then only when files are all
// 1 byte in size. In the future we may generalize the shard dag to multiple
// indirect nodes to accomodate much bigger shard sizes. Also note that the
// indirect nodes to accommodate much bigger shard sizes. Also note that the
// move to using the identity hash function in cids of very small data
// will improve link density in shard nodes and further reduce the need for
// multiple levels of indirection.
@ -36,7 +36,6 @@ func init() {
// MaxLinks is the max number of links that, when serialized fit into a block
const MaxLinks = 5984
const fixedPerLink = 40
const hashFn = mh.SHA2_256
// CborDataToNode parses cbor data into a clusterDAG node while making a few

View File

@ -16,12 +16,11 @@ import (
humanize "github.com/dustin/go-humanize"
cid "github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
logging "github.com/ipfs/go-log"
logging "github.com/ipfs/go-log/v2"
peer "github.com/libp2p/go-libp2p-core/peer"
rpc "github.com/libp2p/go-libp2p-gorpc"
)
var errNotFound = errors.New("dagservice: block not found")
var logger = logging.Logger("shardingdags")
// DAGService is an implementation of a ClusterDAGService which
@ -53,6 +52,8 @@ type DAGService struct {
// New returns a new ClusterDAGService, which uses the given rpc client to perform
// Allocate, IPFSBlockPut and Pin requests to other cluster components.
func New(rpc *rpc.Client, opts api.PinOptions, out chan<- *api.AddedOutput) *DAGService {
// use a default value for this regardless of what is provided.
opts.Mode = api.PinModeRecursive
return &DAGService{
rpcClient: rpc,
pinOpts: opts,
@ -83,7 +84,7 @@ func (dgs *DAGService) Finalize(ctx context.Context, dataRoot cid.Cid) (cid.Cid,
}
if !lastCid.Equals(dataRoot) {
logger.Warningf("the last added CID (%s) is not the IPFS data root (%s). This is only normal when adding a single file without wrapping in directory.", lastCid, dataRoot)
logger.Warnf("the last added CID (%s) is not the IPFS data root (%s). This is only normal when adding a single file without wrapping in directory.", lastCid, dataRoot)
}
clusterDAGNodes, err := makeDAG(ctx, dgs.shards)
@ -133,7 +134,7 @@ func (dgs *DAGService) Finalize(ctx context.Context, dataRoot cid.Cid) (cid.Cid,
// Consider doing this? Seems like overkill
//
// // Ammend ShardPins to reference clusterDAG root hash as a Parent
// // Amend ShardPins to reference clusterDAG root hash as a Parent
// shardParents := cid.NewSet()
// shardParents.Add(clusterDAG)
// for shardN, shard := range dgs.shardNodes {
@ -212,7 +213,7 @@ func (dgs *DAGService) logStats(metaPin, clusterDAGPin cid.Cid) {
rate = humanize.Bytes(dgs.totalSize / seconds)
}
statsFmt := `sharding session sucessful:
statsFmt := `sharding session successful:
CID: %s
ClusterDAG: %s
Total shards: %d

View File

@ -12,7 +12,7 @@ import (
"github.com/ipfs/ipfs-cluster/test"
cid "github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log"
logging "github.com/ipfs/go-log/v2"
peer "github.com/libp2p/go-libp2p-core/peer"
rpc "github.com/libp2p/go-libp2p-gorpc"
)
@ -194,7 +194,7 @@ func TestFromMultipart_Errors(t *testing.T) {
}
tcs := []*testcase{
&testcase{
{
name: "bad chunker",
params: &api.AddParams{
Layout: "",
@ -210,7 +210,7 @@ func TestFromMultipart_Errors(t *testing.T) {
},
},
},
&testcase{
{
name: "shard size too small",
params: &api.AddParams{
Layout: "",
@ -226,7 +226,7 @@ func TestFromMultipart_Errors(t *testing.T) {
},
},
},
&testcase{
{
name: "replication too high",
params: &api.AddParams{
Layout: "",

View File

@ -35,17 +35,17 @@ func newShard(ctx context.Context, rpc *rpc.Client, opts api.PinOptions) (*shard
return nil, err
}
if opts.ReplicationFactorMin > 0 && (allocs == nil || len(allocs) == 0) {
if opts.ReplicationFactorMin > 0 && len(allocs) == 0 {
// This would mean that the empty cid is part of the shared state somehow.
panic("allocations for new shard cannot be empty without error")
}
if opts.ReplicationFactorMin < 0 {
logger.Warning("Shard is set to replicate everywhere ,which doesn't make sense for sharding")
logger.Warn("Shard is set to replicate everywhere ,which doesn't make sense for sharding")
}
// TODO (hector): get latest metrics for allocations, adjust sizeLimit
// to minumum. This can be done later.
// to minimum. This can be done later.
return &shard{
rpc: rpc,

View File

@ -4,21 +4,19 @@ package single
import (
"context"
"errors"
adder "github.com/ipfs/ipfs-cluster/adder"
"github.com/ipfs/ipfs-cluster/api"
cid "github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
logging "github.com/ipfs/go-log"
logging "github.com/ipfs/go-log/v2"
peer "github.com/libp2p/go-libp2p-core/peer"
rpc "github.com/libp2p/go-libp2p-gorpc"
)
var errNotFound = errors.New("dagservice: block not found")
var logger = logging.Logger("singledags")
var _ = logger // otherwise unused
// DAGService is an implementation of an adder.ClusterDAGService which
// puts the added blocks directly in the peers allocated to them (without
@ -38,6 +36,8 @@ type DAGService struct {
// New returns a new Adder with the given rpc Client. The client is used
// to perform calls to IPFS.BlockPut and Pin content on Cluster.
func New(rpc *rpc.Client, opts api.PinOptions, local bool) *DAGService {
// ensure we don't Add something and pin it in direct mode.
opts.Mode = api.PinModeRecursive
return &DAGService{
rpcClient: rpc,
dests: nil,

View File

@ -51,10 +51,12 @@ func (ba *BlockAdder) Add(ctx context.Context, node ipld.Node) error {
rpcutil.RPCDiscardReplies(len(ba.dests)),
)
var sucessfulDests []peer.ID
var successfulDests []peer.ID
numErrs := 0
for i, e := range errs {
if e != nil {
logger.Errorf("BlockPut on %s: %s", ba.dests[i], e)
numErrs++
}
// RPCErrors include server errors (wrong RPC methods), client
@ -64,14 +66,18 @@ func (ba *BlockAdder) Add(ctx context.Context, node ipld.Node) error {
if rpc.IsRPCError(e) {
continue
}
sucessfulDests = append(sucessfulDests, ba.dests[i])
successfulDests = append(successfulDests, ba.dests[i])
}
if len(sucessfulDests) == 0 {
// If all requests resulted in errors, fail.
// Successful dests will have members when no errors happened
// or when an error happened but it was not an RPC error.
// As long as BlockPut worked in 1 destination, we move on.
if numErrs == len(ba.dests) || len(successfulDests) == 0 {
return ErrBlockAdder
}
ba.dests = sucessfulDests
ba.dests = successfulDests
return nil
}
@ -90,23 +96,13 @@ func (ba *BlockAdder) AddMany(ctx context.Context, nodes []ipld.Node) error {
func ipldNodeToNodeWithMeta(n ipld.Node) *api.NodeWithMeta {
size, err := n.Size()
if err != nil {
logger.Warning(err)
}
format, ok := cid.CodecToStr[n.Cid().Type()]
if !ok {
format = ""
logger.Warning("unsupported cid type, treating as v0")
}
if n.Cid().Prefix().Version == 0 {
format = "v0"
logger.Warn(err)
}
return &api.NodeWithMeta{
Cid: n.Cid(),
Data: n.RawData(),
CumSize: size,
Format: format,
}
}

View File

@ -47,7 +47,7 @@ import (
// into account if the given CID was previously in a "pin everywhere" mode,
// and will consider such Pins as currently unallocated ones, providing
// new allocations as available.
func (c *Cluster) allocate(ctx context.Context, hash cid.Cid, rplMin, rplMax int, blacklist []peer.ID, prioritylist []peer.ID) ([]peer.ID, error) {
func (c *Cluster) allocate(ctx context.Context, hash cid.Cid, currentPin *api.Pin, rplMin, rplMax int, blacklist []peer.ID, prioritylist []peer.ID) ([]peer.ID, error) {
ctx, span := trace.StartSpan(ctx, "cluster/allocate")
defer span.End()
@ -61,8 +61,7 @@ func (c *Cluster) allocate(ctx context.Context, hash cid.Cid, rplMin, rplMax int
// Figure out who is holding the CID
var currentAllocs []peer.ID
currentPin, err := c.PinGet(ctx, hash)
if err == nil {
if currentPin != nil {
currentAllocs = currentPin.Allocations
}
metrics := c.monitor.LatestMetrics(ctx, c.informers[0].Name())

View File

@ -11,13 +11,10 @@ import (
"github.com/ipfs/ipfs-cluster/api"
cid "github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log"
peer "github.com/libp2p/go-libp2p-core/peer"
rpc "github.com/libp2p/go-libp2p-gorpc"
)
var logger = logging.Logger("ascendalloc")
// AscendAllocator extends the SimpleAllocator
type AscendAllocator struct{}

View File

@ -11,13 +11,10 @@ import (
"github.com/ipfs/ipfs-cluster/api"
cid "github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log"
peer "github.com/libp2p/go-libp2p-core/peer"
rpc "github.com/libp2p/go-libp2p-gorpc"
)
var logger = logging.Logger("descendalloc")
// DescendAllocator extends the SimpleAllocator
type DescendAllocator struct{}

View File

@ -61,6 +61,7 @@ func DefaultAddParams() *AddParams {
ReplicationFactorMin: 0,
ReplicationFactorMax: 0,
Name: "",
Mode: PinModeRecursive,
ShardSize: DefaultShardSize,
Metadata: make(map[string]string),
},

View File

@ -7,6 +7,7 @@ import (
"path/filepath"
"time"
ipfsconfig "github.com/ipfs/go-ipfs-config"
"github.com/kelseyhightower/envconfig"
ma "github.com/multiformats/go-multiaddr"
@ -19,9 +20,13 @@ const (
minMaxHeaderBytes = 4096
)
// DefaultListenAddrs contains the default listeners for the proxy.
var DefaultListenAddrs = []string{
"/ip4/127.0.0.1/tcp/9095",
}
// Default values for Config.
const (
DefaultListenAddr = "/ip4/127.0.0.1/tcp/9095"
DefaultNodeAddr = "/ip4/127.0.0.1/tcp/5001"
DefaultNodeHTTPS = false
DefaultReadTimeout = 0
@ -39,7 +44,7 @@ type Config struct {
config.Saver
// Listen parameters for the IPFS Proxy.
ListenAddr ma.Multiaddr
ListenAddr []ma.Multiaddr
// Host/Port for the IPFS daemon.
NodeAddr ma.Multiaddr
@ -93,9 +98,9 @@ type Config struct {
}
type jsonConfig struct {
ListenMultiaddress string `json:"listen_multiaddress"`
NodeMultiaddress string `json:"node_multiaddress"`
NodeHTTPS bool `json:"node_https,omitempty"`
ListenMultiaddress ipfsconfig.Strings `json:"listen_multiaddress"`
NodeMultiaddress string `json:"node_multiaddress"`
NodeHTTPS bool `json:"node_https,omitempty"`
LogFile string `json:"log_file"`
@ -131,9 +136,13 @@ func (cfg *Config) ConfigKey() string {
// Default sets the fields of this Config to sensible default values.
func (cfg *Config) Default() error {
proxy, err := ma.NewMultiaddr(DefaultListenAddr)
if err != nil {
return err
proxy := make([]ma.Multiaddr, 0, len(DefaultListenAddrs))
for _, def := range DefaultListenAddrs {
a, err := ma.NewMultiaddr(def)
if err != nil {
return err
}
proxy = append(proxy, a)
}
node, err := ma.NewMultiaddr(DefaultNodeAddr)
if err != nil {
@ -174,7 +183,7 @@ func (cfg *Config) ApplyEnvVars() error {
// at least in appearance.
func (cfg *Config) Validate() error {
var err error
if cfg.ListenAddr == nil {
if len(cfg.ListenAddr) == 0 {
err = errors.New("ipfsproxy.listen_multiaddress not set")
}
if cfg.NodeAddr == nil {
@ -230,12 +239,15 @@ func (cfg *Config) LoadJSON(raw []byte) error {
}
func (cfg *Config) applyJSONConfig(jcfg *jsonConfig) error {
if jcfg.ListenMultiaddress != "" {
proxyAddr, err := ma.NewMultiaddr(jcfg.ListenMultiaddress)
if err != nil {
return fmt.Errorf("error parsing proxy listen_multiaddress: %s", err)
if addresses := jcfg.ListenMultiaddress; len(addresses) > 0 {
cfg.ListenAddr = make([]ma.Multiaddr, 0, len(addresses))
for _, a := range addresses {
proxyAddr, err := ma.NewMultiaddr(a)
if err != nil {
return fmt.Errorf("error parsing proxy listen_multiaddress: %s", err)
}
cfg.ListenAddr = append(cfg.ListenAddr, proxyAddr)
}
cfg.ListenAddr = proxyAddr
}
if jcfg.NodeMultiaddress != "" {
nodeAddr, err := ma.NewMultiaddr(jcfg.NodeMultiaddress)
@ -266,7 +278,7 @@ func (cfg *Config) applyJSONConfig(jcfg *jsonConfig) error {
cfg.MaxHeaderBytes = jcfg.MaxHeaderBytes
}
if extra := jcfg.ExtractHeadersExtra; extra != nil && len(extra) > 0 {
if extra := jcfg.ExtractHeadersExtra; len(extra) > 0 {
cfg.ExtractHeadersExtra = extra
}
config.SetIfNotDefault(jcfg.ExtractHeadersPath, &cfg.ExtractHeadersPath)
@ -295,8 +307,13 @@ func (cfg *Config) toJSONConfig() (jcfg *jsonConfig, err error) {
jcfg = &jsonConfig{}
addresses := make([]string, 0, len(cfg.ListenAddr))
for _, a := range cfg.ListenAddr {
addresses = append(addresses, a.String())
}
// Set all configuration fields
jcfg.ListenMultiaddress = cfg.ListenAddr.String()
jcfg.ListenMultiaddress = addresses
jcfg.NodeMultiaddress = cfg.NodeAddr.String()
jcfg.ReadTimeout = cfg.ReadTimeout.String()
jcfg.ReadHeaderTimeout = cfg.ReadHeaderTimeout.String()
@ -316,3 +333,13 @@ func (cfg *Config) toJSONConfig() (jcfg *jsonConfig, err error) {
return
}
// ToDisplayJSON returns JSON config as a string.
func (cfg *Config) ToDisplayJSON() ([]byte, error) {
jcfg, err := cfg.toJSONConfig()
if err != nil {
return nil, err
}
return config.DisplayJSON(jcfg)
}

View File

@ -40,7 +40,7 @@ func TestLoadJSON(t *testing.T) {
j := &jsonConfig{}
json.Unmarshal(cfgJSON, j)
j.ListenMultiaddress = "abc"
j.ListenMultiaddress = []string{"abc"}
tst, _ := json.Marshal(j)
err = cfg.LoadJSON(tst)
if err == nil {

View File

@ -22,12 +22,12 @@ import (
handlers "github.com/gorilla/handlers"
mux "github.com/gorilla/mux"
cid "github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log"
logging "github.com/ipfs/go-log/v2"
path "github.com/ipfs/go-path"
peer "github.com/libp2p/go-libp2p-core/peer"
rpc "github.com/libp2p/go-libp2p-gorpc"
madns "github.com/multiformats/go-multiaddr-dns"
manet "github.com/multiformats/go-multiaddr-net"
manet "github.com/multiformats/go-multiaddr/net"
"go.opencensus.io/plugin/ochttp"
"go.opencensus.io/plugin/ochttp/propagation/tracecontext"
@ -58,7 +58,7 @@ type Server struct {
rpcClient *rpc.Client
rpcReady chan struct{}
listener net.Listener // proxy listener
listeners []net.Listener // proxy listener
server *http.Server // proxy server
ipfsRoundTripper http.RoundTripper // allows to talk to IPFS
@ -126,14 +126,18 @@ func New(cfg *Config) (*Server, error) {
return nil, err
}
proxyNet, proxyAddr, err := manet.DialArgs(cfg.ListenAddr)
if err != nil {
return nil, err
}
var listeners []net.Listener
for _, addr := range cfg.ListenAddr {
proxyNet, proxyAddr, err := manet.DialArgs(addr)
if err != nil {
return nil, err
}
l, err := net.Listen(proxyNet, proxyAddr)
if err != nil {
return nil, err
l, err := net.Listen(proxyNet, proxyAddr)
if err != nil {
return nil, err
}
listeners = append(listeners, l)
}
nodeScheme := "http"
@ -197,7 +201,7 @@ func New(cfg *Config) (*Server, error) {
nodeAddr: nodeHTTPAddr,
nodeScheme: nodeScheme,
rpcReady: make(chan struct{}, 1),
listener: l,
listeners: listeners,
server: s,
ipfsRoundTripper: reverseProxy.Transport,
}
@ -284,7 +288,9 @@ func (proxy *Server) Shutdown(ctx context.Context) error {
proxy.cancel()
close(proxy.rpcReady)
proxy.server.SetKeepAlivesEnabled(false)
proxy.listener.Close()
for _, l := range proxy.listeners {
l.Close()
}
proxy.wg.Wait()
proxy.shutdown = true
@ -301,19 +307,27 @@ func (proxy *Server) run() {
defer proxy.shutdownLock.Unlock()
// This launches the proxy
proxy.wg.Add(1)
go func() {
defer proxy.wg.Done()
logger.Infof(
"IPFS Proxy: %s -> %s",
proxy.config.ListenAddr,
proxy.config.NodeAddr,
)
err := proxy.server.Serve(proxy.listener) // hangs here
if err != nil && !strings.Contains(err.Error(), "closed network connection") {
logger.Error(err)
}
}()
proxy.wg.Add(len(proxy.listeners))
for _, l := range proxy.listeners {
go func(l net.Listener) {
defer proxy.wg.Done()
maddr, err := manet.FromNetAddr(l.Addr())
if err != nil {
logger.Error(err)
}
logger.Infof(
"IPFS Proxy: %s -> %s",
maddr,
proxy.config.NodeAddr,
)
err = proxy.server.Serve(l) // hangs here
if err != nil && !strings.Contains(err.Error(), "closed network connection") {
logger.Error(err)
}
}(l)
}
}
// ipfsErrorResponder writes an http error response just like IPFS would.
@ -326,13 +340,13 @@ func ipfsErrorResponder(w http.ResponseWriter, errMsg string, code int) {
w.WriteHeader(http.StatusInternalServerError)
}
w.Write(resBytes)
return
}
func (proxy *Server) pinOpHandler(op string, w http.ResponseWriter, r *http.Request) {
proxy.setHeaders(w.Header(), r)
arg := r.URL.Query().Get("arg")
q := r.URL.Query()
arg := q.Get("arg")
p, err := path.ParsePath(arg)
if err != nil {
ipfsErrorResponder(w, "Error parsing IPFS Path: "+err.Error(), -1)
@ -340,6 +354,8 @@ func (proxy *Server) pinOpHandler(op string, w http.ResponseWriter, r *http.Requ
}
pinPath := &api.PinPath{Path: p.String()}
pinPath.Mode = api.PinModeFromString(q.Get("type"))
var pin api.Pin
err = proxy.rpcClient.Call(
"",
@ -359,7 +375,6 @@ func (proxy *Server) pinOpHandler(op string, w http.ResponseWriter, r *http.Requ
resBytes, _ := json.Marshal(res)
w.WriteHeader(http.StatusOK)
w.Write(resBytes)
return
}
func (proxy *Server) pinHandler(w http.ResponseWriter, r *http.Request) {
@ -515,7 +530,6 @@ func (proxy *Server) pinUpdateHandler(w http.ResponseWriter, r *http.Request) {
resBytes, _ := json.Marshal(res)
w.WriteHeader(http.StatusOK)
w.Write(resBytes)
return
}
func (proxy *Server) addHandler(w http.ResponseWriter, r *http.Request) {
@ -546,7 +560,7 @@ func (proxy *Server) addHandler(w http.ResponseWriter, r *http.Request) {
params.Layout = "trickle"
}
logger.Warningf("Proxy/add does not support all IPFS params. Current options: %+v", params)
logger.Warnf("Proxy/add does not support all IPFS params. Current options: %+v", params)
outputTransform := func(in *api.AddedOutput) interface{} {
r := &ipfsAddResp{
@ -614,8 +628,8 @@ func (proxy *Server) repoStatHandler(w http.ResponseWriter, r *http.Request) {
ctxs, cancels := rpcutil.CtxsWithCancel(proxy.ctx, len(peers))
defer rpcutil.MultiCancel(cancels)
repoStats := make([]*api.IPFSRepoStat, len(peers), len(peers))
repoStatsIfaces := make([]interface{}, len(repoStats), len(repoStats))
repoStats := make([]*api.IPFSRepoStat, len(peers))
repoStatsIfaces := make([]interface{}, len(repoStats))
for i := range repoStats {
repoStats[i] = &api.IPFSRepoStat{}
repoStatsIfaces[i] = repoStats[i]
@ -648,7 +662,6 @@ func (proxy *Server) repoStatHandler(w http.ResponseWriter, r *http.Request) {
resBytes, _ := json.Marshal(totalStats)
w.WriteHeader(http.StatusOK)
w.Write(resBytes)
return
}
type ipfsRepoGCResp struct {
@ -704,8 +717,6 @@ func (proxy *Server) repoGCHandler(w http.ResponseWriter, r *http.Request) {
if !streamErrors && mErrStr != "" {
w.Header().Set("X-Stream-Error", mErrStr)
}
return
}
// slashHandler returns a handler which converts a /a/b/c/<argument> request

View File

@ -7,7 +7,6 @@ import (
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path/filepath"
"strings"
@ -19,7 +18,7 @@ import (
"github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/test"
logging "github.com/ipfs/go-log"
logging "github.com/ipfs/go-log/v2"
ma "github.com/multiformats/go-multiaddr"
)
@ -34,7 +33,7 @@ func testIPFSProxyWithConfig(t *testing.T, cfg *Config) (*Server, *test.IpfsMock
proxyMAddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0")
cfg.NodeAddr = nodeMAddr
cfg.ListenAddr = proxyMAddr
cfg.ListenAddr = []ma.Multiaddr{proxyMAddr}
cfg.ExtractHeadersExtra = []string{
test.IpfsCustomHeaderName,
test.IpfsTimeHeaderName,
@ -547,11 +546,11 @@ func TestProxyRepoGC(t *testing.T) {
}
testcases := []testcase{
testcase{
{
name: "With streaming errors",
streamErrors: true,
},
testcase{
{
name: "Without streaming errors",
streamErrors: false,
},
@ -623,25 +622,25 @@ func TestProxyAdd(t *testing.T) {
}
testcases := []testcase{
testcase{
{
query: "",
expectedCid: test.ShardingDirBalancedRootCID,
},
testcase{
{
query: "progress=true",
expectedCid: test.ShardingDirBalancedRootCID,
},
testcase{
{
query: "wrap-with-directory=true",
expectedCid: test.ShardingDirBalancedRootCIDWrapped,
},
testcase{
{
query: "trickle=true",
expectedCid: test.ShardingDirTrickleRootCID,
},
}
reqs := make([]*http.Request, len(testcases), len(testcases))
reqs := make([]*http.Request, len(testcases))
sth := test.NewShardingTestHelper()
defer sth.Clean(t)
@ -716,7 +715,7 @@ func TestProxyError(t *testing.T) {
}
func proxyURL(c *Server) string {
addr := c.listener.Addr()
addr := c.listeners[0].Addr()
return fmt.Sprintf("http://%s/api/v0", addr.String())
}
@ -732,14 +731,6 @@ func TestIPFSProxy(t *testing.T) {
}
}
func mustParseURL(rawurl string) *url.URL {
u, err := url.Parse(rawurl)
if err != nil {
panic(err)
}
return u
}
func TestHeaderExtraction(t *testing.T) {
ctx := context.Background()
proxy, mock := testIPFSProxy(t)
@ -818,8 +809,8 @@ func TestAttackHeaderSize(t *testing.T) {
expectedStatus int
}
testcases := []testcase{
testcase{testHeaderSize / 2, http.StatusNotFound},
testcase{testHeaderSize * 2, http.StatusRequestHeaderFieldsTooLarge},
{testHeaderSize / 2, http.StatusNotFound},
{testHeaderSize * 2, http.StatusRequestHeaderFieldsTooLarge},
}
req, err := http.NewRequest("POST", fmt.Sprintf("%s/foo", proxyURL(proxy)), nil)

View File

@ -1,7 +1,3 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
//
// Actually, the above line is just to avoid golint complains.
// Run go:generate to generate the protobuf types in this module.
//
// Package pb provides protobuf definitions for serialized types in Cluster.
//go:generate protoc -I=. --go_out=. types.proto
package api_pb
package pb

View File

@ -1,139 +1,178 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.21.0
// protoc v3.11.3
// source: types.proto
package api_pb
package pb
import (
fmt "fmt"
//lint:ignore SA1019 protobuf generates deprecated imports
proto "github.com/golang/protobuf/proto"
math "math"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
type Pin_PinType int32
const (
Pin_BadType Pin_PinType = 0
Pin_DataType Pin_PinType = 1
Pin_BadType Pin_PinType = 0 // 1 << iota
Pin_DataType Pin_PinType = 1 // 2 << iota
Pin_MetaType Pin_PinType = 2
Pin_ClusterDAGType Pin_PinType = 3
Pin_ShardType Pin_PinType = 4
)
var Pin_PinType_name = map[int32]string{
0: "BadType",
1: "DataType",
2: "MetaType",
3: "ClusterDAGType",
4: "ShardType",
}
// Enum value maps for Pin_PinType.
var (
Pin_PinType_name = map[int32]string{
0: "BadType",
1: "DataType",
2: "MetaType",
3: "ClusterDAGType",
4: "ShardType",
}
Pin_PinType_value = map[string]int32{
"BadType": 0,
"DataType": 1,
"MetaType": 2,
"ClusterDAGType": 3,
"ShardType": 4,
}
)
var Pin_PinType_value = map[string]int32{
"BadType": 0,
"DataType": 1,
"MetaType": 2,
"ClusterDAGType": 3,
"ShardType": 4,
func (x Pin_PinType) Enum() *Pin_PinType {
p := new(Pin_PinType)
*p = x
return p
}
func (x Pin_PinType) String() string {
return proto.EnumName(Pin_PinType_name, int32(x))
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (Pin_PinType) Descriptor() protoreflect.EnumDescriptor {
return file_types_proto_enumTypes[0].Descriptor()
}
func (Pin_PinType) Type() protoreflect.EnumType {
return &file_types_proto_enumTypes[0]
}
func (x Pin_PinType) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use Pin_PinType.Descriptor instead.
func (Pin_PinType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{0, 0}
return file_types_proto_rawDescGZIP(), []int{0, 0}
}
type Pin struct {
Cid []byte `protobuf:"bytes,1,opt,name=Cid,proto3" json:"Cid,omitempty"`
Type Pin_PinType `protobuf:"varint,2,opt,name=Type,proto3,enum=api.pb.Pin_PinType" json:"Type,omitempty"`
Allocations [][]byte `protobuf:"bytes,3,rep,name=Allocations,proto3" json:"Allocations,omitempty"`
MaxDepth int32 `protobuf:"zigzag32,4,opt,name=MaxDepth,proto3" json:"MaxDepth,omitempty"`
Reference []byte `protobuf:"bytes,5,opt,name=Reference,proto3" json:"Reference,omitempty"`
Options *PinOptions `protobuf:"bytes,6,opt,name=Options,proto3" json:"Options,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Cid []byte `protobuf:"bytes,1,opt,name=Cid,proto3" json:"Cid,omitempty"`
Type Pin_PinType `protobuf:"varint,2,opt,name=Type,proto3,enum=api.pb.Pin_PinType" json:"Type,omitempty"`
Allocations [][]byte `protobuf:"bytes,3,rep,name=Allocations,proto3" json:"Allocations,omitempty"`
MaxDepth int32 `protobuf:"zigzag32,4,opt,name=MaxDepth,proto3" json:"MaxDepth,omitempty"`
Reference []byte `protobuf:"bytes,5,opt,name=Reference,proto3" json:"Reference,omitempty"`
Options *PinOptions `protobuf:"bytes,6,opt,name=Options,proto3" json:"Options,omitempty"`
}
func (m *Pin) Reset() { *m = Pin{} }
func (m *Pin) String() string { return proto.CompactTextString(m) }
func (*Pin) ProtoMessage() {}
func (x *Pin) Reset() {
*x = Pin{}
if protoimpl.UnsafeEnabled {
mi := &file_types_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Pin) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Pin) ProtoMessage() {}
func (x *Pin) ProtoReflect() protoreflect.Message {
mi := &file_types_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Pin.ProtoReflect.Descriptor instead.
func (*Pin) Descriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{0}
return file_types_proto_rawDescGZIP(), []int{0}
}
func (m *Pin) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Pin.Unmarshal(m, b)
}
func (m *Pin) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Pin.Marshal(b, m, deterministic)
}
func (m *Pin) XXX_Merge(src proto.Message) {
xxx_messageInfo_Pin.Merge(m, src)
}
func (m *Pin) XXX_Size() int {
return xxx_messageInfo_Pin.Size(m)
}
func (m *Pin) XXX_DiscardUnknown() {
xxx_messageInfo_Pin.DiscardUnknown(m)
}
var xxx_messageInfo_Pin proto.InternalMessageInfo
func (m *Pin) GetCid() []byte {
if m != nil {
return m.Cid
func (x *Pin) GetCid() []byte {
if x != nil {
return x.Cid
}
return nil
}
func (m *Pin) GetType() Pin_PinType {
if m != nil {
return m.Type
func (x *Pin) GetType() Pin_PinType {
if x != nil {
return x.Type
}
return Pin_BadType
}
func (m *Pin) GetAllocations() [][]byte {
if m != nil {
return m.Allocations
func (x *Pin) GetAllocations() [][]byte {
if x != nil {
return x.Allocations
}
return nil
}
func (m *Pin) GetMaxDepth() int32 {
if m != nil {
return m.MaxDepth
func (x *Pin) GetMaxDepth() int32 {
if x != nil {
return x.MaxDepth
}
return 0
}
func (m *Pin) GetReference() []byte {
if m != nil {
return m.Reference
func (x *Pin) GetReference() []byte {
if x != nil {
return x.Reference
}
return nil
}
func (m *Pin) GetOptions() *PinOptions {
if m != nil {
return m.Options
func (x *Pin) GetOptions() *PinOptions {
if x != nil {
return x.Options
}
return nil
}
type PinOptions struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
ReplicationFactorMin int32 `protobuf:"zigzag32,1,opt,name=ReplicationFactorMin,proto3" json:"ReplicationFactorMin,omitempty"`
ReplicationFactorMax int32 `protobuf:"zigzag32,2,opt,name=ReplicationFactorMax,proto3" json:"ReplicationFactorMax,omitempty"`
Name string `protobuf:"bytes,3,opt,name=Name,proto3" json:"Name,omitempty"`
@ -141,120 +180,216 @@ type PinOptions struct {
Metadata map[string]string `protobuf:"bytes,6,rep,name=Metadata,proto3" json:"Metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
PinUpdate []byte `protobuf:"bytes,7,opt,name=PinUpdate,proto3" json:"PinUpdate,omitempty"`
ExpireAt uint64 `protobuf:"varint,8,opt,name=ExpireAt,proto3" json:"ExpireAt,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PinOptions) Reset() { *m = PinOptions{} }
func (m *PinOptions) String() string { return proto.CompactTextString(m) }
func (*PinOptions) ProtoMessage() {}
func (x *PinOptions) Reset() {
*x = PinOptions{}
if protoimpl.UnsafeEnabled {
mi := &file_types_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *PinOptions) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PinOptions) ProtoMessage() {}
func (x *PinOptions) ProtoReflect() protoreflect.Message {
mi := &file_types_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PinOptions.ProtoReflect.Descriptor instead.
func (*PinOptions) Descriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{1}
return file_types_proto_rawDescGZIP(), []int{1}
}
func (m *PinOptions) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PinOptions.Unmarshal(m, b)
}
func (m *PinOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PinOptions.Marshal(b, m, deterministic)
}
func (m *PinOptions) XXX_Merge(src proto.Message) {
xxx_messageInfo_PinOptions.Merge(m, src)
}
func (m *PinOptions) XXX_Size() int {
return xxx_messageInfo_PinOptions.Size(m)
}
func (m *PinOptions) XXX_DiscardUnknown() {
xxx_messageInfo_PinOptions.DiscardUnknown(m)
}
var xxx_messageInfo_PinOptions proto.InternalMessageInfo
func (m *PinOptions) GetReplicationFactorMin() int32 {
if m != nil {
return m.ReplicationFactorMin
func (x *PinOptions) GetReplicationFactorMin() int32 {
if x != nil {
return x.ReplicationFactorMin
}
return 0
}
func (m *PinOptions) GetReplicationFactorMax() int32 {
if m != nil {
return m.ReplicationFactorMax
func (x *PinOptions) GetReplicationFactorMax() int32 {
if x != nil {
return x.ReplicationFactorMax
}
return 0
}
func (m *PinOptions) GetName() string {
if m != nil {
return m.Name
func (x *PinOptions) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (m *PinOptions) GetShardSize() uint64 {
if m != nil {
return m.ShardSize
func (x *PinOptions) GetShardSize() uint64 {
if x != nil {
return x.ShardSize
}
return 0
}
func (m *PinOptions) GetMetadata() map[string]string {
if m != nil {
return m.Metadata
func (x *PinOptions) GetMetadata() map[string]string {
if x != nil {
return x.Metadata
}
return nil
}
func (m *PinOptions) GetPinUpdate() []byte {
if m != nil {
return m.PinUpdate
func (x *PinOptions) GetPinUpdate() []byte {
if x != nil {
return x.PinUpdate
}
return nil
}
func (m *PinOptions) GetExpireAt() uint64 {
if m != nil {
return m.ExpireAt
func (x *PinOptions) GetExpireAt() uint64 {
if x != nil {
return x.ExpireAt
}
return 0
}
func init() {
proto.RegisterEnum("api.pb.Pin_PinType", Pin_PinType_name, Pin_PinType_value)
proto.RegisterType((*Pin)(nil), "api.pb.Pin")
proto.RegisterType((*PinOptions)(nil), "api.pb.PinOptions")
proto.RegisterMapType((map[string]string)(nil), "api.pb.PinOptions.MetadataEntry")
var File_types_proto protoreflect.FileDescriptor
var file_types_proto_rawDesc = []byte{
0x0a, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x61,
0x70, 0x69, 0x2e, 0x70, 0x62, 0x22, 0xa1, 0x02, 0x0a, 0x03, 0x50, 0x69, 0x6e, 0x12, 0x10, 0x0a,
0x03, 0x43, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x43, 0x69, 0x64, 0x12,
0x27, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x13, 0x2e,
0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x69, 0x6e, 0x2e, 0x50, 0x69, 0x6e, 0x54, 0x79,
0x70, 0x65, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x41, 0x6c, 0x6c, 0x6f,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0b, 0x41,
0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x4d, 0x61,
0x78, 0x44, 0x65, 0x70, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x11, 0x52, 0x08, 0x4d, 0x61,
0x78, 0x44, 0x65, 0x70, 0x74, 0x68, 0x12, 0x1c, 0x0a, 0x09, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65,
0x6e, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x52, 0x65, 0x66, 0x65, 0x72,
0x65, 0x6e, 0x63, 0x65, 0x12, 0x2c, 0x0a, 0x07, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x50,
0x69, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x4f, 0x70, 0x74, 0x69, 0x6f,
0x6e, 0x73, 0x22, 0x55, 0x0a, 0x07, 0x50, 0x69, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a,
0x07, 0x42, 0x61, 0x64, 0x54, 0x79, 0x70, 0x65, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x61,
0x74, 0x61, 0x54, 0x79, 0x70, 0x65, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61,
0x54, 0x79, 0x70, 0x65, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65,
0x72, 0x44, 0x41, 0x47, 0x54, 0x79, 0x70, 0x65, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x68,
0x61, 0x72, 0x64, 0x54, 0x79, 0x70, 0x65, 0x10, 0x04, 0x22, 0xe1, 0x02, 0x0a, 0x0a, 0x50, 0x69,
0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x52, 0x65, 0x70, 0x6c,
0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x69, 0x6e,
0x18, 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, 0x14, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x69, 0x6e, 0x12, 0x32, 0x0a, 0x14,
0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x63, 0x74, 0x6f,
0x72, 0x4d, 0x61, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x11, 0x52, 0x14, 0x52, 0x65, 0x70, 0x6c,
0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x61, 0x78,
0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x69, 0x7a,
0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x69,
0x7a, 0x65, 0x12, 0x3c, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06,
0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x69,
0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
0x12, 0x1c, 0x0a, 0x09, 0x50, 0x69, 0x6e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20,
0x01, 0x28, 0x0c, 0x52, 0x09, 0x50, 0x69, 0x6e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x1a,
0x0a, 0x08, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04,
0x52, 0x08, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x74, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65,
0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b,
0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a,
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61,
0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x42, 0x06, 0x5a,
0x04, 0x2e, 0x3b, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) }
var (
file_types_proto_rawDescOnce sync.Once
file_types_proto_rawDescData = file_types_proto_rawDesc
)
var fileDescriptor_d938547f84707355 = []byte{
// 405 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x52, 0xc1, 0x6e, 0xd3, 0x40,
0x10, 0x65, 0x6d, 0xc7, 0xb1, 0xc7, 0x69, 0x95, 0x0e, 0x3d, 0xac, 0x2a, 0x0e, 0xab, 0x5c, 0xf0,
0x01, 0xf9, 0x60, 0x2e, 0x08, 0xb8, 0x84, 0xa6, 0x20, 0x21, 0x05, 0xa2, 0x2d, 0xfd, 0x80, 0x6d,
0xbc, 0xa8, 0x2b, 0x8c, 0xbd, 0x72, 0xb6, 0xc8, 0xe6, 0x6f, 0xf8, 0x34, 0xfe, 0x04, 0xed, 0x6e,
0xea, 0x14, 0x35, 0x07, 0x4b, 0xf3, 0xde, 0xcc, 0x9b, 0x19, 0xbf, 0x1d, 0xc8, 0xcc, 0xa0, 0xe5,
0xae, 0xd0, 0x5d, 0x6b, 0x5a, 0x8c, 0x85, 0x56, 0x85, 0xbe, 0x5d, 0xfc, 0x09, 0x20, 0xdc, 0xa8,
0x06, 0xe7, 0x10, 0x5e, 0xaa, 0x8a, 0x12, 0x46, 0xf2, 0x19, 0xb7, 0x21, 0xbe, 0x84, 0xe8, 0xdb,
0xa0, 0x25, 0x0d, 0x18, 0xc9, 0x4f, 0xcb, 0xe7, 0x85, 0x17, 0x14, 0x1b, 0xd5, 0xd8, 0xcf, 0xa6,
0xb8, 0x2b, 0x40, 0x06, 0xd9, 0xb2, 0xae, 0xdb, 0xad, 0x30, 0xaa, 0x6d, 0x76, 0x34, 0x64, 0x61,
0x3e, 0xe3, 0x8f, 0x29, 0xbc, 0x80, 0x64, 0x2d, 0xfa, 0x95, 0xd4, 0xe6, 0x8e, 0x46, 0x8c, 0xe4,
0x67, 0x7c, 0xc4, 0xf8, 0x02, 0x52, 0x2e, 0xbf, 0xcb, 0x4e, 0x36, 0x5b, 0x49, 0x27, 0x6e, 0xfc,
0x81, 0xc0, 0x57, 0x30, 0xfd, 0xaa, 0x7d, 0xdf, 0x98, 0x91, 0x3c, 0x2b, 0xf1, 0xd1, 0x1e, 0xfb,
0x0c, 0x7f, 0x28, 0x59, 0xdc, 0xc0, 0x74, 0xbf, 0x1a, 0x66, 0x30, 0xfd, 0x20, 0x2a, 0x1b, 0xce,
0x9f, 0xe1, 0x0c, 0x92, 0x95, 0x30, 0xc2, 0x21, 0x62, 0xd1, 0x5a, 0xee, 0x51, 0x80, 0x08, 0xa7,
0x97, 0xf5, 0xfd, 0xce, 0xc8, 0x6e, 0xb5, 0xfc, 0xe4, 0xb8, 0x10, 0x4f, 0x20, 0xbd, 0xbe, 0x13,
0x9d, 0x97, 0x47, 0x8b, 0xbf, 0x01, 0xc0, 0x61, 0x1c, 0x96, 0x70, 0xce, 0xa5, 0xae, 0x95, 0xff,
0xbb, 0x8f, 0x62, 0x6b, 0xda, 0x6e, 0xad, 0x1a, 0xe7, 0xdd, 0x19, 0x3f, 0x9a, 0x3b, 0xae, 0x11,
0xbd, 0x33, 0xf7, 0xa8, 0x46, 0xf4, 0x88, 0x10, 0x7d, 0x11, 0x3f, 0x25, 0x0d, 0x19, 0xc9, 0x53,
0xee, 0x62, 0xeb, 0x96, 0xdb, 0xec, 0x5a, 0xfd, 0x96, 0xce, 0xca, 0x88, 0x1f, 0x08, 0x7c, 0xef,
0xff, 0xac, 0x12, 0x46, 0xd0, 0x98, 0x85, 0x79, 0x56, 0xb2, 0xa7, 0x76, 0x15, 0x0f, 0x25, 0x57,
0x8d, 0xe9, 0x06, 0x3e, 0x2a, 0x6c, 0xef, 0x8d, 0x6a, 0x6e, 0x74, 0x25, 0x8c, 0xa4, 0x53, 0xff,
0x12, 0x23, 0x61, 0xdf, 0xf0, 0xaa, 0xd7, 0xaa, 0x93, 0x4b, 0x43, 0x13, 0x37, 0x78, 0xc4, 0x17,
0xef, 0xe0, 0xe4, 0xbf, 0xa6, 0xf6, 0x9a, 0x7e, 0xc8, 0xc1, 0x39, 0x92, 0x72, 0x1b, 0xe2, 0x39,
0x4c, 0x7e, 0x89, 0xfa, 0xde, 0x9f, 0x53, 0xca, 0x3d, 0x78, 0x1b, 0xbc, 0x21, 0x9f, 0xa3, 0x64,
0x32, 0x8f, 0x6f, 0x63, 0x77, 0x96, 0xaf, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xaf, 0x59, 0x74,
0x86, 0xa5, 0x02, 0x00, 0x00,
func file_types_proto_rawDescGZIP() []byte {
file_types_proto_rawDescOnce.Do(func() {
file_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_types_proto_rawDescData)
})
return file_types_proto_rawDescData
}
var file_types_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_types_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
var file_types_proto_goTypes = []interface{}{
(Pin_PinType)(0), // 0: api.pb.Pin.PinType
(*Pin)(nil), // 1: api.pb.Pin
(*PinOptions)(nil), // 2: api.pb.PinOptions
nil, // 3: api.pb.PinOptions.MetadataEntry
}
var file_types_proto_depIdxs = []int32{
0, // 0: api.pb.Pin.Type:type_name -> api.pb.Pin.PinType
2, // 1: api.pb.Pin.Options:type_name -> api.pb.PinOptions
3, // 2: api.pb.PinOptions.Metadata:type_name -> api.pb.PinOptions.MetadataEntry
3, // [3:3] is the sub-list for method output_type
3, // [3:3] is the sub-list for method input_type
3, // [3:3] is the sub-list for extension type_name
3, // [3:3] is the sub-list for extension extendee
0, // [0:3] is the sub-list for field type_name
}
func init() { file_types_proto_init() }
func file_types_proto_init() {
if File_types_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Pin); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PinOptions); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_types_proto_rawDesc,
NumEnums: 1,
NumMessages: 3,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_types_proto_goTypes,
DependencyIndexes: file_types_proto_depIdxs,
EnumInfos: file_types_proto_enumTypes,
MessageInfos: file_types_proto_msgTypes,
}.Build()
File_types_proto = out.File
file_types_proto_rawDesc = nil
file_types_proto_goTypes = nil
file_types_proto_depIdxs = nil
}

View File

@ -1,8 +1,9 @@
syntax = "proto3";
package api.pb;
option go_package=".;pb";
message Pin {
bytes Cid = 1;
enum PinType {
BadType = 0; // 1 << iota
DataType = 1; // 2 << iota
@ -10,6 +11,8 @@ message Pin {
ClusterDAGType = 3;
ShardType = 4;
}
bytes Cid = 1;
PinType Type = 2;
repeated bytes Allocations = 3;
sint32 MaxDepth = 4;

View File

@ -15,12 +15,13 @@ import (
cid "github.com/ipfs/go-cid"
shell "github.com/ipfs/go-ipfs-api"
files "github.com/ipfs/go-ipfs-files"
logging "github.com/ipfs/go-log"
logging "github.com/ipfs/go-log/v2"
host "github.com/libp2p/go-libp2p-core/host"
peer "github.com/libp2p/go-libp2p-core/peer"
pnet "github.com/libp2p/go-libp2p-core/pnet"
ma "github.com/multiformats/go-multiaddr"
madns "github.com/multiformats/go-multiaddr-dns"
manet "github.com/multiformats/go-multiaddr-net"
manet "github.com/multiformats/go-multiaddr/net"
"go.opencensus.io/plugin/ochttp"
"go.opencensus.io/plugin/ochttp/propagation/tracecontext"
@ -148,7 +149,7 @@ type Config struct {
// If APIAddr is provided, and the peer uses private networks (pnet),
// then we need to provide the key. If the peer is the cluster peer,
// this corresponds to the cluster secret.
ProtectorKey []byte
ProtectorKey pnet.PSK
// ProxyAddr is used to obtain a go-ipfs-api Shell instance pointing
// to the ipfs proxy endpoint of ipfs-cluster. If empty, the location
@ -167,11 +168,31 @@ type Config struct {
LogLevel string
}
// AsTemplateFor creates client configs from resolved multiaddresses
func (c *Config) AsTemplateFor(addrs []ma.Multiaddr) ([]*Config) {
var cfgs []*Config
for _, addr := range addrs {
cfg := *c
cfg.APIAddr = addr
cfgs = append(cfgs, &cfg)
}
return cfgs
}
// AsTemplateForResolvedAddress creates client configs from a multiaddress
func (c *Config) AsTemplateForResolvedAddress(ctx context.Context, addr ma.Multiaddr) ([]*Config, error) {
resolvedAddrs, err := resolveAddr(ctx, addr)
if err != nil {
return nil, err
}
return c.AsTemplateFor(resolvedAddrs), nil
}
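
Not part of this changeset, but a rough sketch of how the new template helpers could be used: expand a single dnsaddr into one Config per resolved address and build a default client for each. The dnsaddr below is a made-up placeholder.

```go
package main

import (
	"context"
	"fmt"

	client "github.com/ipfs/ipfs-cluster/api/rest/client"
	ma "github.com/multiformats/go-multiaddr"
)

func main() {
	// Hypothetical dnsaddr that resolves to several cluster API endpoints.
	addr, err := ma.NewMultiaddr("/dnsaddr/cluster.example.com")
	if err != nil {
		panic(err)
	}

	base := &client.Config{APIAddr: addr}
	cfgs, err := base.AsTemplateForResolvedAddress(context.Background(), addr)
	if err != nil {
		panic(err)
	}

	// One default client per resolved address.
	for _, cfg := range cfgs {
		c, err := client.NewDefaultClient(cfg)
		if err != nil {
			fmt.Println("skipping", cfg.APIAddr, ":", err)
			continue
		}
		_ = c // ready for c.ID(ctx), c.Pin(ctx, ...), etc.
		fmt.Println("client ready for", cfg.APIAddr)
	}
}
```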
// DefaultClient provides methods to interact with the ipfs-cluster API. Use
// NewDefaultClient() to create one.
type defaultClient struct {
ctx context.Context
cancel func()
cancel context.CancelFunc
config *Config
transport *http.Transport
net string
@ -182,9 +203,10 @@ type defaultClient struct {
// NewDefaultClient initializes a client given a Config.
func NewDefaultClient(cfg *Config) (Client, error) {
ctx := context.Background()
ctx, cancel := context.WithCancel(context.Background())
client := &defaultClient{
ctx: ctx,
cancel: cancel,
config: cfg,
}
@ -263,17 +285,10 @@ func (c *defaultClient) resolveAPIAddr() error {
if !IsPeerAddress(c.config.APIAddr) {
return nil
}
resolveCtx, cancel := context.WithTimeout(c.ctx, ResolveTimeout)
defer cancel()
resolved, err := madns.Resolve(resolveCtx, c.config.APIAddr)
resolved, err := resolveAddr(c.ctx, c.config.APIAddr)
if err != nil {
return err
}
if len(resolved) == 0 {
return fmt.Errorf("resolving %s returned 0 results", c.config.APIAddr)
}
c.config.APIAddr = resolved[0]
return nil
}
@ -354,7 +369,7 @@ func IsPeerAddress(addr ma.Multiaddr) bool {
return false
}
pid, err := addr.ValueForProtocol(ma.P_P2P)
dnsaddr, err2 := addr.ValueForProtocol(madns.DnsaddrProtocol.Code)
dnsaddr, err2 := addr.ValueForProtocol(ma.P_DNSADDR)
return (pid != "" && err == nil) || (dnsaddr != "" && err2 == nil)
}
@ -367,3 +382,19 @@ func isUnixSocketAddress(addr ma.Multiaddr) bool {
value, err := addr.ValueForProtocol(ma.P_UNIX)
return (value != "" && err == nil)
}
// resolveAddr resolves a multiaddress (e.g. a dnsaddr) into one or more concrete multiaddresses within ResolveTimeout.
func resolveAddr(ctx context.Context, addr ma.Multiaddr) ([]ma.Multiaddr, error) {
resolveCtx, cancel := context.WithTimeout(ctx, ResolveTimeout)
defer cancel()
resolved, err := madns.Resolve(resolveCtx, addr)
if err != nil {
return nil, err
}
if len(resolved) == 0 {
return nil, fmt.Errorf("resolving %s returned 0 results", addr)
}
return resolved, nil
}

View File

@ -11,7 +11,7 @@ import (
libp2p "github.com/libp2p/go-libp2p"
peer "github.com/libp2p/go-libp2p-core/peer"
pnet "github.com/libp2p/go-libp2p-pnet"
pnet "github.com/libp2p/go-libp2p-core/pnet"
ma "github.com/multiformats/go-multiaddr"
)
@ -22,17 +22,13 @@ func testAPI(t *testing.T) *rest.API {
cfg := &rest.Config{}
cfg.Default()
cfg.HTTPListenAddr = apiMAddr
var secret [32]byte
prot, err := pnet.NewV1ProtectorFromBytes(&secret)
if err != nil {
t.Fatal(err)
}
cfg.HTTPListenAddr = []ma.Multiaddr{apiMAddr}
secret := make(pnet.PSK, 32)
h, err := libp2p.New(
context.Background(),
libp2p.ListenAddrs(apiMAddr),
libp2p.PrivateNetwork(prot),
libp2p.PrivateNetwork(secret),
)
if err != nil {
t.Fatal(err)
@ -54,15 +50,15 @@ func shutdown(a *rest.API) {
}
func apiMAddr(a *rest.API) ma.Multiaddr {
listen, _ := a.HTTPAddress()
hostPort := strings.Split(listen, ":")
listen, _ := a.HTTPAddresses()
hostPort := strings.Split(listen[0], ":")
addr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%s", hostPort[1]))
return addr
}
func peerMAddr(a *rest.API) ma.Multiaddr {
ipfsAddr, _ := ma.NewMultiaddr(fmt.Sprintf("/p2p/%s", peer.IDB58Encode(a.Host().ID())))
ipfsAddr, _ := ma.NewMultiaddr(fmt.Sprintf("/p2p/%s", peer.Encode(a.Host().ID())))
for _, a := range a.Host().Addrs() {
if _, err := a.ValueForProtocol(ma.P_IP4); err == nil {
return a.Encapsulate(ipfsAddr)
@ -163,19 +159,19 @@ func TestHostPort(t *testing.T) {
}
testcases := []testcase{
testcase{
{
host: "3.3.1.1",
port: "9094",
expectedHostname: "3.3.1.1:9094",
expectedProxyAddr: "/ip4/3.3.1.1/tcp/9095",
},
testcase{
{
host: "ipfs.io",
port: "9094",
expectedHostname: "ipfs.io:9094",
expectedProxyAddr: "/dns4/ipfs.io/tcp/9095",
},
testcase{
{
host: "2001:db8::1",
port: "9094",
expectedHostname: "[2001:db8::1]:9094",

View File

@ -8,7 +8,7 @@ import (
shell "github.com/ipfs/go-ipfs-api"
files "github.com/ipfs/go-ipfs-files"
"github.com/ipfs/ipfs-cluster/api"
peer "github.com/libp2p/go-libp2p-peer"
peer "github.com/libp2p/go-libp2p-core/peer"
)
// loadBalancingClient is a client to interact with IPFS Cluster APIs

View File

@ -52,7 +52,7 @@ func (c *defaultClient) PeerAdd(ctx context.Context, pid peer.ID) (*api.ID, erro
ctx, span := trace.StartSpan(ctx, "client/PeerAdd")
defer span.End()
pidStr := peer.IDB58Encode(pid)
pidStr := peer.Encode(pid)
body := peerAddBody{pidStr}
var buf bytes.Buffer
@ -350,7 +350,7 @@ func (c *defaultClient) RepoGC(ctx context.Context, local bool) (*api.GlobalRepo
}
// WaitFor is a utility function that allows for a caller to wait for a
// paticular status for a CID (as defined by StatusFilterParams).
// particular status for a CID (as defined by StatusFilterParams).
// It returns the final status for that CID and an error, if there was.
//
// WaitFor works by calling Status() repeatedly and checking that all
@ -528,7 +528,7 @@ func (c *defaultClient) Add(
ctx, span := trace.StartSpan(ctx, "client/Add")
defer span.End()
addFiles := make([]files.DirEntry, len(paths), len(paths))
addFiles := make([]files.DirEntry, len(paths))
for i, p := range paths {
u, err := url.Parse(p)
if err != nil {

View File

@ -6,10 +6,9 @@ import (
"testing"
"time"
"github.com/ipfs/ipfs-cluster/api"
types "github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/api/rest"
"github.com/ipfs/ipfs-cluster/test"
rest "github.com/ipfs/ipfs-cluster/api/rest"
test "github.com/ipfs/ipfs-cluster/test"
cid "github.com/ipfs/go-cid"
peer "github.com/libp2p/go-libp2p-core/peer"
@ -90,6 +89,7 @@ func TestPeersWithError(t *testing.T) {
testF := func(t *testing.T, c Client) {
addr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/44444")
var _ = c
c, _ = NewDefaultClient(&Config{APIAddr: addr, DisableKeepAlives: true})
ids, err := c.Peers(ctx)
if err == nil {
@ -367,7 +367,7 @@ func TestStatusAll(t *testing.T) {
t.Error("there should be two pins")
}
pins, err = c.StatusAll(ctx, 1<<25, false)
_, err = c.StatusAll(ctx, 1<<25, false)
if err == nil {
t.Error("expected an error")
}
@ -499,7 +499,7 @@ type waitService struct {
pinStart time.Time
}
func (wait *waitService) Pin(ctx context.Context, in *api.Pin, out *api.Pin) error {
func (wait *waitService) Pin(ctx context.Context, in *types.Pin, out *types.Pin) error {
wait.l.Lock()
defer wait.l.Unlock()
wait.pinStart = time.Now()
@ -507,41 +507,33 @@ func (wait *waitService) Pin(ctx context.Context, in *api.Pin, out *api.Pin) err
return nil
}
func (wait *waitService) Status(ctx context.Context, in cid.Cid, out *api.GlobalPinInfo) error {
func (wait *waitService) Status(ctx context.Context, in cid.Cid, out *types.GlobalPinInfo) error {
wait.l.Lock()
defer wait.l.Unlock()
if time.Now().After(wait.pinStart.Add(5 * time.Second)) { //pinned
*out = api.GlobalPinInfo{
*out = types.GlobalPinInfo{
Cid: in,
PeerMap: map[string]*api.PinInfo{
peer.IDB58Encode(test.PeerID1): {
Cid: in,
Peer: test.PeerID1,
Status: api.TrackerStatusPinned,
PeerMap: map[string]*types.PinInfoShort{
peer.Encode(test.PeerID1): {
Status: types.TrackerStatusPinned,
TS: wait.pinStart,
},
peer.IDB58Encode(test.PeerID2): {
Cid: in,
Peer: test.PeerID2,
Status: api.TrackerStatusPinned,
peer.Encode(test.PeerID2): {
Status: types.TrackerStatusPinned,
TS: wait.pinStart,
},
},
}
} else { // pinning
*out = api.GlobalPinInfo{
*out = types.GlobalPinInfo{
Cid: in,
PeerMap: map[string]*api.PinInfo{
peer.IDB58Encode(test.PeerID1): {
Cid: in,
Peer: test.PeerID1,
Status: api.TrackerStatusPinning,
PeerMap: map[string]*types.PinInfoShort{
peer.Encode(test.PeerID1): {
Status: types.TrackerStatusPinning,
TS: wait.pinStart,
},
peer.IDB58Encode(test.PeerID2): {
Cid: in,
Peer: test.PeerID2,
Status: api.TrackerStatusPinned,
peer.Encode(test.PeerID2): {
Status: types.TrackerStatusPinned,
TS: wait.pinStart,
},
},
@ -577,21 +569,23 @@ func TestWaitFor(t *testing.T) {
fp := StatusFilterParams{
Cid: test.Cid1,
Local: false,
Target: api.TrackerStatusPinned,
Target: types.TrackerStatusPinned,
CheckFreq: time.Second,
}
start := time.Now()
st, err := WaitFor(ctx, c, fp)
if err != nil {
t.Fatal(err)
t.Error(err)
return
}
if time.Now().Sub(start) <= 5*time.Second {
t.Fatal("slow pin should have taken at least 5 seconds")
if time.Since(start) <= 5*time.Second {
t.Error("slow pin should have taken at least 5 seconds")
return
}
for _, pi := range st.PeerMap {
if pi.Status != api.TrackerStatusPinned {
if pi.Status != types.TrackerStatusPinned {
t.Error("pin info should show the item is pinned")
}
}
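
For reference, a minimal usage sketch of the WaitFor helper exercised by this test; the client `cl`, the CID and the polling interval are assumptions for illustration, not values from this changeset.

```go
package sketch

import (
	"context"
	"log"
	"time"

	cid "github.com/ipfs/go-cid"
	"github.com/ipfs/ipfs-cluster/api"
	client "github.com/ipfs/ipfs-cluster/api/rest/client"
)

// waitPinned blocks until every peer reports ci as pinned or ctx expires.
func waitPinned(ctx context.Context, cl client.Client, ci cid.Cid) error {
	fp := client.StatusFilterParams{
		Cid:       ci,
		Local:     false,
		Target:    api.TrackerStatusPinned,
		CheckFreq: time.Second,
	}
	gpi, err := client.WaitFor(ctx, cl, fp)
	if err != nil {
		return err
	}
	for p, info := range gpi.PeerMap {
		log.Printf("%s: %s", p, info.Status)
	}
	return nil
}
```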

View File

@ -61,7 +61,7 @@ func (c *defaultClient) doRequest(
urlpath := c.net + "://" + c.hostname + "/" + strings.TrimPrefix(path, "/")
logger.Debugf("%s: %s", method, urlpath)
r, err := http.NewRequest(method, urlpath, body)
r, err := http.NewRequestWithContext(ctx, method, urlpath, body)
if err != nil {
return nil, err
}
@ -73,10 +73,8 @@ func (c *defaultClient) doRequest(
r.SetBasicAuth(c.config.Username, c.config.Password)
}
if headers != nil {
for k, v := range headers {
r.Header.Set(k, v)
}
for k, v := range headers {
r.Header.Set(k, v)
}
if body != nil {
@ -101,7 +99,7 @@ func (c *defaultClient) handleResponse(resp *http.Response, obj interface{}) err
case resp.StatusCode == http.StatusAccepted:
logger.Debug("Request accepted")
case resp.StatusCode == http.StatusNoContent:
logger.Debug("Request suceeded. Response has no content")
logger.Debug("Request succeeded. Response has no content")
default:
if resp.StatusCode > 399 && resp.StatusCode < 600 {
var apiErr api.Error

View File

@ -11,14 +11,11 @@ import (
libp2p "github.com/libp2p/go-libp2p"
peer "github.com/libp2p/go-libp2p-core/peer"
peerstore "github.com/libp2p/go-libp2p-core/peerstore"
ipnet "github.com/libp2p/go-libp2p-core/pnet"
p2phttp "github.com/libp2p/go-libp2p-http"
pnet "github.com/libp2p/go-libp2p-pnet"
libp2pquic "github.com/libp2p/go-libp2p-quic-transport"
secio "github.com/libp2p/go-libp2p-secio"
libp2ptls "github.com/libp2p/go-libp2p-tls"
madns "github.com/multiformats/go-multiaddr-dns"
manet "github.com/multiformats/go-multiaddr-net"
manet "github.com/multiformats/go-multiaddr/net"
"github.com/tv42/httpunix"
)
@ -54,25 +51,18 @@ func (c *defaultClient) enableLibp2p() error {
return errors.New("APIAddr only includes a Peer ID")
}
var prot ipnet.Protector
if c.config.ProtectorKey != nil && len(c.config.ProtectorKey) > 0 {
if len(c.config.ProtectorKey) != 32 {
return errors.New("length of ProtectorKey should be 32")
}
var key [32]byte
copy(key[:], c.config.ProtectorKey)
prot, err = pnet.NewV1ProtectorFromBytes(&key)
if err != nil {
return err
}
}
h, err := libp2p.New(c.ctx,
libp2p.PrivateNetwork(prot),
libp2p.PrivateNetwork(c.config.ProtectorKey),
libp2p.Security(libp2ptls.ID, libp2ptls.New),
libp2p.Security(secio.ID, secio.New),
libp2p.Transport(libp2pquic.NewTransport),
// TODO: quic does not support private networks
//libp2p.Transport(libp2pquic.NewTransport),
libp2p.DefaultTransports,
)
if err != nil {
@ -90,7 +80,7 @@ func (c *defaultClient) enableLibp2p() error {
c.transport.RegisterProtocol("libp2p", p2phttp.NewTransport(h))
c.net = "libp2p"
c.p2p = h
c.hostname = peer.IDB58Encode(pinfo.ID)
c.hostname = peer.Encode(pinfo.ID)
return nil
}

View File

@ -10,14 +10,15 @@ import (
"path/filepath"
"time"
"github.com/ipfs/ipfs-cluster/config"
ipfsconfig "github.com/ipfs/go-ipfs-config"
crypto "github.com/libp2p/go-libp2p-core/crypto"
peer "github.com/libp2p/go-libp2p-core/peer"
ma "github.com/multiformats/go-multiaddr"
"github.com/kelseyhightower/envconfig"
"github.com/rs/cors"
"github.com/ipfs/ipfs-cluster/config"
)
const configKey = "restapi"
@ -25,9 +26,13 @@ const envConfigKey = "cluster_restapi"
const minMaxHeaderBytes = 4096
// DefaultHTTPListenAddrs contains default listen addresses for the HTTP API.
var DefaultHTTPListenAddrs = []string{
"/ip4/127.0.0.1/tcp/9094",
}
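
Since HTTPListenAddr is now a slice, the API can bind several endpoints at once. A short sketch (the LAN address is invented for illustration):

```go
package sketch

import (
	rest "github.com/ipfs/ipfs-cluster/api/rest"
	ma "github.com/multiformats/go-multiaddr"
)

// makeAPIConfig builds a REST API config listening on two multiaddresses.
func makeAPIConfig() (*rest.Config, error) {
	cfg := &rest.Config{}
	if err := cfg.Default(); err != nil {
		return nil, err
	}
	local, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/9094")
	lan, _ := ma.NewMultiaddr("/ip4/192.168.1.10/tcp/9094") // hypothetical
	cfg.HTTPListenAddr = []ma.Multiaddr{local, lan}
	return cfg, nil
}
```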
// These are the default values for Config
const (
DefaultHTTPListenAddr = "/ip4/127.0.0.1/tcp/9094"
DefaultReadTimeout = 0
DefaultReadHeaderTimeout = 5 * time.Second
DefaultWriteTimeout = 0
@ -66,7 +71,7 @@ type Config struct {
config.Saver
// Listen address for the HTTP REST API endpoint.
HTTPListenAddr ma.Multiaddr
HTTPListenAddr []ma.Multiaddr
// TLS configuration for the HTTP listener
TLS *tls.Config
@ -97,7 +102,7 @@ type Config struct {
MaxHeaderBytes int
// Listen address for the Libp2p REST API endpoint.
Libp2pListenAddr ma.Multiaddr
Libp2pListenAddr []ma.Multiaddr
// ID and PrivateKey are used to create a libp2p host if we
// want the API component to do it (not by default).
@ -131,20 +136,20 @@ type Config struct {
}
type jsonConfig struct {
HTTPListenMultiaddress string `json:"http_listen_multiaddress"`
SSLCertFile string `json:"ssl_cert_file,omitempty"`
SSLKeyFile string `json:"ssl_key_file,omitempty"`
ReadTimeout string `json:"read_timeout"`
ReadHeaderTimeout string `json:"read_header_timeout"`
WriteTimeout string `json:"write_timeout"`
IdleTimeout string `json:"idle_timeout"`
MaxHeaderBytes int `json:"max_header_bytes"`
HTTPListenMultiaddress ipfsconfig.Strings `json:"http_listen_multiaddress"`
SSLCertFile string `json:"ssl_cert_file,omitempty"`
SSLKeyFile string `json:"ssl_key_file,omitempty"`
ReadTimeout string `json:"read_timeout"`
ReadHeaderTimeout string `json:"read_header_timeout"`
WriteTimeout string `json:"write_timeout"`
IdleTimeout string `json:"idle_timeout"`
MaxHeaderBytes int `json:"max_header_bytes"`
Libp2pListenMultiaddress string `json:"libp2p_listen_multiaddress,omitempty"`
ID string `json:"id,omitempty"`
PrivateKey string `json:"private_key,omitempty"`
Libp2pListenMultiaddress ipfsconfig.Strings `json:"libp2p_listen_multiaddress,omitempty"`
ID string `json:"id,omitempty"`
PrivateKey string `json:"private_key,omitempty" hidden:"true"`
BasicAuthCredentials map[string]string `json:"basic_auth_credentials"`
BasicAuthCredentials map[string]string `json:"basic_auth_credentials" hidden:"true"`
HTTPLogFile string `json:"http_log_file"`
Headers map[string][]string `json:"headers"`
@ -179,8 +184,15 @@ func (cfg *Config) ConfigKey() string {
// Default initializes this Config with working values.
func (cfg *Config) Default() error {
// http
httpListen, _ := ma.NewMultiaddr(DefaultHTTPListenAddr)
cfg.HTTPListenAddr = httpListen
addrs := make([]ma.Multiaddr, 0, len(DefaultHTTPListenAddrs))
for _, def := range DefaultHTTPListenAddrs {
httpListen, err := ma.NewMultiaddr(def)
if err != nil {
return err
}
addrs = append(addrs, httpListen)
}
cfg.HTTPListenAddr = addrs
cfg.pathSSLCertFile = ""
cfg.pathSSLKeyFile = ""
cfg.ReadTimeout = DefaultReadTimeout
@ -225,7 +237,6 @@ func (cfg *Config) ApplyEnvVars() error {
if err != nil {
return err
}
return cfg.applyJSONConfig(jcfg)
}
@ -255,9 +266,9 @@ func (cfg *Config) Validate() error {
}
func (cfg *Config) validateLibp2p() error {
if cfg.ID != "" || cfg.PrivateKey != nil || cfg.Libp2pListenAddr != nil {
if cfg.ID != "" || cfg.PrivateKey != nil || len(cfg.Libp2pListenAddr) > 0 {
// if one is set, all should be
if cfg.ID == "" || cfg.PrivateKey == nil || cfg.Libp2pListenAddr == nil {
if cfg.ID == "" || cfg.PrivateKey == nil || len(cfg.Libp2pListenAddr) == 0 {
return errors.New("all ID, private_key and libp2p_listen_multiaddress should be set")
}
if !cfg.ID.MatchesPrivateKey(cfg.PrivateKey) {
@ -288,6 +299,7 @@ func (cfg *Config) applyJSONConfig(jcfg *jsonConfig) error {
if err != nil {
return err
}
err = cfg.loadLibp2pOptions(jcfg)
if err != nil {
return err
@ -302,13 +314,16 @@ func (cfg *Config) applyJSONConfig(jcfg *jsonConfig) error {
}
func (cfg *Config) loadHTTPOptions(jcfg *jsonConfig) error {
if httpListen := jcfg.HTTPListenMultiaddress; httpListen != "" {
httpAddr, err := ma.NewMultiaddr(httpListen)
if err != nil {
err = fmt.Errorf("error parsing restapi.http_listen_multiaddress: %s", err)
return err
if addresses := jcfg.HTTPListenMultiaddress; len(addresses) > 0 {
cfg.HTTPListenAddr = make([]ma.Multiaddr, 0, len(addresses))
for _, addr := range addresses {
httpAddr, err := ma.NewMultiaddr(addr)
if err != nil {
err = fmt.Errorf("error parsing restapi.http_listen_multiaddress: %s", err)
return err
}
cfg.HTTPListenAddr = append(cfg.HTTPListenAddr, httpAddr)
}
cfg.HTTPListenAddr = httpAddr
}
err := cfg.tlsOptions(jcfg)
@ -373,13 +388,16 @@ func (cfg *Config) tlsOptions(jcfg *jsonConfig) error {
}
func (cfg *Config) loadLibp2pOptions(jcfg *jsonConfig) error {
if libp2pListen := jcfg.Libp2pListenMultiaddress; libp2pListen != "" {
libp2pAddr, err := ma.NewMultiaddr(libp2pListen)
if err != nil {
err = fmt.Errorf("error parsing restapi.libp2p_listen_multiaddress: %s", err)
return err
if addresses := jcfg.Libp2pListenMultiaddress; len(addresses) > 0 {
cfg.Libp2pListenAddr = make([]ma.Multiaddr, 0, len(addresses))
for _, addr := range addresses {
libp2pAddr, err := ma.NewMultiaddr(addr)
if err != nil {
err = fmt.Errorf("error parsing restapi.libp2p_listen_multiaddress: %s", err)
return err
}
cfg.Libp2pListenAddr = append(cfg.Libp2pListenAddr, libp2pAddr)
}
cfg.Libp2pListenAddr = libp2pAddr
}
if jcfg.PrivateKey != "" {
@ -395,7 +413,7 @@ func (cfg *Config) loadLibp2pOptions(jcfg *jsonConfig) error {
}
if jcfg.ID != "" {
id, err := peer.IDB58Decode(jcfg.ID)
id, err := peer.Decode(jcfg.ID)
if err != nil {
return fmt.Errorf("error parsing restapi.ID: %s", err)
}
@ -424,8 +442,18 @@ func (cfg *Config) toJSONConfig() (jcfg *jsonConfig, err error) {
}
}()
httpAddresses := make([]string, 0, len(cfg.HTTPListenAddr))
for _, addr := range cfg.HTTPListenAddr {
httpAddresses = append(httpAddresses, addr.String())
}
libp2pAddresses := make([]string, 0, len(cfg.Libp2pListenAddr))
for _, addr := range cfg.Libp2pListenAddr {
libp2pAddresses = append(libp2pAddresses, addr.String())
}
jcfg = &jsonConfig{
HTTPListenMultiaddress: cfg.HTTPListenAddr.String(),
HTTPListenMultiaddress: httpAddresses,
SSLCertFile: cfg.pathSSLCertFile,
SSLKeyFile: cfg.pathSSLKeyFile,
ReadTimeout: cfg.ReadTimeout.String(),
@ -445,7 +473,7 @@ func (cfg *Config) toJSONConfig() (jcfg *jsonConfig, err error) {
}
if cfg.ID != "" {
jcfg.ID = peer.IDB58Encode(cfg.ID)
jcfg.ID = peer.Encode(cfg.ID)
}
if cfg.PrivateKey != nil {
pkeyBytes, err := cfg.PrivateKey.Bytes()
@ -454,8 +482,8 @@ func (cfg *Config) toJSONConfig() (jcfg *jsonConfig, err error) {
jcfg.PrivateKey = pKey
}
}
if cfg.Libp2pListenAddr != nil {
jcfg.Libp2pListenMultiaddress = cfg.Libp2pListenAddr.String()
if len(libp2pAddresses) > 0 {
jcfg.Libp2pListenMultiaddress = libp2pAddresses
}
return
@ -475,6 +503,16 @@ func (cfg *Config) corsOptions() *cors.Options {
}
}
// ToDisplayJSON returns JSON config as a string.
func (cfg *Config) ToDisplayJSON() ([]byte, error) {
jcfg, err := cfg.toJSONConfig()
if err != nil {
return nil, err
}
return config.DisplayJSON(jcfg)
}
func newTLSConfig(certFile, keyFile string) (*tls.Config, error) {
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {

View File

@ -58,7 +58,7 @@ func TestLoadJSON(t *testing.T) {
j := &jsonConfig{}
json.Unmarshal(cfgJSON, j)
j.HTTPListenMultiaddress = "abc"
j.HTTPListenMultiaddress = []string{"abc"}
tst, _ := json.Marshal(j)
err = cfg.LoadJSON(tst)
if err == nil {
@ -103,7 +103,7 @@ func TestLoadJSON(t *testing.T) {
j = &jsonConfig{}
json.Unmarshal(cfgJSON, j)
j.Libp2pListenMultiaddress = "abc"
j.Libp2pListenMultiaddress = []string{"abc"}
tst, _ = json.Marshal(j)
err = cfg.LoadJSON(tst)
if err == nil {
@ -178,8 +178,8 @@ func TestLibp2pConfig(t *testing.T) {
cfg.ID = pid
cfg.PrivateKey = priv
addr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0")
cfg.HTTPListenAddr = addr
cfg.Libp2pListenAddr = addr
cfg.HTTPListenAddr = []ma.Multiaddr{addr}
cfg.Libp2pListenAddr = []ma.Multiaddr{addr}
err = cfg.Validate()
if err != nil {
@ -203,7 +203,7 @@ func TestLibp2pConfig(t *testing.T) {
}
defer rest.Shutdown(ctx)
badPid, _ := peer.IDB58Decode("QmTQ6oKHDwFjzr4ihirVCLJe8CxanxD3ZjGRYzubFuNDjE")
badPid, _ := peer.Decode("QmTQ6oKHDwFjzr4ihirVCLJe8CxanxD3ZjGRYzubFuNDjE")
cfg.ID = badPid
err = cfg.Validate()
if err == nil {

View File

@ -27,7 +27,7 @@ import (
"github.com/ipfs/ipfs-cluster/state"
cid "github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log"
logging "github.com/ipfs/go-log/v2"
gopath "github.com/ipfs/go-path"
libp2p "github.com/libp2p/go-libp2p"
host "github.com/libp2p/go-libp2p-core/host"
@ -38,8 +38,7 @@ import (
libp2pquic "github.com/libp2p/go-libp2p-quic-transport"
secio "github.com/libp2p/go-libp2p-secio"
libp2ptls "github.com/libp2p/go-libp2p-tls"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr-net"
manet "github.com/multiformats/go-multiaddr/net"
handlers "github.com/gorilla/handlers"
mux "github.com/gorilla/mux"
@ -73,9 +72,6 @@ var (
// Used by sendResponse to set the right status
const autoStatus = -1
// For making a random sharding ID
var letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
// API implements an API and aims to provide
// a RESTful HTTP API for Cluster.
type API struct {
@ -91,7 +87,7 @@ type API struct {
server *http.Server
host host.Host
httpListener net.Listener
httpListeners []net.Listener
libp2pListener net.Listener
shutdownLock sync.Mutex
@ -187,19 +183,19 @@ func NewAPIWithHost(ctx context.Context, cfg *Config, h host.Host) (*API, error)
}
api.addRoutes(router)
// Set up api.httpListener if enabled
// Set up api.httpListeners if enabled
err = api.setupHTTP()
if err != nil {
return nil, err
}
// Set up api.libp2pListener if enabled
// Set up api.libp2pListeners if enabled
err = api.setupLibp2p()
if err != nil {
return nil, err
}
if api.httpListener == nil && api.libp2pListener == nil {
if len(api.httpListeners) == 0 && api.libp2pListener == nil {
return nil, ErrNoEndpointsEnabled
}
@ -208,39 +204,41 @@ func NewAPIWithHost(ctx context.Context, cfg *Config, h host.Host) (*API, error)
}
func (api *API) setupHTTP() error {
if api.config.HTTPListenAddr == nil {
if len(api.config.HTTPListenAddr) == 0 {
return nil
}
n, addr, err := manet.DialArgs(api.config.HTTPListenAddr)
if err != nil {
return err
}
for _, listenMAddr := range api.config.HTTPListenAddr {
n, addr, err := manet.DialArgs(listenMAddr)
if err != nil {
return err
}
var l net.Listener
if api.config.TLS != nil {
l, err = tls.Listen(n, addr, api.config.TLS)
} else {
l, err = net.Listen(n, addr)
var l net.Listener
if api.config.TLS != nil {
l, err = tls.Listen(n, addr, api.config.TLS)
} else {
l, err = net.Listen(n, addr)
}
if err != nil {
return err
}
api.httpListeners = append(api.httpListeners, l)
}
if err != nil {
return err
}
api.httpListener = l
return nil
}
func (api *API) setupLibp2p() error {
// Make new host. Override any provided existing one
// if we have config for a custom one.
if api.config.Libp2pListenAddr != nil {
if len(api.config.Libp2pListenAddr) > 0 {
// We use a new host context. We will call
// Close() on shutdown(). Avoids things like:
// https://github.com/ipfs/ipfs-cluster/issues/853
h, err := libp2p.New(
context.Background(),
libp2p.Identity(api.config.PrivateKey),
libp2p.ListenAddrs([]ma.Multiaddr{api.config.Libp2pListenAddr}...),
libp2p.ListenAddrs(api.config.Libp2pListenAddr...),
libp2p.Security(libp2ptls.ID, libp2ptls.New),
libp2p.Security(secio.ID, secio.New),
libp2p.Transport(libp2pquic.NewTransport),
@ -264,15 +262,20 @@ func (api *API) setupLibp2p() error {
return nil
}
// HTTPAddress returns the HTTP(s) listening address
// HTTPAddresses returns the HTTP(s) listening addresses
// in host:port format. Useful when configured to start
// on a random port (0). Returns error when the HTTP endpoint
// is not enabled.
func (api *API) HTTPAddress() (string, error) {
if api.httpListener == nil {
return "", ErrHTTPEndpointNotEnabled
func (api *API) HTTPAddresses() ([]string, error) {
if len(api.httpListeners) == 0 {
return nil, ErrHTTPEndpointNotEnabled
}
return api.httpListener.Addr().String(), nil
var addrs []string
for _, l := range api.httpListeners {
addrs = append(addrs, l.Addr().String())
}
return addrs, nil
}
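
On the caller side this could look roughly like the following sketch (assuming a started *rest.API):

```go
package sketch

import (
	"log"

	rest "github.com/ipfs/ipfs-cluster/api/rest"
)

// logListenAddrs prints every address the HTTP endpoint is bound to; useful
// when the configuration requested a random port (tcp/0).
func logListenAddrs(a *rest.API) {
	addrs, err := a.HTTPAddresses()
	if err != nil {
		log.Println("HTTP endpoint not enabled:", err)
		return
	}
	for _, addr := range addrs {
		log.Println("REST API bound to", addr)
	}
}
```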
// Host returns the libp2p Host used by the API, if any.
@ -318,7 +321,7 @@ func basicAuthHandler(credentials map[string]string, h http.Handler) http.Handle
logger.Error(err)
return
}
http.Error(w, resp, 401)
http.Error(w, resp, http.StatusUnauthorized)
return
}
@ -334,7 +337,7 @@ func basicAuthHandler(credentials map[string]string, h http.Handler) http.Handle
logger.Error(err)
return
}
http.Error(w, resp, 401)
http.Error(w, resp, http.StatusUnauthorized)
return
}
h.ServeHTTP(w, r)
@ -485,28 +488,38 @@ func (api *API) routes() []route {
}
func (api *API) run(ctx context.Context) {
if api.httpListener != nil {
api.wg.Add(1)
go api.runHTTPServer(ctx)
api.wg.Add(len(api.httpListeners))
for _, l := range api.httpListeners {
go func(l net.Listener) {
defer api.wg.Done()
api.runHTTPServer(ctx, l)
}(l)
}
if api.libp2pListener != nil {
api.wg.Add(1)
go api.runLibp2pServer(ctx)
go func() {
defer api.wg.Done()
api.runLibp2pServer(ctx)
}()
}
}
// runs in goroutine from run()
func (api *API) runHTTPServer(ctx context.Context) {
defer api.wg.Done()
func (api *API) runHTTPServer(ctx context.Context, l net.Listener) {
select {
case <-api.rpcReady:
case <-api.ctx.Done():
return
}
logger.Infof("REST API (HTTP): %s", api.config.HTTPListenAddr)
err := api.server.Serve(api.httpListener)
maddr, err := manet.FromNetAddr(l.Addr())
if err != nil {
logger.Error(err)
}
logger.Infof("REST API (HTTP): %s", maddr)
err = api.server.Serve(l)
if err != nil && !strings.Contains(err.Error(), "closed network connection") {
logger.Error(err)
}
@ -514,8 +527,6 @@ func (api *API) runHTTPServer(ctx context.Context) {
// runs in goroutine from run()
func (api *API) runLibp2pServer(ctx context.Context) {
defer api.wg.Done()
select {
case <-api.rpcReady:
case <-api.ctx.Done():
@ -556,9 +567,10 @@ func (api *API) Shutdown(ctx context.Context) error {
// Cancel any outstanding ops
api.server.SetKeepAlivesEnabled(false)
if api.httpListener != nil {
api.httpListener.Close()
for _, l := range api.httpListeners {
l.Close()
}
if api.libp2pListener != nil {
api.libp2pListener.Close()
}
@ -690,8 +702,6 @@ func (api *API) addHandler(w http.ResponseWriter, r *http.Request) {
w,
nil,
)
return
}
func (api *API) peerListHandler(w http.ResponseWriter, r *http.Request) {
@ -719,7 +729,7 @@ func (api *API) peerAddHandler(w http.ResponseWriter, r *http.Request) {
return
}
pid, err := peer.IDB58Decode(addInfo.PeerID)
pid, err := peer.Decode(addInfo.PeerID)
if err != nil {
api.sendResponse(w, http.StatusBadRequest, errors.New("error decoding peer_id"), nil)
return
@ -969,7 +979,7 @@ func (api *API) statusHandler(w http.ResponseWriter, r *http.Request) {
pin.Cid,
&pinInfo,
)
api.sendResponse(w, autoStatus, err, pinInfoToGlobal(&pinInfo))
api.sendResponse(w, autoStatus, err, pinInfo.ToGlobal())
} else {
var pinInfo types.GlobalPinInfo
err := api.rpcClient.CallContext(
@ -1028,7 +1038,7 @@ func (api *API) recoverHandler(w http.ResponseWriter, r *http.Request) {
pin.Cid,
&pinInfo,
)
api.sendResponse(w, autoStatus, err, pinInfoToGlobal(&pinInfo))
api.sendResponse(w, autoStatus, err, pinInfo.ToGlobal())
} else {
var pinInfo types.GlobalPinInfo
err := api.rpcClient.CallContext(
@ -1078,7 +1088,7 @@ func (api *API) repoGCHandler(w http.ResponseWriter, r *http.Request) {
func repoGCToGlobal(r *types.RepoGC) types.GlobalRepoGC {
return types.GlobalRepoGC{
PeerMap: map[string]*types.RepoGC{
peer.IDB58Encode(r.Peer): r,
peer.Encode(r.Peer): r,
},
}
}
@ -1128,7 +1138,7 @@ func (api *API) parseCidOrError(w http.ResponseWriter, r *http.Request) *types.P
func (api *API) parsePidOrError(w http.ResponseWriter, r *http.Request) peer.ID {
vars := mux.Vars(r)
idStr := vars["peer"]
pid, err := peer.IDB58Decode(idStr)
pid, err := peer.Decode(idStr)
if err != nil {
api.sendResponse(w, http.StatusBadRequest, errors.New("error decoding Peer ID: "+err.Error()), nil)
return ""
@ -1136,19 +1146,10 @@ func (api *API) parsePidOrError(w http.ResponseWriter, r *http.Request) peer.ID
return pid
}
func pinInfoToGlobal(pInfo *types.PinInfo) *types.GlobalPinInfo {
return &types.GlobalPinInfo{
Cid: pInfo.Cid,
PeerMap: map[string]*types.PinInfo{
peer.IDB58Encode(pInfo.Peer): pInfo,
},
}
}
func pinInfosToGlobal(pInfos []*types.PinInfo) []*types.GlobalPinInfo {
gPInfos := make([]*types.GlobalPinInfo, len(pInfos), len(pInfos))
gPInfos := make([]*types.GlobalPinInfo, len(pInfos))
for i, p := range pInfos {
gPInfos[i] = pinInfoToGlobal(p)
gPInfos[i] = p.ToGlobal()
}
return gPInfos
}

View File

@ -50,7 +50,7 @@ func testAPIwithConfig(t *testing.T, cfg *Config, name string) *API {
t.Fatal(err)
}
cfg.HTTPListenAddr = apiMAddr
cfg.HTTPListenAddr = []ma.Multiaddr{apiMAddr}
rest, err := NewAPIWithHost(ctx, cfg, h)
if err != nil {
@ -174,17 +174,17 @@ func makeHost(t *testing.T, rest *API) host.Host {
type urlF func(a *API) string
func httpURL(a *API) string {
u, _ := a.HTTPAddress()
return fmt.Sprintf("http://%s", u)
u, _ := a.HTTPAddresses()
return fmt.Sprintf("http://%s", u[0])
}
func p2pURL(a *API) string {
return fmt.Sprintf("libp2p://%s", peer.IDB58Encode(a.Host().ID()))
return fmt.Sprintf("libp2p://%s", peer.Encode(a.Host().ID()))
}
func httpsURL(a *API) string {
u, _ := a.HTTPAddress()
return fmt.Sprintf("https://%s", u)
u, _ := a.HTTPAddresses()
return fmt.Sprintf("https://%s", u[0])
}
func isHTTPS(url string) bool {
@ -558,10 +558,10 @@ func TestConnectGraphEndpoint(t *testing.T) {
// test a few link values
pid1 := test.PeerID1
pid4 := test.PeerID4
if _, ok := cg.ClustertoIPFS[peer.IDB58Encode(pid1)]; !ok {
if _, ok := cg.ClustertoIPFS[peer.Encode(pid1)]; !ok {
t.Fatal("missing cluster peer 1 from cluster to peer links map")
}
if cg.ClustertoIPFS[peer.IDB58Encode(pid1)] != pid4 {
if cg.ClustertoIPFS[peer.Encode(pid1)] != pid4 {
t.Error("unexpected ipfs peer mapped to cluster peer 1 in graph")
}
}
@ -876,7 +876,7 @@ func TestAPIStatusAllEndpoint(t *testing.T) {
if len(resp) != 3 ||
!resp[0].Cid.Equals(test.Cid1) ||
resp[1].PeerMap[peer.IDB58Encode(test.PeerID1)].Status.String() != "pinning" {
resp[1].PeerMap[peer.Encode(test.PeerID1)].Status.String() != "pinning" {
t.Errorf("unexpected statusAll resp")
}
@ -940,7 +940,7 @@ func TestAPIStatusEndpoint(t *testing.T) {
if !resp.Cid.Equals(test.Cid1) {
t.Error("expected the same cid")
}
info, ok := resp.PeerMap[peer.IDB58Encode(test.PeerID1)]
info, ok := resp.PeerMap[peer.Encode(test.PeerID1)]
if !ok {
t.Fatal("expected info for test.PeerID1")
}
@ -955,7 +955,7 @@ func TestAPIStatusEndpoint(t *testing.T) {
if !resp2.Cid.Equals(test.Cid1) {
t.Error("expected the same cid")
}
info, ok = resp2.PeerMap[peer.IDB58Encode(test.PeerID2)]
info, ok = resp2.PeerMap[peer.Encode(test.PeerID2)]
if !ok {
t.Fatal("expected info for test.PeerID2")
}
@ -979,7 +979,7 @@ func TestAPIRecoverEndpoint(t *testing.T) {
if !resp.Cid.Equals(test.Cid1) {
t.Error("expected the same cid")
}
info, ok := resp.PeerMap[peer.IDB58Encode(test.PeerID1)]
info, ok := resp.PeerMap[peer.Encode(test.PeerID1)]
if !ok {
t.Fatal("expected info for test.PeerID1")
}
@ -1160,7 +1160,7 @@ func TestCORS(t *testing.T) {
reqHeaders.Set("Access-Control-Request-Headers", "Content-Type")
for _, tc := range []testcase{
testcase{"GET", "/pins"},
{"GET", "/pins"},
// testcase{},
} {
reqHeaders.Set("Access-Control-Request-Method", tc.method)
@ -1291,116 +1291,116 @@ func TestBasicAuth(t *testing.T) {
defer rest.Shutdown(ctx)
for _, tc := range []httpTestcase{
httpTestcase{},
httpTestcase{
{},
{
method: "",
path: "",
checker: assertHTTPStatusIsUnauthoriazed,
},
httpTestcase{
{
method: "GET",
path: "",
checker: assertHTTPStatusIsUnauthoriazed,
},
httpTestcase{
{
method: "GET",
path: "/",
checker: assertHTTPStatusIsUnauthoriazed,
},
httpTestcase{
{
method: "GET",
path: "/foo",
checker: assertHTTPStatusIsUnauthoriazed,
},
httpTestcase{
{
method: "POST",
path: "/foo",
checker: assertHTTPStatusIsUnauthoriazed,
},
httpTestcase{
{
method: "DELETE",
path: "/foo",
checker: assertHTTPStatusIsUnauthoriazed,
},
httpTestcase{
{
method: "HEAD",
path: "/foo",
checker: assertHTTPStatusIsUnauthoriazed,
},
httpTestcase{
{
method: "OPTIONS",
path: "/foo",
checker: assertHTTPStatusIsUnauthoriazed,
},
httpTestcase{
{
method: "PUT",
path: "/foo",
checker: assertHTTPStatusIsUnauthoriazed,
},
httpTestcase{
{
method: "TRACE",
path: "/foo",
checker: assertHTTPStatusIsUnauthoriazed,
},
httpTestcase{
{
method: "CONNECT",
path: "/foo",
checker: assertHTTPStatusIsUnauthoriazed,
},
httpTestcase{
{
method: "BAR",
path: "/foo",
checker: assertHTTPStatusIsUnauthoriazed,
},
httpTestcase{
{
method: "GET",
path: "/foo",
shaper: makeBasicAuthRequestShaper(invalidUserName, invalidUserPassword),
checker: assertHTTPStatusIsUnauthoriazed,
},
httpTestcase{
{
method: "GET",
path: "/foo",
shaper: makeBasicAuthRequestShaper(validUserName, invalidUserPassword),
checker: assertHTTPStatusIsUnauthoriazed,
},
httpTestcase{
{
method: "GET",
path: "/foo",
shaper: makeBasicAuthRequestShaper(invalidUserName, validUserPassword),
checker: assertHTTPStatusIsUnauthoriazed,
},
httpTestcase{
{
method: "GET",
path: "/foo",
shaper: makeBasicAuthRequestShaper(adminUserName, validUserPassword),
checker: assertHTTPStatusIsUnauthoriazed,
},
httpTestcase{
{
method: "GET",
path: "/foo",
shaper: makeBasicAuthRequestShaper(validUserName, validUserPassword),
checker: makeHTTPStatusNegatedAssert(assertHTTPStatusIsUnauthoriazed),
},
httpTestcase{
{
method: "POST",
path: "/foo",
shaper: makeBasicAuthRequestShaper(validUserName, validUserPassword),
checker: makeHTTPStatusNegatedAssert(assertHTTPStatusIsUnauthoriazed),
},
httpTestcase{
{
method: "DELETE",
path: "/foo",
shaper: makeBasicAuthRequestShaper(validUserName, validUserPassword),
checker: makeHTTPStatusNegatedAssert(assertHTTPStatusIsUnauthoriazed),
},
httpTestcase{
{
method: "BAR",
path: "/foo",
shaper: makeBasicAuthRequestShaper(validUserName, validUserPassword),
checker: makeHTTPStatusNegatedAssert(assertHTTPStatusIsUnauthoriazed),
},
httpTestcase{
{
method: "GET",
path: "/id",
shaper: makeBasicAuthRequestShaper(validUserName, validUserPassword),
@ -1421,13 +1421,13 @@ func TestLimitMaxHeaderSize(t *testing.T) {
defer rest.Shutdown(ctx)
for _, tc := range []httpTestcase{
httpTestcase{
{
method: "GET",
path: "/foo",
shaper: makeLongHeaderShaper(maxHeaderBytes * 2),
checker: assertHTTPStatusIsTooLarge,
},
httpTestcase{
{
method: "GET",
path: "/foo",
shaper: makeLongHeaderShaper(maxHeaderBytes / 2),

View File

@ -20,7 +20,7 @@ import (
pb "github.com/ipfs/ipfs-cluster/api/pb"
cid "github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log"
logging "github.com/ipfs/go-log/v2"
peer "github.com/libp2p/go-libp2p-core/peer"
protocol "github.com/libp2p/go-libp2p-core/protocol"
multiaddr "github.com/multiformats/go-multiaddr"
@ -30,8 +30,8 @@ import (
// needed to parse /dns* multiaddresses
_ "github.com/multiformats/go-multiaddr-dns"
proto "github.com/gogo/protobuf/proto"
"github.com/pkg/errors"
proto "google.golang.org/protobuf/proto"
)
var logger = logging.Logger("apitypes")
@ -215,7 +215,7 @@ func IPFSPinStatusFromString(t string) IPFSPinStatus {
// IsPinned returns true if the item is pinned as expected by the
// maxDepth parameter.
func (ips IPFSPinStatus) IsPinned(maxDepth int) bool {
func (ips IPFSPinStatus) IsPinned(maxDepth PinDepth) bool {
switch {
case maxDepth < 0:
return ips == IPFSPinStatusRecursive
@ -246,11 +246,12 @@ var ipfsPinStatus2TrackerStatusMap = map[IPFSPinStatus]TrackerStatus{
// GlobalPinInfo contains cluster-wide status information about a tracked Cid,
// indexed by cluster peer.
type GlobalPinInfo struct {
Cid cid.Cid `json:"cid" codec:"c"`
Cid cid.Cid `json:"cid" codec:"c"`
Name string `json:"name" codec:"n"`
// https://github.com/golang/go/issues/28827
// Peer IDs are of string Kind(). We can't use peer IDs here
// as Go ignores TextMarshaler.
PeerMap map[string]*PinInfo `json:"peer_map" codec:"pm,omitempty"`
PeerMap map[string]*PinInfoShort `json:"peer_map" codec:"pm,omitempty"`
}
// String returns the string representation of a GlobalPinInfo.
@ -263,16 +264,47 @@ func (gpi *GlobalPinInfo) String() string {
return str
}
// PinInfo holds information about local pins.
type PinInfo struct {
Cid cid.Cid `json:"cid" codec:"c"`
Peer peer.ID `json:"peer" codec:"p,omitempty"`
// Add adds a PinInfo object to a GlobalPinInfo
func (gpi *GlobalPinInfo) Add(pi *PinInfo) {
if !gpi.Cid.Defined() {
gpi.Cid = pi.Cid
gpi.Name = pi.Name
}
if gpi.PeerMap == nil {
gpi.PeerMap = make(map[string]*PinInfoShort)
}
gpi.PeerMap[peer.Encode(pi.Peer)] = &pi.PinInfoShort
}
// PinInfoShort is a subset of PinInfo which is embedded in GlobalPinInfo
// objects and does not carry redundant information as PinInfo would.
type PinInfoShort struct {
PeerName string `json:"peername" codec:"pn,omitempty"`
Status TrackerStatus `json:"status" codec:"st,omitempty"`
TS time.Time `json:"timestamp" codec:"ts,omitempty"`
Error string `json:"error" codec:"e,omitempty"`
}
// PinInfo holds information about local pins. This is used by the Pin
// Trackers.
type PinInfo struct {
Cid cid.Cid `json:"cid" codec:"c"`
Name string `json:"name" codec:"m,omitempty"`
Peer peer.ID `json:"Peer" codec:"p,omitempty"`
PinInfoShort
}
// ToGlobal converts a PinInfo object to a GlobalPinInfo with
// a single peer corresponding to the given PinInfo.
func (pi *PinInfo) ToGlobal() *GlobalPinInfo {
gpi := GlobalPinInfo{}
gpi.Add(pi)
return &gpi
}
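
An illustrative sketch (CID, peer IDs and names are placeholders) of folding per-peer PinInfo reports into one GlobalPinInfo via Add; a single report can use ToGlobal directly:

```go
package sketch

import (
	"time"

	cid "github.com/ipfs/go-cid"
	"github.com/ipfs/ipfs-cluster/api"
	peer "github.com/libp2p/go-libp2p-core/peer"
)

// aggregate merges two local reports the way cluster-wide status does.
func aggregate(c cid.Cid, p1, p2 peer.ID) *api.GlobalPinInfo {
	gpi := &api.GlobalPinInfo{}
	gpi.Add(&api.PinInfo{
		Cid:  c,
		Name: "my-pin",
		Peer: p1,
		PinInfoShort: api.PinInfoShort{
			Status: api.TrackerStatusPinned,
			TS:     time.Now(),
		},
	})
	gpi.Add(&api.PinInfo{
		Cid:  c,
		Peer: p2,
		PinInfoShort: api.PinInfoShort{
			Status: api.TrackerStatusPinning,
			TS:     time.Now(),
		},
	})
	return gpi
}
```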
// Version holds version information
type Version struct {
Version string `json:"version" codec:"v"`
@ -462,11 +494,76 @@ func (pT PinType) String() string {
var pinOptionsMetaPrefix = "meta-"
// PinMode is a PinOption that indicates how to pin something on IPFS,
// recursively or direct.
type PinMode int
// PinMode values
const (
PinModeRecursive PinMode = 0
PinModeDirect PinMode = 1
)
// PinModeFromString converts a string to PinMode.
func PinModeFromString(s string) PinMode {
switch s {
case "recursive", "":
return PinModeRecursive
case "direct":
return PinModeDirect
default:
logger.Warnf("unknown pin mode %s. Defaulting to recursive", s)
return PinModeRecursive
}
}
// String returns a human-readable value for PinMode.
func (pm PinMode) String() string {
switch pm {
case PinModeRecursive:
return "recursive"
case PinModeDirect:
return "direct"
default:
return "recursive"
}
}
// MarshalJSON converts the PinMode into a readable string in JSON.
func (pm PinMode) MarshalJSON() ([]byte, error) {
return json.Marshal(pm.String())
}
// UnmarshalJSON takes a JSON value and parses it into PinMode.
func (pm *PinMode) UnmarshalJSON(b []byte) error {
var s string
err := json.Unmarshal(b, &s)
if err != nil {
return err
}
*pm = PinModeFromString(s)
return nil
}
// ToPinDepth converts the Mode to Depth.
func (pm PinMode) ToPinDepth() PinDepth {
switch pm {
case PinModeRecursive:
return -1
case PinModeDirect:
return 0
default:
logger.Warn("unknown pin mode %d. Defaulting to -1 depth", pm)
return -1
}
}
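
A small sketch of how PinMode round-trips between its string, JSON and PinDepth forms:

```go
package sketch

import (
	"encoding/json"
	"fmt"

	"github.com/ipfs/ipfs-cluster/api"
)

// pinModeRoundTrip exercises the PinMode conversions added above.
func pinModeRoundTrip() {
	m := api.PinModeFromString("direct")
	fmt.Println(m.String())     // "direct"
	fmt.Println(m.ToPinDepth()) // 0

	b, _ := json.Marshal(m) // "\"direct\""
	var m2 api.PinMode
	_ = json.Unmarshal(b, &m2)
	fmt.Println(m2 == api.PinModeDirect) // true
}
```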
// PinOptions wraps user-defined options for Pins
type PinOptions struct {
ReplicationFactorMin int `json:"replication_factor_min" codec:"rn,omitempty"`
ReplicationFactorMax int `json:"replication_factor_max" codec:"rx,omitempty"`
Name string `json:"name" codec:"n,omitempty"`
Mode PinMode `json:"mode" codec:"o,omitempty"`
ShardSize uint64 `json:"shard_size" codec:"s,omitempty"`
UserAllocations []peer.ID `json:"user_allocations" codec:"ua,omitempty"`
ExpireAt time.Time `json:"expire_at" codec:"e,omitempty"`
@ -489,6 +586,10 @@ func (po *PinOptions) Equals(po2 *PinOptions) bool {
return false
}
if po.Mode != po2.Mode {
return false
}
if po.ReplicationFactorMax != po2.ReplicationFactorMax {
return false
}
@ -538,6 +639,7 @@ func (po *PinOptions) ToQuery() (string, error) {
q.Set("replication-min", fmt.Sprintf("%d", po.ReplicationFactorMin))
q.Set("replication-max", fmt.Sprintf("%d", po.ReplicationFactorMax))
q.Set("name", po.Name)
q.Set("mode", po.Mode.String())
q.Set("shard-size", fmt.Sprintf("%d", po.ShardSize))
q.Set("user-allocations", strings.Join(PeersToStrings(po.UserAllocations), ","))
if !po.ExpireAt.IsZero() {
@ -562,6 +664,9 @@ func (po *PinOptions) ToQuery() (string, error) {
// FromQuery is the inverse of ToQuery().
func (po *PinOptions) FromQuery(q url.Values) error {
po.Name = q.Get("name")
po.Mode = PinModeFromString(q.Get("mode"))
rplStr := q.Get("replication")
if rplStr != "" { // override
q.Set("replication-min", rplStr)
@ -631,6 +736,23 @@ func (po *PinOptions) FromQuery(q url.Values) error {
return nil
}
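
A rough round-trip sketch of ToQuery/FromQuery, roughly what the REST pin endpoints do with these options (values are arbitrary):

```go
package sketch

import (
	"fmt"
	"net/url"

	"github.com/ipfs/ipfs-cluster/api"
)

// queryRoundTrip encodes PinOptions as URL query values and decodes them back.
func queryRoundTrip() error {
	opts := api.PinOptions{
		ReplicationFactorMin: 2,
		ReplicationFactorMax: 3,
		Name:                 "backups",
		Mode:                 api.PinModeDirect,
	}

	qs, err := opts.ToQuery()
	if err != nil {
		return err
	}
	vals, err := url.ParseQuery(qs)
	if err != nil {
		return err
	}

	var opts2 api.PinOptions
	if err := opts2.FromQuery(vals); err != nil {
		return err
	}
	fmt.Println(opts2.Name, opts2.Mode) // "backups" direct
	return nil
}
```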
// PinDepth indicates how deep a pin should be pinned, with
// -1 meaning "to the bottom", or "recursive".
type PinDepth int
// ToPinMode converts PinDepth to PinMode
func (pd PinDepth) ToPinMode() PinMode {
switch pd {
case -1:
return PinModeRecursive
case 0:
return PinModeDirect
default:
logger.Warnf("bad pin depth: %d", pd)
return PinModeRecursive
}
}
// Pin carries all the information associated to a CID that is pinned
// in IPFS Cluster. It also carries transient information (that may not
// get protobuffed, like UserAllocations).
@ -647,7 +769,7 @@ type Pin struct {
// MaxDepth associated to this pin. -1 means
// recursive.
MaxDepth int `json:"max_depth" codec:"d,omitempty"`
MaxDepth PinDepth `json:"max_depth" codec:"d,omitempty"`
// We carry a reference CID to this pin. For
// ClusterDAGs, it is the MetaPin CID. For the
@ -683,15 +805,17 @@ func PinCid(c cid.Cid) *Pin {
Cid: c,
Type: DataType,
Allocations: []peer.ID{},
MaxDepth: -1,
MaxDepth: -1, // Recursive
}
}
// PinWithOpts creates a new Pin calling PinCid(c) and then sets
// its PinOptions fields with the given options.
// PinWithOpts creates a new Pin calling PinCid(c) and then sets its
// PinOptions fields with the given options. Pin fields that are linked to
// options are set accordingly (MaxDepth from Mode).
func PinWithOpts(c cid.Cid, opts PinOptions) *Pin {
p := PinCid(c)
p.PinOptions = opts
p.MaxDepth = p.Mode.ToPinDepth()
return p
}
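
For instance (a sketch; the name is arbitrary), a direct pin now gets MaxDepth 0 automatically:

```go
package sketch

import (
	cid "github.com/ipfs/go-cid"
	"github.com/ipfs/ipfs-cluster/api"
)

// directPin builds a Pin whose MaxDepth is derived from the "direct" mode.
func directPin(c cid.Cid) *api.Pin {
	p := api.PinWithOpts(c, api.PinOptions{
		Name: "website-root",
		Mode: api.PinModeDirect,
	})
	// p.MaxDepth == 0 here; PinModeRecursive would yield -1.
	return p
}
```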
@ -709,7 +833,7 @@ func convertPinType(t PinType) pb.Pin_PinType {
// ProtoMarshal marshals this Pin using protobuf.
func (pin *Pin) ProtoMarshal() ([]byte, error) {
allocs := make([][]byte, len(pin.Allocations), len(pin.Allocations))
allocs := make([][]byte, len(pin.Allocations))
for i, pid := range pin.Allocations {
bs, err := pid.Marshal()
if err != nil {
@ -729,10 +853,11 @@ func (pin *Pin) ProtoMarshal() ([]byte, error) {
ReplicationFactorMax: int32(pin.ReplicationFactorMax),
Name: pin.Name,
ShardSize: pin.ShardSize,
Metadata: pin.Metadata,
PinUpdate: pin.PinUpdate.Bytes(),
ExpireAt: expireAtProto,
// Mode: pin.Mode,
// UserAllocations: pin.UserAllocations,
Metadata: pin.Metadata,
PinUpdate: pin.PinUpdate.Bytes(),
ExpireAt: expireAtProto,
}
pbPin := &pb.Pin{
@ -766,7 +891,7 @@ func (pin *Pin) ProtoUnmarshal(data []byte) error {
pbAllocs := pbPin.GetAllocations()
lenAllocs := len(pbAllocs)
allocs := make([]peer.ID, lenAllocs, lenAllocs)
allocs := make([]peer.ID, lenAllocs)
for i, pidb := range pbAllocs {
pid, err := peer.IDFromBytes(pidb)
if err != nil {
@ -776,7 +901,7 @@ func (pin *Pin) ProtoUnmarshal(data []byte) error {
}
pin.Allocations = allocs
pin.MaxDepth = int(pbPin.GetMaxDepth())
pin.MaxDepth = PinDepth(pbPin.GetMaxDepth())
ref, err := cid.Cast(pbPin.GetReference())
if err != nil {
pin.Reference = nil
@ -800,6 +925,10 @@ func (pin *Pin) ProtoUnmarshal(data []byte) error {
if err == nil {
pin.PinUpdate = pinUpdate
}
// We do not store the PinMode option but we can
// derive it from the MaxDepth setting.
pin.Mode = pin.MaxDepth.ToPinMode()
return nil
}
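
A short serialization sketch; note the comment above: Mode is not stored on the wire and gets re-derived from MaxDepth when unmarshaling:

```go
package sketch

import (
	cid "github.com/ipfs/go-cid"
	"github.com/ipfs/ipfs-cluster/api"
)

// protoRoundTrip writes a Pin to its protobuf form and reads it back.
func protoRoundTrip(c cid.Cid) (*api.Pin, error) {
	p := api.PinCid(c) // recursive by default
	data, err := p.ProtoMarshal()
	if err != nil {
		return nil, err
	}
	var p2 api.Pin
	if err := p2.ProtoUnmarshal(data); err != nil {
		return nil, err
	}
	// p2.Mode is PinModeRecursive, derived from the unmarshaled MaxDepth.
	return &p2, nil
}
```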
@ -883,7 +1012,6 @@ type NodeWithMeta struct {
Data []byte `codec:"d,omitempty"`
Cid cid.Cid `codec:"c,omitempty"`
CumSize uint64 `codec:"s,omitempty"` // Cumulative size
Format string `codec:"f,omitempty"`
}
// Size returns how big is the block. It is different from CumSize, which
@ -915,7 +1043,7 @@ func (m *Metric) SetTTL(d time.Duration) {
// GetTTL returns the time left before the Metric expires
func (m *Metric) GetTTL() time.Duration {
expDate := time.Unix(0, m.Expire)
return expDate.Sub(time.Now())
return time.Until(expDate)
}
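
A tiny sketch of the TTL helpers (the metric name is an assumption; Expire holds an absolute UnixNano timestamp):

```go
package sketch

import (
	"fmt"
	"time"

	"github.com/ipfs/ipfs-cluster/api"
)

// metricTTL shows SetTTL/GetTTL: the TTL shrinks as the expiry approaches.
func metricTTL() {
	var m api.Metric
	m.Name = "ping" // assumed metric name for illustration
	m.SetTTL(30 * time.Second)
	fmt.Println(m.GetTTL())  // ~30s
	fmt.Println(m.Expired()) // false until the TTL elapses
}
```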
// Expired returns if the Metric has expired

View File

@ -10,26 +10,10 @@ import (
cid "github.com/ipfs/go-cid"
peer "github.com/libp2p/go-libp2p-core/peer"
ma "github.com/multiformats/go-multiaddr"
"github.com/ugorji/go/codec"
)
var testTime = time.Date(2017, 12, 31, 15, 45, 50, 0, time.UTC)
var testMAddr, _ = ma.NewMultiaddr("/ip4/1.2.3.4")
var testMAddr2, _ = ma.NewMultiaddr("/dns4/a.b.c.d")
var testMAddr3, _ = ma.NewMultiaddr("/ip4/127.0.0.1/tcp/8081/ws/p2p/QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd")
var testCid1, _ = cid.Decode("QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmq")
var testCid2, _ = cid.Decode("QmYCLpFCj9Av8NFjkQogvtXspnTDFWaizLpVFEijHTH4eV")
var testCid3, _ = cid.Decode("QmZmdA3UZKuHuy9FrWsxJ82q21nbEh97NUnxTzF5EHxZia")
var testCid4, _ = cid.Decode("QmZbNfi13Sb2WUDMjiW1ZNhnds5KDk6mJB5hP9B5h9m5CJ")
var testPeerID1, _ = peer.IDB58Decode("QmXZrtE5jQwXNqCJMfHUTQkvhQ4ZAnqMnmzFMJfLewuabc")
var testPeerID2, _ = peer.IDB58Decode("QmXZrtE5jQwXNqCJMfHUTQkvhQ4ZAnqMnmzFMJfLewuabd")
var testPeerID3, _ = peer.IDB58Decode("QmPGDFvBkgWhvzEK9qaTWrWurSwqXNmhnK3hgELPdZZNPa")
var testPeerID4, _ = peer.IDB58Decode("QmZ8naDy5mEz4GLuQwjWt9MPYqHTBbsm8tQBrNSjiq6zBc")
var testPeerID5, _ = peer.IDB58Decode("QmZVAo3wd8s5eTTy2kPYs34J9PvfxpKPuYsePPYGjgRRjg")
var testPeerID6, _ = peer.IDB58Decode("QmR8Vu6kZk7JvAN2rWVWgiduHatgBq2bb15Yyq8RRhYSbx")
func TestTrackerFromString(t *testing.T) {
testcases := []string{"cluster_error", "pin_error", "unpin_error", "pinned", "pinning", "unpinning", "unpinned", "remote"}
for i, tc := range testcases {
@ -175,7 +159,7 @@ func TestDupTags(t *testing.T) {
func TestPinOptionsQuery(t *testing.T) {
testcases := []*PinOptions{
&PinOptions{
{
ReplicationFactorMax: 3,
ReplicationFactorMin: 2,
Name: "abc",
@ -190,7 +174,7 @@ func TestPinOptionsQuery(t *testing.T) {
"hello2": "bye2",
},
},
&PinOptions{
{
ReplicationFactorMax: -1,
ReplicationFactorMin: 0,
Name: "",
@ -198,7 +182,7 @@ func TestPinOptionsQuery(t *testing.T) {
UserAllocations: []peer.ID{},
Metadata: nil,
},
&PinOptions{
{
ReplicationFactorMax: -1,
ReplicationFactorMin: 0,
Name: "",
@ -231,9 +215,9 @@ func TestPinOptionsQuery(t *testing.T) {
}
func TestIDCodec(t *testing.T) {
TestPeerID1, _ := peer.IDB58Decode("QmXZrtE5jQwXNqCJMfHUTQkvhQ4ZAnqMnmzFMJfLewuabc")
TestPeerID2, _ := peer.IDB58Decode("QmUZ13osndQ5uL4tPWHXe3iBgBgq9gfewcBMSCAuMBsDJ6")
TestPeerID3, _ := peer.IDB58Decode("QmPGDFvBkgWhvzEK9qaTWrWurSwqXNmhnK3hgELPdZZNPa")
TestPeerID1, _ := peer.Decode("QmXZrtE5jQwXNqCJMfHUTQkvhQ4ZAnqMnmzFMJfLewuabc")
TestPeerID2, _ := peer.Decode("QmUZ13osndQ5uL4tPWHXe3iBgBgq9gfewcBMSCAuMBsDJ6")
TestPeerID3, _ := peer.Decode("QmPGDFvBkgWhvzEK9qaTWrWurSwqXNmhnK3hgELPdZZNPa")
addr, _ := NewMultiaddr("/ip4/1.2.3.4")
id := &ID{
ID: TestPeerID1,

View File

@ -4,12 +4,12 @@ import (
peer "github.com/libp2p/go-libp2p-core/peer"
)
// PeersToStrings IDB58Encodes a list of peers.
// PeersToStrings encodes a list of peers with peer.Encode.
func PeersToStrings(peers []peer.ID) []string {
strs := make([]string, len(peers))
for i, p := range peers {
if p != "" {
strs[i] = peer.IDB58Encode(p)
strs[i] = peer.Encode(p)
}
}
return strs
@ -19,7 +19,7 @@ func PeersToStrings(peers []peer.ID) []string {
func StringsToPeers(strs []string) []peer.ID {
peers := []peer.ID{}
for _, p := range strs {
pid, err := peer.IDB58Decode(p)
pid, err := peer.Decode(p)
if err != nil {
logger.Debugf("'%s': %s", p, err)
continue

View File

@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"mime/multipart"
"sort"
"sync"
"time"
@ -24,7 +23,7 @@ import (
peer "github.com/libp2p/go-libp2p-core/peer"
peerstore "github.com/libp2p/go-libp2p-core/peerstore"
rpc "github.com/libp2p/go-libp2p-gorpc"
dht "github.com/libp2p/go-libp2p-kad-dht"
dual "github.com/libp2p/go-libp2p-kad-dht/dual"
"github.com/libp2p/go-libp2p/p2p/discovery"
ma "github.com/multiformats/go-multiaddr"
@ -58,7 +57,7 @@ type Cluster struct {
id peer.ID
config *Config
host host.Host
dht *dht.IpfsDHT
dht *dual.DHT
discovery discovery.Service
datastore ds.Datastore
@ -101,7 +100,7 @@ type Cluster struct {
func NewCluster(
ctx context.Context,
host host.Host,
dht *dht.IpfsDHT,
dht *dual.DHT,
cfg *Config,
datastore ds.Datastore,
consensus Consensus,
@ -186,13 +185,10 @@ func NewCluster(
// visible as peers without having to wait for them to send one.
for _, p := range connectedPeers {
if err := c.logPingMetric(ctx, p); err != nil {
logger.Warning(err)
logger.Warn(err)
}
}
// Bootstrap the DHT now that we possibly have some connections
c.dht.Bootstrap(c.ctx)
// After setupRPC components can do their tasks with a fully operative
// routed libp2p host with some connections and a working DHT (hopefully).
err = c.setupRPC()
@ -422,7 +418,7 @@ func (c *Cluster) alertsHandler() {
continue
}
logger.Warningf("metric alert for %s: Peer: %s.", alrt.MetricName, alrt.Peer)
logger.Warnf("metric alert for %s: Peer: %s.", alrt.MetricName, alrt.Peer)
c.alertsMux.Lock()
for pID, alert := range c.alerts {
if time.Now().After(time.Unix(0, alert.Expiry)) {
@ -443,21 +439,23 @@ func (c *Cluster) alertsHandler() {
cState, err := c.consensus.State(c.ctx)
if err != nil {
logger.Warning(err)
logger.Warn(err)
return
}
list, err := cState.List(c.ctx)
if err != nil {
logger.Warning(err)
logger.Warn(err)
return
}
distance, err := c.distances(c.ctx, alrt.Peer)
if err != nil {
logger.Warn(err)
return
}
for _, pin := range list {
if len(pin.Allocations) == 1 && containsPeer(pin.Allocations, alrt.Peer) {
logger.Warning("a pin with only one allocation cannot be repinned")
logger.Warning("to make repinning possible, pin with a replication factor of 2+")
continue
}
if c.shouldPeerRepinCid(alrt.Peer, pin) {
if containsPeer(pin.Allocations, alrt.Peer) && distance.isClosest(pin.Cid) {
c.repinFromPeer(c.ctx, alrt.Peer, pin)
}
}
@ -465,25 +463,6 @@ func (c *Cluster) alertsHandler() {
}
}
// shouldPeerRepinCid returns true if the current peer is the top of the
// allocs list. The failed peer is ignored, i.e. if current peer is
// second and the failed peer is first, the function will still
// return true.
func (c *Cluster) shouldPeerRepinCid(failed peer.ID, pin *api.Pin) bool {
if containsPeer(pin.Allocations, failed) && containsPeer(pin.Allocations, c.id) {
allocs := peer.IDSlice(pin.Allocations)
sort.Sort(allocs)
if allocs[0] == c.id {
return true
}
if allocs[1] == c.id && allocs[0] == failed {
return true
}
}
return false
}
// detects any changes in the peerset and saves the configuration. When it
// detects that we have been removed from the peerset, it shuts down this peer.
func (c *Cluster) watchPeers() {
@ -521,7 +500,7 @@ func (c *Cluster) watchPeers() {
}
}
// reBootstrap reguarly attempts to bootstrap (re-connect to peers from the
// reBootstrap regularly attempts to bootstrap (re-connect to peers from the
// peerstore). This should ensure that we auto-recover from situations in
// which the network was completely gone and we lost all peers.
func (c *Cluster) reBootstrap() {
@ -547,18 +526,18 @@ func (c *Cluster) vacatePeer(ctx context.Context, p peer.ID) {
defer span.End()
if c.config.DisableRepinning {
logger.Warningf("repinning is disabled. Will not re-allocate cids from %s", p.Pretty())
logger.Warnf("repinning is disabled. Will not re-allocate cids from %s", p.Pretty())
return
}
cState, err := c.consensus.State(ctx)
if err != nil {
logger.Warning(err)
logger.Warn(err)
return
}
list, err := cState.List(ctx)
if err != nil {
logger.Warning(err)
logger.Warn(err)
return
}
for _, pin := range list {
@ -696,10 +675,10 @@ func (c *Cluster) Ready() <-chan struct{} {
// * Save peerstore with the current peers
// * Remove itself from consensus when LeaveOnShutdown is set
// * It Shutdowns all the components
// * Closes the datastore
// * Collects all goroutines
//
// Shutdown does not closes the libp2p host or the DHT.
// Shutdown does not close the libp2p host, the DHT, the datastore or
// generally anything that Cluster did not create.
func (c *Cluster) Shutdown(ctx context.Context) error {
_, span := trace.StartSpan(ctx, "cluster/Shutdown")
defer span.End()
@ -737,7 +716,7 @@ func (c *Cluster) Shutdown(ctx context.Context) error {
_, err := c.consensus.Peers(ctx)
if err == nil {
// best effort
logger.Warning("attempting to leave the cluster. This may take some seconds")
logger.Warn("attempting to leave the cluster. This may take some seconds")
err := c.consensus.RmPeer(ctx, c.id)
if err != nil {
logger.Error("leaving cluster: " + err.Error())
@ -798,12 +777,6 @@ func (c *Cluster) Shutdown(ctx context.Context) error {
c.cancel()
c.wg.Wait()
// Cleanly close the datastore
if err := c.datastore.Close(); err != nil {
logger.Errorf("error closing Datastore: %s", err)
return err
}
c.shutdownB = true
close(c.doneCh)
return nil
@ -980,18 +953,18 @@ func (c *Cluster) Join(ctx context.Context, addr ma.Multiaddr) error {
// we know that peer since we have metrics for it without
// having to wait for the next metric round.
if err := c.logPingMetric(ctx, pid); err != nil {
logger.Warning(err)
logger.Warn(err)
}
// Broadcast our metrics to the world
_, err = c.sendInformersMetrics(ctx)
if err != nil {
logger.Warning(err)
logger.Warn(err)
}
_, err = c.sendPingMetric(ctx)
if err != nil {
logger.Warning(err)
logger.Warn(err)
}
// We need to trigger a DHT bootstrap asap for this peer to not be
@ -999,8 +972,30 @@ func (c *Cluster) Join(ctx context.Context, addr ma.Multiaddr) error {
// by triggering 1 round of bootstrap in the background.
// Note that our regular bootstrap process is still running in the
// background since we created the cluster.
c.wg.Add(1)
go func() {
c.dht.BootstrapOnce(ctx, dht.DefaultBootstrapConfig)
defer c.wg.Done()
select {
case err := <-c.dht.LAN.RefreshRoutingTable():
if err != nil {
// this error is quite chatty
// on single peer clusters
logger.Debug(err)
}
case <-c.ctx.Done():
return
}
select {
case err := <-c.dht.WAN.RefreshRoutingTable():
if err != nil {
// this error is quite chatty
// on single peer clusters
logger.Debug(err)
}
case <-c.ctx.Done():
return
}
}()
// ConnectSwarms in the background after a while, when we have likely
@ -1024,6 +1019,22 @@ func (c *Cluster) Join(ctx context.Context, addr ma.Multiaddr) error {
return nil
}
// Distances returns a distance checker using current trusted peers.
// It can optionally receive a peer ID to exclude from the checks.
func (c *Cluster) distances(ctx context.Context, exclude peer.ID) (*distanceChecker, error) {
trustedPeers, err := c.getTrustedPeers(ctx, exclude)
if err != nil {
logger.Error("could not get trusted peers:", err)
return nil, err
}
return &distanceChecker{
local: c.id,
otherPeers: trustedPeers,
cache: make(map[peer.ID]distance, len(trustedPeers)+1),
}, nil
}
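
The repin and expiry paths now defer to a distanceChecker built by distances; its implementation is not part of this hunk. As a rough, hypothetical illustration of the underlying idea (XOR-metric closest-peer selection; the helper names here are invented for the sketch and are not the patched code):

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

// xorDistance is an illustrative XOR metric between two hashed identifiers.
func xorDistance(a, b [32]byte) []byte {
	d := make([]byte, len(a))
	for i := range a {
		d[i] = a[i] ^ b[i]
	}
	return d
}

func main() {
	key := sha256.Sum256([]byte("some-cid-bytes"))
	local := sha256.Sum256([]byte("local peer ID"))
	other := sha256.Sum256([]byte("other trusted peer ID"))

	// A peer acts on a key only when it is closer to it than every other
	// trusted peer, so exactly one peer performs the repin or unpin.
	localIsClosest := bytes.Compare(xorDistance(local, key), xorDistance(other, key)) < 0
	fmt.Println("local peer is closest:", localIsClosest)
}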
// StateSync performs maintenance tasks on the global state that require
// looping through all the items. It is triggered automatically on
// StateSyncInterval. Currently it:
@ -1053,20 +1064,14 @@ func (c *Cluster) StateSync(ctx context.Context) error {
// other trusted peers. We cannot know if our peer ID is trusted by
// other peers in the Cluster. This assumes yes. Setting FollowerMode
// is a way to assume the opposite and skip this completely.
trustedPeers, err := c.getTrustedPeers(ctx)
distance, err := c.distances(ctx, "")
if err != nil {
return nil
}
checker := distanceChecker{
local: c.id,
otherPeers: trustedPeers,
cache: make(map[peer.ID]distance, len(trustedPeers)+1),
return err // could not list peers
}
// Unpin expired items when we are the closest peer to them.
for _, p := range clusterPins {
if p.ExpiredAt(timeNow) && checker.isClosest(p.Cid) {
if p.ExpiredAt(timeNow) && distance.isClosest(p.Cid) {
logger.Infof("Unpinning %s: pin expired at %s", p.Cid, p.ExpireAt)
if _, err := c.Unpin(ctx, p.Cid); err != nil {
logger.Error(err)
@ -1326,8 +1331,8 @@ func checkPinType(pin *api.Pin) error {
// setupPin ensures that the Pin object is fit for pinning. We check
// and set the replication factors and ensure that the pinType matches the
// metadata consistently.
func (c *Cluster) setupPin(ctx context.Context, pin *api.Pin) error {
ctx, span := trace.StartSpan(ctx, "cluster/setupPin")
func (c *Cluster) setupPin(ctx context.Context, pin, existing *api.Pin) error {
_, span := trace.StartSpan(ctx, "cluster/setupPin")
defer span.End()
err := c.setupReplicationFactor(pin)
@ -1339,18 +1344,24 @@ func (c *Cluster) setupPin(ctx context.Context, pin *api.Pin) error {
return errors.New("pin.ExpireAt set before current time")
}
existing, err := c.PinGet(ctx, pin.Cid)
if err != nil && err != state.ErrNotFound {
return err
if existing == nil {
return nil
}
if existing != nil && existing.Type != pin.Type {
// If the CID is already pinned, we do a couple more checks
if existing.Type != pin.Type {
msg := "cannot repin CID with different tracking method, "
msg += "clear state with pin rm to proceed. "
msg += "New: %s. Was: %s"
return fmt.Errorf(msg, pin.Type, existing.Type)
}
if existing.Mode == api.PinModeRecursive && pin.Mode != api.PinModeRecursive {
msg := "cannot repin a CID which is already pinned in "
msg += "recursive mode (new pin is pinned as %s). Unpin it first."
return fmt.Errorf(msg, pin.Mode)
}
return checkPinType(pin)
}
@ -1382,8 +1393,13 @@ func (c *Cluster) pin(
return pin, true, err
}
existing, err := c.PinGet(ctx, pin.Cid)
if err != nil && err != state.ErrNotFound {
return pin, false, err
}
// setup pin might produce some side-effects to our pin
err := c.setupPin(ctx, pin)
err = c.setupPin(ctx, pin, existing)
if err != nil {
return pin, false, err
}
@ -1396,8 +1412,7 @@ func (c *Cluster) pin(
// pins to the consensus layer even if they are, this should trigger the
// pin tracker and allows users to get re-pin operations by re-adding
// without having to use recover, which is naturally expected.
existing, err := c.PinGet(ctx, pin.Cid)
if err == nil &&
if existing != nil &&
pin.PinOptions.Equals(&existing.PinOptions) &&
len(blacklist) == 0 {
pin = existing
@ -1413,6 +1428,7 @@ func (c *Cluster) pin(
allocs, err := c.allocate(
ctx,
pin.Cid,
existing,
pin.ReplicationFactorMin,
pin.ReplicationFactorMax,
blacklist,
@ -1458,7 +1474,7 @@ func (c *Cluster) Unpin(ctx context.Context, h cid.Cid) (*api.Pin, error) {
case api.DataType:
return pin, c.consensus.LogUnpin(ctx, pin)
case api.ShardType:
err := "cannot unpin a shard direclty. Unpin content root CID instead."
err := "cannot unpin a shard directly. Unpin content root CID instead"
return pin, errors.New(err)
case api.MetaType:
// Unpin cluster dag and referenced shards
@ -1468,7 +1484,7 @@ func (c *Cluster) Unpin(ctx context.Context, h cid.Cid) (*api.Pin, error) {
}
return pin, c.consensus.LogUnpin(ctx, pin)
case api.ClusterDAGType:
err := "cannot unpin a Cluster DAG directly. Unpin content root CID instead."
err := "cannot unpin a Cluster DAG directly. Unpin content root CID instead"
return pin, errors.New(err)
default:
return pin, errors.New("unrecognized pin type")
@ -1526,7 +1542,9 @@ func (c *Cluster) PinUpdate(ctx context.Context, from cid.Cid, to cid.Cid, opts
if opts.Name != "" {
existing.Name = opts.Name
}
if !opts.ExpireAt.IsZero() && opts.ExpireAt.After(time.Now()) {
existing.ExpireAt = opts.ExpireAt
}
return existing, c.consensus.LogPin(ctx, existing)
}
@ -1596,7 +1614,7 @@ func (c *Cluster) Peers(ctx context.Context) []*api.ID {
}
lenMembers := len(members)
peers := make([]*api.ID, lenMembers, lenMembers)
peers := make([]*api.ID, lenMembers)
ctxs, cancels := rpcutil.CtxsWithCancel(ctx, lenMembers)
defer rpcutil.MultiCancel(cancels)
@ -1615,6 +1633,7 @@ func (c *Cluster) Peers(ctx context.Context) []*api.ID {
for i, err := range errs {
if err == nil {
finalPeers = append(finalPeers, peers[i])
_ = finalPeers // staticcheck
continue
}
@ -1630,8 +1649,9 @@ func (c *Cluster) Peers(ctx context.Context) []*api.ID {
return peers
}
// getTrustedPeers gives listed of trusted peers except the current peer.
func (c *Cluster) getTrustedPeers(ctx context.Context) ([]peer.ID, error) {
// getTrustedPeers returns the list of trusted peers, excluding the current
// peer and the excluded peer if provided.
func (c *Cluster) getTrustedPeers(ctx context.Context, exclude peer.ID) ([]peer.ID, error) {
peers, err := c.consensus.Peers(ctx)
if err != nil {
return nil, err
@ -1640,7 +1660,7 @@ func (c *Cluster) getTrustedPeers(ctx context.Context) ([]peer.ID, error) {
trustedPeers := make([]peer.ID, 0, len(peers))
for _, p := range peers {
if p == c.id || !c.consensus.IsTrustedPeer(ctx, p) {
if p == c.id || p == exclude || !c.consensus.IsTrustedPeer(ctx, p) {
continue
}
trustedPeers = append(trustedPeers, p)
@ -1649,15 +1669,18 @@ func (c *Cluster) getTrustedPeers(ctx context.Context) ([]peer.ID, error) {
return trustedPeers, nil
}
func setTrackerStatus(gpin *api.GlobalPinInfo, h cid.Cid, peers []peer.ID, status api.TrackerStatus, t time.Time) {
func setTrackerStatus(gpin *api.GlobalPinInfo, h cid.Cid, peers []peer.ID, status api.TrackerStatus, name string, t time.Time) {
for _, p := range peers {
gpin.PeerMap[peer.IDB58Encode(p)] = &api.PinInfo{
Cid: h,
Peer: p,
PeerName: p.String(),
Status: status,
TS: t,
}
gpin.Add(&api.PinInfo{
Cid: h,
Name: name,
Peer: p,
PinInfoShort: api.PinInfoShort{
PeerName: p.String(),
Status: status,
TS: t,
},
})
}
}
@ -1665,19 +1688,54 @@ func (c *Cluster) globalPinInfoCid(ctx context.Context, comp, method string, h c
ctx, span := trace.StartSpan(ctx, "cluster/globalPinInfoCid")
defer span.End()
gpin := &api.GlobalPinInfo{
Cid: h,
PeerMap: make(map[string]*api.PinInfo),
}
// The object we will return
gpin := &api.GlobalPinInfo{}
// allocated peers, we will contact them through rpc
var dests []peer.ID
// un-allocated peers, we will set remote status
var remote []peer.ID
timeNow := time.Now()
// set dests and remote
// If pin is not part of the pinset, mark it unpinned
pin, err := c.PinGet(ctx, h)
if err != nil && err != state.ErrNotFound {
logger.Error(err)
return nil, err
}
// When NotFound return directly with an unpinned
// status.
if err == state.ErrNotFound {
var members []peer.ID
if c.config.FollowerMode {
members = []peer.ID{c.host.ID()}
} else {
members, err = c.consensus.Peers(ctx)
if err != nil {
logger.Error(err)
return nil, err
}
}
setTrackerStatus(
gpin,
h,
members,
api.TrackerStatusUnpinned,
"",
timeNow,
)
return gpin, nil
}
// The pin exists.
gpin.Cid = h
gpin.Name = pin.Name
// Make the list of peers that will receive the request.
if c.config.FollowerMode {
// during follower mode return status only on self peer
// during follower mode return only local status.
dests = []peer.ID{c.host.ID()}
remote = []peer.ID{}
} else {
@ -1687,17 +1745,6 @@ func (c *Cluster) globalPinInfoCid(ctx context.Context, comp, method string, h c
return nil, err
}
// If pin is not part of the pinset, mark it unpinned
pin, err := c.PinGet(ctx, h)
if err == state.ErrNotFound {
setTrackerStatus(gpin, h, members, api.TrackerStatusUnpinned, timeNow)
return gpin, nil
}
if err != nil {
logger.Error(err)
return nil, err
}
if len(pin.Allocations) > 0 {
dests = pin.Allocations
remote = peersSubtract(members, dests)
@ -1708,10 +1755,10 @@ func (c *Cluster) globalPinInfoCid(ctx context.Context, comp, method string, h c
}
// set status remote on un-allocated peers
setTrackerStatus(gpin, h, remote, api.TrackerStatusRemote, timeNow)
setTrackerStatus(gpin, h, remote, api.TrackerStatusRemote, pin.Name, timeNow)
lenDests := len(dests)
replies := make([]*api.PinInfo, lenDests, lenDests)
replies := make([]*api.PinInfo, lenDests)
ctxs, cancels := rpcutil.CtxsWithCancel(ctx, lenDests)
defer rpcutil.MultiCancel(cancels)
@ -1729,7 +1776,7 @@ func (c *Cluster) globalPinInfoCid(ctx context.Context, comp, method string, h c
// No error. Parse and continue
if e == nil {
gpin.PeerMap[peer.IDB58Encode(dests[i])] = r
gpin.Add(r)
continue
}
@ -1740,14 +1787,17 @@ func (c *Cluster) globalPinInfoCid(ctx context.Context, comp, method string, h c
// Deal with error cases (err != nil): wrap errors in PinInfo
logger.Errorf("%s: error in broadcast response from %s: %s ", c.id, dests[i], e)
gpin.PeerMap[peer.IDB58Encode(dests[i])] = &api.PinInfo{
Cid: h,
Peer: dests[i],
PeerName: dests[i].String(),
Status: api.TrackerStatusClusterError,
TS: timeNow,
Error: e.Error(),
}
gpin.Add(&api.PinInfo{
Cid: h,
Name: pin.Name,
Peer: dests[i],
PinInfoShort: api.PinInfoShort{
PeerName: dests[i].String(),
Status: api.TrackerStatusClusterError,
TS: timeNow,
Error: e.Error(),
},
})
}
return gpin, nil
@ -1773,7 +1823,7 @@ func (c *Cluster) globalPinInfoSlice(ctx context.Context, comp, method string) (
}
lenMembers := len(members)
replies := make([][]*api.PinInfo, lenMembers, lenMembers)
replies := make([][]*api.PinInfo, lenMembers)
ctxs, cancels := rpcutil.CtxsWithCancel(ctx, lenMembers)
defer rpcutil.MultiCancel(cancels)
@ -1787,23 +1837,16 @@ func (c *Cluster) globalPinInfoSlice(ctx context.Context, comp, method string) (
rpcutil.CopyPinInfoSliceToIfaces(replies),
)
mergePins := func(pins []*api.PinInfo) {
for _, p := range pins {
if p == nil {
continue
}
item, ok := fullMap[p.Cid]
if !ok {
fullMap[p.Cid] = &api.GlobalPinInfo{
Cid: p.Cid,
PeerMap: map[string]*api.PinInfo{
peer.IDB58Encode(p.Peer): p,
},
}
} else {
item.PeerMap[peer.IDB58Encode(p.Peer)] = p
}
setPinInfo := func(p *api.PinInfo) {
if p == nil {
return
}
info, ok := fullMap[p.Cid]
if !ok {
info = &api.GlobalPinInfo{}
fullMap[p.Cid] = info
}
info.Add(p)
}
erroredPeers := make(map[peer.ID]string)
@ -1815,21 +1858,27 @@ func (c *Cluster) globalPinInfoSlice(ctx context.Context, comp, method string) (
}
logger.Errorf("%s: error in broadcast response from %s: %s ", c.id, members[i], e)
erroredPeers[members[i]] = e.Error()
} else {
mergePins(r)
continue
}
for _, pin := range r {
setPinInfo(pin)
}
}
// Merge any errors
for p, msg := range erroredPeers {
for c := range fullMap {
fullMap[c].PeerMap[peer.IDB58Encode(p)] = &api.PinInfo{
Cid: c,
Peer: p,
Status: api.TrackerStatusClusterError,
TS: time.Now(),
Error: msg,
}
setPinInfo(&api.PinInfo{
Cid: c,
Name: "",
Peer: p,
PinInfoShort: api.PinInfoShort{
Status: api.TrackerStatusClusterError,
TS: time.Now(),
Error: msg,
},
})
}
}
@ -1914,45 +1963,45 @@ func (c *Cluster) cidsFromMetaPin(ctx context.Context, h cid.Cid) ([]cid.Cid, er
return list, nil
}
// diffPeers returns the peerIDs added and removed from peers2 in relation to
// peers1
func diffPeers(peers1, peers2 []peer.ID) (added, removed []peer.ID) {
m1 := make(map[peer.ID]struct{})
m2 := make(map[peer.ID]struct{})
added = make([]peer.ID, 0)
removed = make([]peer.ID, 0)
if peers1 == nil && peers2 == nil {
return
}
if peers1 == nil {
added = peers2
return
}
if peers2 == nil {
removed = peers1
return
}
// // diffPeers returns the peerIDs added and removed from peers2 in relation to
// // peers1
// func diffPeers(peers1, peers2 []peer.ID) (added, removed []peer.ID) {
// m1 := make(map[peer.ID]struct{})
// m2 := make(map[peer.ID]struct{})
// added = make([]peer.ID, 0)
// removed = make([]peer.ID, 0)
// if peers1 == nil && peers2 == nil {
// return
// }
// if peers1 == nil {
// added = peers2
// return
// }
// if peers2 == nil {
// removed = peers1
// return
// }
for _, p := range peers1 {
m1[p] = struct{}{}
}
for _, p := range peers2 {
m2[p] = struct{}{}
}
for k := range m1 {
_, ok := m2[k]
if !ok {
removed = append(removed, k)
}
}
for k := range m2 {
_, ok := m1[k]
if !ok {
added = append(added, k)
}
}
return
}
// for _, p := range peers1 {
// m1[p] = struct{}{}
// }
// for _, p := range peers2 {
// m2[p] = struct{}{}
// }
// for k := range m1 {
// _, ok := m2[k]
// if !ok {
// removed = append(removed, k)
// }
// }
// for k := range m2 {
// _, ok := m1[k]
// if !ok {
// added = append(added, k)
// }
// }
// return
// }
// RepoGC performs garbage collection sweep on all peers' IPFS repo.
func (c *Cluster) RepoGC(ctx context.Context) (*api.GlobalRepoGC, error) {
@ -1979,7 +2028,7 @@ func (c *Cluster) RepoGC(ctx context.Context) (*api.GlobalRepoGC, error) {
&repoGC,
)
if err == nil {
globalRepoGC.PeerMap[peer.IDB58Encode(member)] = &repoGC
globalRepoGC.PeerMap[peer.Encode(member)] = &repoGC
continue
}
@ -1990,9 +2039,9 @@ func (c *Cluster) RepoGC(ctx context.Context) (*api.GlobalRepoGC, error) {
logger.Errorf("%s: error in broadcast response from %s: %s ", c.id, member, err)
globalRepoGC.PeerMap[peer.IDB58Encode(member)] = &api.RepoGC{
globalRepoGC.PeerMap[peer.Encode(member)] = &api.RepoGC{
Peer: member,
Peername: peer.IDB58Encode(member),
Peername: peer.Encode(member),
Keys: []api.IPFSRepoGC{},
Error: err.Error(),
}

View File

@ -1,6 +1,7 @@
package ipfscluster
import (
"crypto/rand"
"encoding/hex"
"encoding/json"
"errors"
@ -8,13 +9,12 @@ import (
"os"
"path/filepath"
"reflect"
"sync"
"time"
"github.com/ipfs/ipfs-cluster/config"
ipfsconfig "github.com/ipfs/go-ipfs-config"
pnet "github.com/libp2p/go-libp2p-pnet"
pnet "github.com/libp2p/go-libp2p-core/pnet"
ma "github.com/multiformats/go-multiaddr"
"github.com/kelseyhightower/envconfig"
@ -22,8 +22,11 @@ import (
const configKey = "cluster"
// DefaultListenAddrs contains TCP and QUIC listen addresses
var DefaultListenAddrs = []string{"/ip4/0.0.0.0/tcp/9096", "/ip4/0.0.0.0/udp/9096/quic"}
// DefaultListenAddrs contains TCP and QUIC listen addresses.
var DefaultListenAddrs = []string{
"/ip4/0.0.0.0/tcp/9096",
"/ip4/0.0.0.0/udp/9096/quic",
}
// Configuration defaults
const (
@ -55,8 +58,6 @@ type ConnMgrConfig struct {
// config.ComponentConfig interface.
type Config struct {
config.Saver
lock sync.Mutex
peerstoreLock sync.Mutex
// User-defined peername for use as human-readable identifier.
Peername string
@ -64,7 +65,7 @@ type Config struct {
// Cluster secret for private network. Peers will be in the same cluster if and
// only if they have the same ClusterSecret. The cluster secret must be exactly
// 64 characters and contain only hexadecimal characters (`[0-9a-f]`).
Secret []byte
Secret pnet.PSK
// RPCPolicy defines access control to RPC endpoints.
RPCPolicy map[string]RPCEndpointType
@ -133,7 +134,7 @@ type Config struct {
// If true, DisableRepinning, ensures that no repinning happens
// when a node goes down.
// This is useful when doing certain types of maintainance, or simply
// This is useful when doing certain types of maintenance, or simply
// when not wanting to rely on the monitoring system which needs a revamp.
DisableRepinning bool
@ -161,8 +162,8 @@ type Config struct {
type configJSON struct {
ID string `json:"id,omitempty"`
Peername string `json:"peername"`
PrivateKey string `json:"private_key,omitempty"`
Secret string `json:"secret"`
PrivateKey string `json:"private_key,omitempty" hidden:"true"`
Secret string `json:"secret" hidden:"true"`
LeaveOnShutdown bool `json:"leave_on_shutdown"`
ListenMultiaddress ipfsconfig.Strings `json:"listen_multiaddress"`
EnableRelayHop bool `json:"enable_relay_hop"`
@ -199,14 +200,16 @@ func (cfg *Config) ConfigKey() string {
func (cfg *Config) Default() error {
cfg.setDefaults()
// cluster secret
clusterSecret, err := pnet.GenerateV1Bytes()
clusterSecret := make([]byte, 32)
n, err := rand.Read(clusterSecret)
if err != nil {
return err
}
cfg.Secret = (*clusterSecret)[:]
// --
if n != 32 {
return errors.New("did not generate 32-byte secret")
}
cfg.Secret = clusterSecret
return nil
}
@ -326,7 +329,7 @@ func isRPCPolicyValid(p map[string]RPCEndpointType) error {
}
}
if len(p) != total {
logger.Warning("defined RPC policy has more entries than needed")
logger.Warn("defined RPC policy has more entries than needed")
}
return nil
}
@ -361,7 +364,7 @@ func (cfg *Config) setDefaults() {
cfg.MDNSInterval = DefaultMDNSInterval
cfg.DisableRepinning = DefaultDisableRepinning
cfg.FollowerMode = DefaultFollowerMode
cfg.PeerstoreFile = "" // empty so it gets ommited.
cfg.PeerstoreFile = "" // empty so it gets omitted.
cfg.PeerAddresses = []ma.Multiaddr{}
cfg.RPCPolicy = DefaultRPCPolicy
}
@ -523,6 +526,15 @@ func (cfg *Config) GetPeerstorePath() string {
return filepath.Join(cfg.BaseDir, filename)
}
// ToDisplayJSON returns JSON config as a string.
func (cfg *Config) ToDisplayJSON() ([]byte, error) {
jcfg, err := cfg.toConfigJSON()
if err != nil {
return nil, err
}
return config.DisplayJSON(jcfg)
}
// DecodeClusterSecret parses a hex-encoded string, checks that it is exactly
// 32 bytes long and returns its value as a byte-slice.
func DecodeClusterSecret(hexSecret string) ([]byte, error) {
@ -532,7 +544,7 @@ func DecodeClusterSecret(hexSecret string) ([]byte, error) {
}
switch secretLen := len(secret); secretLen {
case 0:
logger.Warning("Cluster secret is empty, cluster will start on unprotected network.")
logger.Warn("Cluster secret is empty, cluster will start on unprotected network.")
return nil, nil
case 32:
return secret, nil
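
The cluster secret is now plain crypto/rand output typed as a pnet.PSK (a byte slice) and handed directly to libp2p's private-network option. A small, self-contained sketch of the generate/encode/decode cycle that Default, EncodeProtectorKey and DecodeClusterSecret implement; this is a simplified illustration, not the patched functions themselves:

package main

import (
	"crypto/rand"
	"encoding/hex"
	"errors"
	"fmt"
)

func main() {
	// Generate a 32-byte secret, as Default() now does with crypto/rand.
	secret := make([]byte, 32)
	if _, err := rand.Read(secret); err != nil {
		panic(err)
	}

	// Store it as hex in the configuration (what EncodeProtectorKey does).
	hexSecret := hex.EncodeToString(secret)
	fmt.Println("secret:", hexSecret)

	// Read it back and validate the length (what DecodeClusterSecret does).
	decoded, err := hex.DecodeString(hexSecret)
	if err != nil {
		panic(err)
	}
	if len(decoded) != 32 {
		panic(errors.New("cluster secret must be exactly 32 bytes"))
	}
	// decoded can then be used as the pre-shared key for the private network.
	_ = decoded
}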

View File

@ -38,7 +38,6 @@ func (c *mockComponent) Shutdown(ctx context.Context) error {
func (c *mockComponent) SetClient(client *rpc.Client) {
c.rpcClient = client
return
}
type mockAPI struct {
@ -72,12 +71,12 @@ func (ipfs *mockConnector) Unpin(ctx context.Context, c cid.Cid) error {
return nil
}
func (ipfs *mockConnector) PinLsCid(ctx context.Context, c cid.Cid) (api.IPFSPinStatus, error) {
dI, ok := ipfs.pins.Load(c.String())
func (ipfs *mockConnector) PinLsCid(ctx context.Context, pin *api.Pin) (api.IPFSPinStatus, error) {
dI, ok := ipfs.pins.Load(pin.Cid.String())
if !ok {
return api.IPFSPinStatusUnpinned, nil
}
depth := dI.(int)
depth := dI.(api.PinDepth)
if depth == 0 {
return api.IPFSPinStatusDirect, nil
}
@ -88,7 +87,7 @@ func (ipfs *mockConnector) PinLs(ctx context.Context, filter string) (map[string
m := make(map[string]api.IPFSPinStatus)
var st api.IPFSPinStatus
ipfs.pins.Range(func(k, v interface{}) bool {
switch v.(int) {
switch v.(api.PinDepth) {
case 0:
st = api.IPFSPinStatusDirect
default:
@ -213,7 +212,7 @@ func cleanState() {
os.RemoveAll(testsFolder)
}
func testClusterShutdown(t *testing.T) {
func TestClusterShutdown(t *testing.T) {
ctx := context.Background()
cl, _, _, _ := testingCluster(t)
err := cl.Shutdown(ctx)
@ -478,7 +477,7 @@ func TestUnpinShard(t *testing.T) {
t.Errorf("%s should have been unpinned but is %s", c, st.Status)
}
st2, err := cl.ipfs.PinLsCid(context.Background(), c)
st2, err := cl.ipfs.PinLsCid(context.Background(), api.PinCid(c))
if err != nil {
t.Fatal(err)
}

View File

@ -4,25 +4,31 @@ import (
"context"
"encoding/hex"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/namespace"
ipns "github.com/ipfs/go-ipns"
"github.com/ipfs/ipfs-cluster/config"
libp2p "github.com/libp2p/go-libp2p"
autonat "github.com/libp2p/go-libp2p-autonat-svc"
relay "github.com/libp2p/go-libp2p-circuit"
connmgr "github.com/libp2p/go-libp2p-connmgr"
crypto "github.com/libp2p/go-libp2p-core/crypto"
host "github.com/libp2p/go-libp2p-core/host"
corepnet "github.com/libp2p/go-libp2p-core/pnet"
routing "github.com/libp2p/go-libp2p-core/routing"
crypto "github.com/libp2p/go-libp2p-crypto"
host "github.com/libp2p/go-libp2p-host"
dht "github.com/libp2p/go-libp2p-kad-dht"
pnet "github.com/libp2p/go-libp2p-pnet"
dual "github.com/libp2p/go-libp2p-kad-dht/dual"
noise "github.com/libp2p/go-libp2p-noise"
pubsub "github.com/libp2p/go-libp2p-pubsub"
libp2pquic "github.com/libp2p/go-libp2p-quic-transport"
secio "github.com/libp2p/go-libp2p-secio"
record "github.com/libp2p/go-libp2p-record"
libp2ptls "github.com/libp2p/go-libp2p-tls"
routedhost "github.com/libp2p/go-libp2p/p2p/host/routed"
identify "github.com/libp2p/go-libp2p/p2p/protocol/identify"
)
const dhtNamespace = "dht"
var _ = libp2pquic.NewTransport
func init() {
// Cluster peers should advertise their public IPs as soon as they
// learn about them. Default for this is 4, which prevents clusters
@ -36,44 +42,40 @@ func init() {
// NewClusterHost creates a fully-featured libp2p Host with the options from
// the provided cluster configuration. Using that host, it creates pubsub and
// a DHT instances, for shared use by all cluster components. The returned
// host uses the DHT for routing. The resulting DHT is not bootstrapped. Relay
// and AutoNATService are additionally setup for this host.
// a DHT instance (persisting to the given datastore), for shared use by all
// cluster components. The returned host uses the DHT for routing. Relay and
// NATService are additionally set up for this host.
func NewClusterHost(
ctx context.Context,
ident *config.Identity,
cfg *Config,
) (host.Host, *pubsub.PubSub, *dht.IpfsDHT, error) {
ds datastore.Datastore,
) (host.Host, *pubsub.PubSub, *dual.DHT, error) {
connman := connmgr.NewConnManager(cfg.ConnMgr.LowWater, cfg.ConnMgr.HighWater, cfg.ConnMgr.GracePeriod)
relayOpts := []relay.RelayOpt{relay.OptDiscovery}
relayOpts := []relay.RelayOpt{}
if cfg.EnableRelayHop {
relayOpts = append(relayOpts, relay.OptHop)
}
var idht *dht.IpfsDHT
var idht *dual.DHT
var err error
opts := []libp2p.Option{
libp2p.ListenAddrs(cfg.ListenAddr...),
libp2p.NATPortMap(),
libp2p.ConnectionManager(connman),
libp2p.Routing(func(h host.Host) (routing.PeerRouting, error) {
idht, err = newDHT(ctx, h)
idht, err = newDHT(ctx, h, ds)
return idht, err
}),
libp2p.EnableRelay(relayOpts...),
libp2p.EnableAutoRelay(),
}
prot, err := newProtector(cfg.Secret)
if err != nil {
return nil, nil, nil, err
}
h, err := newHost(
ctx,
prot,
cfg.Secret,
ident.PrivateKey,
opts...,
)
@ -87,23 +89,16 @@ func NewClusterHost(
return nil, nil, nil, err
}
// needed for auto relay
_, err = autonat.NewAutoNATService(ctx, h, baseOpts(prot)...)
if err != nil {
h.Close()
return nil, nil, nil, err
}
return h, psub, idht, nil
}
// newHost creates a base cluster host without dht, pubsub, relay or nat etc.
// mostly used for testing.
func newHost(ctx context.Context, prot corepnet.Protector, priv crypto.PrivKey, opts ...libp2p.Option) (host.Host, error) {
func newHost(ctx context.Context, psk corepnet.PSK, priv crypto.PrivKey, opts ...libp2p.Option) (host.Host, error) {
finalOpts := []libp2p.Option{
libp2p.Identity(priv),
}
finalOpts = append(finalOpts, baseOpts(prot)...)
finalOpts = append(finalOpts, baseOpts(psk)...)
finalOpts = append(finalOpts, opts...)
h, err := libp2p.New(
@ -117,29 +112,34 @@ func newHost(ctx context.Context, prot corepnet.Protector, priv crypto.PrivKey,
return h, nil
}
func baseOpts(prot corepnet.Protector) []libp2p.Option {
func baseOpts(psk corepnet.PSK) []libp2p.Option {
return []libp2p.Option{
libp2p.PrivateNetwork(prot),
libp2p.PrivateNetwork(psk),
libp2p.EnableNATService(),
libp2p.Security(noise.ID, noise.New),
libp2p.Security(libp2ptls.ID, libp2ptls.New),
libp2p.Security(secio.ID, secio.New),
libp2p.Transport(libp2pquic.NewTransport),
// TODO: quic does not support private networks
// libp2p.Transport(libp2pquic.NewTransport),
libp2p.DefaultTransports,
}
}
func newProtector(secret []byte) (corepnet.Protector, error) {
// Create protector if we have a secret.
if len(secret) == 0 {
return nil, nil
func newDHT(ctx context.Context, h host.Host, store datastore.Datastore, extraopts ...dual.Option) (*dual.DHT, error) {
opts := []dual.Option{
dual.DHTOption(dht.NamespacedValidator("pk", record.PublicKeyValidator{})),
dual.DHTOption(dht.NamespacedValidator("ipns", ipns.Validator{KeyBook: h.Peerstore()})),
dual.DHTOption(dht.Concurrency(10)),
}
var key [32]byte
copy(key[:], secret)
return pnet.NewV1ProtectorFromBytes(&key)
}
opts = append(opts, extraopts...)
func newDHT(ctx context.Context, h host.Host) (*dht.IpfsDHT, error) {
return dht.New(ctx, h)
if batchingDs, ok := store.(datastore.Batching); ok {
dhtDatastore := namespace.Wrap(batchingDs, datastore.NewKey(dhtNamespace))
opts = append(opts, dual.DHTOption(dht.Datastore(dhtDatastore)))
logger.Debug("enabling DHT record persistence to datastore")
}
return dual.New(ctx, h, opts...)
}
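
newDHT above persists DHT records by wrapping the cluster datastore under a dedicated key prefix. The same namespacing pattern in isolation, using an in-memory datastore as a stand-in for the cluster's Badger store (illustrative only):

package main

import (
	"fmt"

	datastore "github.com/ipfs/go-datastore"
	namespace "github.com/ipfs/go-datastore/namespace"
)

func main() {
	// A map-backed datastore stands in for the cluster's real store.
	base := datastore.NewMapDatastore()

	// Records written through the wrapper end up under the /dht prefix,
	// keeping DHT state separate from the rest of the cluster data.
	dhtStore := namespace.Wrap(base, datastore.NewKey("dht"))
	_ = dhtStore
	fmt.Println("DHT records will live under the /dht prefix")
}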
func newPubSub(ctx context.Context, h host.Host) (*pubsub.PubSub, error) {
@ -151,10 +151,6 @@ func newPubSub(ctx context.Context, h host.Host) (*pubsub.PubSub, error) {
)
}
func routedHost(h host.Host, d *dht.IpfsDHT) host.Host {
return routedhost.Wrap(h, d)
}
// EncodeProtectorKey converts a byte slice to its hex string representation.
func EncodeProtectorKey(secretBytes []byte) string {
return hex.EncodeToString(secretBytes)

View File

@ -1,15 +1,17 @@
# go source files
SRC := $(shell find .. -type f -name '*.go')
SRC := $(shell find ../.. -type f -name '*.go')
GOPATH := $(shell go env GOPATH)
GOFLAGS := "-trimpath"
all: ipfs-cluster-ctl
ipfs-cluster-ctl: $(SRC)
go build -mod=readonly
go build $(GOFLAGS) -mod=readonly
build: ipfs-cluster-ctl
install:
go install
go install $(GOFLAGS)
clean:
rm -f ipfs-cluster-ctl

5
cmd/ipfs-cluster-ctl/dist/LICENSE vendored Normal file
View File

@ -0,0 +1,5 @@
Dual-licensed under MIT and ASLv2, by way of the [Permissive License
Stack](https://protocol.ai/blog/announcing-the-permissive-license-stack/).
Apache-2.0: https://www.apache.org/licenses/license-2.0
MIT: https://www.opensource.org/licenses/mit

View File

@ -22,15 +22,14 @@ type addedOutputQuiet struct {
}
func jsonFormatObject(resp interface{}) {
switch resp.(type) {
switch r := resp.(type) {
case nil:
return
case []*addedOutputQuiet:
// print original objects as in JSON it makes
// no sense to have a human "quiet" output
serials := resp.([]*addedOutputQuiet)
var actual []*api.AddedOutput
for _, s := range serials {
for _, s := range r {
actual = append(actual, s.AddedOutput)
}
jsonFormatPrint(actual)
@ -46,55 +45,55 @@ func jsonFormatPrint(obj interface{}) {
}
func textFormatObject(resp interface{}) {
switch resp.(type) {
switch r := resp.(type) {
case nil:
return
case string:
fmt.Println(resp)
case *api.ID:
textFormatPrintID(resp.(*api.ID))
textFormatPrintID(r)
case *api.GlobalPinInfo:
textFormatPrintGPInfo(resp.(*api.GlobalPinInfo))
textFormatPrintGPInfo(r)
case *api.Pin:
textFormatPrintPin(resp.(*api.Pin))
textFormatPrintPin(r)
case *api.AddedOutput:
textFormatPrintAddedOutput(resp.(*api.AddedOutput))
textFormatPrintAddedOutput(r)
case *addedOutputQuiet:
textFormatPrintAddedOutputQuiet(resp.(*addedOutputQuiet))
textFormatPrintAddedOutputQuiet(r)
case *api.Version:
textFormatPrintVersion(resp.(*api.Version))
textFormatPrintVersion(r)
case *api.Error:
textFormatPrintError(resp.(*api.Error))
textFormatPrintError(r)
case *api.Metric:
textFormatPrintMetric(resp.(*api.Metric))
textFormatPrintMetric(r)
case []*api.ID:
for _, item := range resp.([]*api.ID) {
for _, item := range r {
textFormatObject(item)
}
case []*api.GlobalPinInfo:
for _, item := range resp.([]*api.GlobalPinInfo) {
for _, item := range r {
textFormatObject(item)
}
case []*api.Pin:
for _, item := range resp.([]*api.Pin) {
for _, item := range r {
textFormatObject(item)
}
case []*api.AddedOutput:
for _, item := range resp.([]*api.AddedOutput) {
for _, item := range r {
textFormatObject(item)
}
case []*addedOutputQuiet:
for _, item := range resp.([]*addedOutputQuiet) {
for _, item := range r {
textFormatObject(item)
}
case []*api.Metric:
for _, item := range resp.([]*api.Metric) {
for _, item := range r {
textFormatObject(item)
}
case *api.GlobalRepoGC:
textFormatPrintGlobalRepoGC(resp.(*api.GlobalRepoGC))
textFormatPrintGlobalRepoGC(r)
case []string:
for _, item := range resp.([]string) {
for _, item := range r {
textFormatObject(item)
}
case map[string]api.Alert:
@ -145,36 +144,35 @@ func textFormatPrintID(obj *api.ID) {
}
func textFormatPrintGPInfo(obj *api.GlobalPinInfo) {
fmt.Printf("%s :\n", obj.Cid)
var b strings.Builder
peers := make([]string, 0, len(obj.PeerMap))
for k := range obj.PeerMap {
peers = append(peers, k)
}
sort.Strings(peers)
fmt.Fprintf(&b, "%s", obj.Cid)
if obj.Name != "" {
fmt.Fprintf(&b, " | %s", obj.Name)
}
b.WriteString(":\n")
for _, k := range peers {
v := obj.PeerMap[k]
if len(v.PeerName) > 0 {
fmt.Printf(" > %-20s : %s", v.PeerName, strings.ToUpper(v.Status.String()))
fmt.Fprintf(&b, " > %-20s : %s", v.PeerName, strings.ToUpper(v.Status.String()))
} else {
fmt.Printf(" > %-20s : %s", k, strings.ToUpper(v.Status.String()))
fmt.Fprintf(&b, " > %-20s : %s", k, strings.ToUpper(v.Status.String()))
}
if v.Error != "" {
fmt.Printf(": %s", v.Error)
fmt.Fprintf(&b, ": %s", v.Error)
}
txt, _ := v.TS.MarshalText()
fmt.Printf(" | %s\n", txt)
fmt.Fprintf(&b, " | %s\n", txt)
}
}
func textFormatPrintPInfo(obj *api.PinInfo) {
gpinfo := api.GlobalPinInfo{
Cid: obj.Cid,
PeerMap: map[string]*api.PinInfo{
peer.IDB58Encode(obj.Peer): obj,
},
}
textFormatPrintGPInfo(&gpinfo)
fmt.Print(b.String())
}
func textFormatPrintVersion(obj *api.Version) {
@ -182,7 +180,12 @@ func textFormatPrintVersion(obj *api.Version) {
}
func textFormatPrintPin(obj *api.Pin) {
fmt.Printf("%s | %s | %s | ", obj.Cid, obj.Name, strings.ToUpper(obj.Type.String()))
t := strings.ToUpper(obj.Type.String())
if obj.Mode == api.PinModeDirect {
t = t + "-DIRECT"
}
fmt.Printf("%s | %s | %s | ", obj.Cid, obj.Name, t)
if obj.ReplicationFactorMin < 0 {
fmt.Printf("Repl. Factor: -1 | Allocations: [everywhere]")
@ -207,10 +210,15 @@ func textFormatPrintPin(obj *api.Pin) {
fmt.Printf(" | Metadata:")
if len(obj.Metadata) == 0 {
fmt.Printf(" no\n")
fmt.Printf(" no")
} else {
fmt.Printf(" yes\n")
fmt.Printf(" yes")
}
expireAt := "Exp: ∞"
if !obj.ExpireAt.IsZero() {
expireAt = humanize.Time(obj.ExpireAt)
}
fmt.Printf(" | %s\n", expireAt)
}
func textFormatPrintAddedOutput(obj *api.AddedOutput) {
@ -229,11 +237,11 @@ func textFormatPrintMetric(obj *api.Metric) {
if obj.Name == "freespace" {
u, err := strconv.ParseUint(obj.Value, 10, 64)
checkErr("parsing to uint64", err)
fmt.Printf("%s | freespace: %s | Expires in: %s\n", peer.IDB58Encode(obj.Peer), humanize.Bytes(u), humanize.Time(time.Unix(0, obj.Expire)))
fmt.Printf("%s | freespace: %s | Expires in: %s\n", peer.Encode(obj.Peer), humanize.Bytes(u), humanize.Time(time.Unix(0, obj.Expire)))
return
}
fmt.Printf("%s | %s | Expires in: %s\n", peer.IDB58Encode(obj.Peer), obj.Name, humanize.Time(time.Unix(0, obj.Expire)))
fmt.Printf("%s | %s | Expires in: %s\n", peer.Encode(obj.Peer), obj.Name, humanize.Time(time.Unix(0, obj.Expire)))
}
func textFormatPrintGlobalRepoGC(obj *api.GlobalRepoGC) {

View File

@ -7,7 +7,7 @@ import (
"sort"
dot "github.com/kishansagathiya/go-dot"
peer "github.com/libp2p/go-libp2p-peer"
peer "github.com/libp2p/go-libp2p-core/peer"
"github.com/ipfs/ipfs-cluster/api"
)
@ -38,16 +38,14 @@ const (
tIPFSMissing // Missing IPFS node
)
var errUnfinishedWrite = errors.New("could not complete write of line to output")
var errUnknownNodeType = errors.New("unsupported node type. Expected cluster or ipfs")
var errCorruptOrdering = errors.New("expected pid to have an ordering within dot writer")
func makeDot(cg *api.ConnectGraph, w io.Writer, allIpfs bool) error {
ipfsEdges := make(map[string][]peer.ID)
for k, v := range cg.IPFSLinks {
ipfsEdges[k] = make([]peer.ID, 0)
for _, id := range v {
strPid := peer.IDB58Encode(id)
strPid := peer.Encode(id)
if _, ok := cg.IPFSLinks[strPid]; ok || allIpfs {
ipfsEdges[k] = append(ipfsEdges[k], id)
@ -65,7 +63,7 @@ func makeDot(cg *api.ConnectGraph, w io.Writer, allIpfs bool) error {
dW := dotWriter{
w: w,
dotGraph: dot.NewGraph("cluster"),
self: peer.IDB58Encode(cg.ClusterID),
self: peer.Encode(cg.ClusterID),
trustMap: cg.ClusterTrustLinks,
idToPeername: cg.IDtoPeername,
ipfsEdges: ipfsEdges,
@ -207,7 +205,7 @@ func (dW *dotWriter) print() error {
v := dW.clusterEdges[k]
for _, id := range v {
toNode := dW.clusterNodes[k]
fromNode := dW.clusterNodes[peer.IDB58Encode(id)]
fromNode := dW.clusterNodes[peer.Encode(id)]
dW.dotGraph.AddEdge(toNode, fromNode, true, "")
}
}
@ -229,7 +227,7 @@ func (dW *dotWriter) print() error {
continue
}
fromNode, ok = dW.ipfsNodes[peer.IDB58Encode(ipfsID)]
fromNode, ok = dW.ipfsNodes[peer.Encode(ipfsID)]
if !ok {
logger.Error("expected a node at this id")
continue
@ -244,7 +242,7 @@ func (dW *dotWriter) print() error {
v := dW.ipfsEdges[k]
toNode := dW.ipfsNodes[k]
for _, id := range v {
idStr := peer.IDB58Encode(id)
idStr := peer.Encode(id)
fromNode, ok := dW.ipfsNodes[idStr]
if !ok {
logger.Error("expected a node here")
@ -257,7 +255,7 @@ func (dW *dotWriter) print() error {
}
func sortedKeys(dict map[string][]peer.ID) []string {
keys := make([]string, len(dict), len(dict))
keys := make([]string, len(dict))
i := 0
for k := range dict {
keys[i] = k

View File

@ -67,53 +67,53 @@ I2 -> I0
}`
var (
pid1, _ = peer.IDB58Decode("QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD")
pid2, _ = peer.IDB58Decode("QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ")
pid3, _ = peer.IDB58Decode("QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu")
pid4, _ = peer.IDB58Decode("QmXbiVZd93SLiu9TAm21F2y9JwGiFLydbEVkPBaMR3DZDV")
pid5, _ = peer.IDB58Decode("QmPFKAGZbUjdzt8BBx8VTWCe9UeUQVcoqHFehSPzN5LSsq")
pid6, _ = peer.IDB58Decode("QmbU7273zH6jxwDe2nqRmEm2rp5PpqP2xeQr2xCmwbBsuL")
pid1, _ = peer.Decode("QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD")
pid2, _ = peer.Decode("QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ")
pid3, _ = peer.Decode("QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu")
pid4, _ = peer.Decode("QmXbiVZd93SLiu9TAm21F2y9JwGiFLydbEVkPBaMR3DZDV")
pid5, _ = peer.Decode("QmPFKAGZbUjdzt8BBx8VTWCe9UeUQVcoqHFehSPzN5LSsq")
pid6, _ = peer.Decode("QmbU7273zH6jxwDe2nqRmEm2rp5PpqP2xeQr2xCmwbBsuL")
pid7, _ = peer.IDB58Decode("QmQsdAdCHs4PRLi5tcoLfasYppryqQENxgAy4b2aS8xccb")
pid8, _ = peer.IDB58Decode("QmVV2enwXqqQf5esx4v36UeaFQvFehSPzNfi8aaaaaanM8")
pid9, _ = peer.IDB58Decode("QmfCHNQ2vbUmAuJZhE2hEpgiJq4sL1XScWEKnUrVtWZdeD")
pid7, _ = peer.Decode("QmQsdAdCHs4PRLi5tcoLfasYppryqQENxgAy4b2aS8xccb")
pid8, _ = peer.Decode("QmVV2enwXqqQf5esx4v36UeaFQvFehSPzNfi8aaaaaanM8")
pid9, _ = peer.Decode("QmfCHNQ2vbUmAuJZhE2hEpgiJq4sL1XScWEKnUrVtWZdeD")
)
func TestSimpleIpfsGraphs(t *testing.T) {
cg := api.ConnectGraph{
ClusterID: pid1,
ClusterLinks: map[string][]peer.ID{
peer.IDB58Encode(pid1): []peer.ID{
peer.Encode(pid1): {
pid2,
pid3,
},
peer.IDB58Encode(pid2): []peer.ID{
peer.Encode(pid2): {
pid1,
pid3,
},
peer.IDB58Encode(pid3): []peer.ID{
peer.Encode(pid3): {
pid1,
pid2,
},
},
IPFSLinks: map[string][]peer.ID{
peer.IDB58Encode(pid4): []peer.ID{
peer.Encode(pid4): {
pid5,
pid6,
},
peer.IDB58Encode(pid5): []peer.ID{
peer.Encode(pid5): {
pid4,
pid6,
},
peer.IDB58Encode(pid6): []peer.ID{
peer.Encode(pid6): {
pid4,
pid5,
},
},
ClustertoIPFS: map[string]peer.ID{
peer.IDB58Encode(pid1): pid4,
peer.IDB58Encode(pid2): pid5,
peer.IDB58Encode(pid3): pid6,
peer.Encode(pid1): pid4,
peer.Encode(pid2): pid5,
peer.Encode(pid3): pid6,
},
}
buf := new(bytes.Buffer)
@ -181,35 +181,35 @@ func TestIpfsAllGraphs(t *testing.T) {
cg := api.ConnectGraph{
ClusterID: pid1,
ClusterLinks: map[string][]peer.ID{
peer.IDB58Encode(pid1): []peer.ID{
peer.Encode(pid1): {
pid2,
pid3,
},
peer.IDB58Encode(pid2): []peer.ID{
peer.Encode(pid2): {
pid1,
pid3,
},
peer.IDB58Encode(pid3): []peer.ID{
peer.Encode(pid3): {
pid1,
pid2,
},
},
IPFSLinks: map[string][]peer.ID{
peer.IDB58Encode(pid4): []peer.ID{
peer.Encode(pid4): {
pid5,
pid6,
pid7,
pid8,
pid9,
},
peer.IDB58Encode(pid5): []peer.ID{
peer.Encode(pid5): {
pid4,
pid6,
pid7,
pid8,
pid9,
},
peer.IDB58Encode(pid6): []peer.ID{
peer.Encode(pid6): {
pid4,
pid5,
pid7,
@ -218,9 +218,9 @@ func TestIpfsAllGraphs(t *testing.T) {
},
},
ClustertoIPFS: map[string]peer.ID{
peer.IDB58Encode(pid1): pid4,
peer.IDB58Encode(pid2): pid5,
peer.IDB58Encode(pid3): pid6,
peer.Encode(pid1): pid4,
peer.Encode(pid2): pid5,
peer.Encode(pid3): pid6,
},
}

View File

@ -16,11 +16,10 @@ import (
"github.com/ipfs/ipfs-cluster/api/rest/client"
cid "github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log"
logging "github.com/ipfs/go-log/v2"
peer "github.com/libp2p/go-libp2p-core/peer"
ma "github.com/multiformats/go-multiaddr"
"contrib.go.opencensus.io/exporter/jaeger"
uuid "github.com/google/uuid"
cli "github.com/urfave/cli"
)
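
This file, like the rest of the patch, moves to go-log/v2, whose zap-backed sugared logger exposes Warn and Warnf instead of the old Warning and Warningf helpers. A minimal sketch of the v2 API, assuming only the Logger and SetLogLevel entry points:

package main

import logging "github.com/ipfs/go-log/v2"

var logger = logging.Logger("example")

func main() {
	// Levels are still configured per subsystem by name.
	logging.SetLogLevel("example", "warn")

	// Warn/Warnf replace Warning/Warningf from go-log v1.
	logger.Warn("plain warning")
	logger.Warnf("formatted warning for peer %s", "some-peer")
}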
@ -29,37 +28,39 @@ const programName = `ipfs-cluster-ctl`
// Version is the cluster-ctl tool version. It should match
// the IPFS cluster's version
const Version = "0.12.0"
const Version = "0.13.0-next"
var (
defaultHost = "/ip4/127.0.0.1/tcp/9094"
defaultTimeout = 0
defaultUsername = ""
defaultPassword = ""
defaultWaitCheckFreq = time.Second
defaultAddParams = api.DefaultAddParams()
)
var logger = logging.Logger("cluster-ctl")
var tracer *jaeger.Exporter
var globalClient client.Client
// Description provides a short summary of the functionality of this tool
var Description = fmt.Sprintf(`
%s is a tool to manage IPFS Cluster nodes.
Use "%s help" to list all available commands and
"%s help <command>" to get usage information for a
specific one.
%s uses the IPFS Cluster API to perform requests and display
responses in a user-readable format. The location of the IPFS
Cluster server is assumed to be %s, but can be
configured with the --host option. To use the secure libp2p-http
API endpoint, use "--host" with the full cluster libp2p listener
address (including the "/p2p/<peerID>" part), and --secret (the
32-byte cluster secret as it appears in the cluster configuration).
%s uses the IPFS Cluster API to perform requests and
display responses in a user-readable format. The location of the IPFS
Cluster server is assumed to be %s, but can be configured
with the --host option. If several multiaddresses are specified
(comma-separated), requests will be sent to the first one and fail over
to the others. This also works for dns-based addresses which resolve
to multiple values.
To use the secure libp2p-http API endpoint, use "--host" with
the full cluster libp2p listener address, including the "/p2p/<peerID>"
part, or a /dnsaddr that resolves to it. Provide the cluster secret with
--secret as needed.
For feedback, bug reports or any additional information, visit
https://github.com/ipfs/ipfs-cluster.
@ -70,9 +71,9 @@ https://github.com/ipfs/ipfs-cluster.
programName,
defaultHost)
type peerAddBody struct {
Addr string `json:"peer_multiaddress"`
}
// type peerAddBody struct {
// Addr string `json:"peer_multiaddress"`
// }
func out(m string, a ...interface{}) {
fmt.Fprintf(os.Stderr, m, a...)
@ -97,7 +98,7 @@ func main() {
cli.StringFlag{
Name: "host, l",
Value: defaultHost,
Usage: "Cluster's HTTP or LibP2P-HTTP API endpoint",
Usage: `API endpoint multiaddresses (comma-separated)`,
},
cli.StringFlag{
Name: "secret",
@ -143,14 +144,11 @@ requires authorization. implies --https, which you can disable with --force-http
if c.Bool("debug") {
logging.SetLogLevel("cluster-ctl", "debug")
logging.SetLogLevel("apitypes", "debug")
cfg.LogLevel = "debug"
logger.Debug("debug level enabled")
}
addr, err := ma.NewMultiaddr(c.String("host"))
checkErr("parsing host multiaddress", err)
cfg.APIAddr = addr
if hexSecret := c.String("secret"); hexSecret != "" {
secret, err := hex.DecodeString(hexSecret)
checkErr("parsing secret", err)
@ -159,17 +157,13 @@ requires authorization. implies --https, which you can disable with --force-http
cfg.Timeout = time.Duration(c.Int("timeout")) * time.Second
if client.IsPeerAddress(cfg.APIAddr) && c.Bool("https") {
logger.Warning("Using libp2p-http. SSL flags will be ignored")
}
cfg.SSL = c.Bool("https")
cfg.NoVerifyCert = c.Bool("no-check-certificate")
user, pass := parseCredentials(c.String("basic-auth"))
cfg.Username = user
cfg.Password = pass
if user != "" && !cfg.SSL && !c.Bool("force-http") {
logger.Warning("SSL automatically enabled with basic auth credentials. Set \"force-http\" to disable")
logger.Warn("SSL automatically enabled with basic auth credentials. Set \"force-http\" to disable")
cfg.SSL = true
}
@ -178,7 +172,23 @@ requires authorization. implies --https, which you can disable with --force-http
checkErr("", errors.New("unsupported encoding"))
}
globalClient, err = client.NewDefaultClient(cfg)
var configs []*client.Config
var err error
for _, addr := range strings.Split(c.String("host"), ",") {
multiaddr, err := ma.NewMultiaddr(addr)
checkErr("parsing host multiaddress", err)
if client.IsPeerAddress(multiaddr) && c.Bool("https") {
logger.Warnf("Using libp2p-http for %s. The https flag will be ignored for this connection", addr)
}
cfgs, err := cfg.AsTemplateForResolvedAddress(ctx, multiaddr)
checkErr("creating configs", err)
configs = append(configs, cfgs...)
}
retries := len(configs)
globalClient, err = client.NewLBClient(&client.Failover{}, configs, retries)
checkErr("creating API client", err)
// TODO: need to figure out best way to configure tracing for ctl
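
The ctl tool now builds one client.Config per resolved --host address and hands them all to a load-balancing client with a fail-over strategy. A compact sketch of that wiring, assuming only the rest-client calls visible above (NewLBClient, Failover, AsTemplateForResolvedAddress) and eliding flag parsing:

package main

import (
	"context"
	"fmt"

	"github.com/ipfs/ipfs-cluster/api/rest/client"
	ma "github.com/multiformats/go-multiaddr"
)

func main() {
	ctx := context.Background()
	cfg := &client.Config{}

	var configs []*client.Config
	for _, addr := range []string{"/ip4/127.0.0.1/tcp/9094", "/ip4/10.0.0.2/tcp/9094"} {
		maddr, err := ma.NewMultiaddr(addr)
		if err != nil {
			panic(err)
		}
		// One or more configs per address (dns entries may resolve to several).
		cfgs, err := cfg.AsTemplateForResolvedAddress(ctx, maddr)
		if err != nil {
			panic(err)
		}
		configs = append(configs, cfgs...)
	}

	// Requests go to the first config; on failure they fail over to the rest.
	c, err := client.NewLBClient(&client.Failover{}, configs, len(configs))
	if err != nil {
		panic(err)
	}
	fmt.Println("client ready:", c != nil)
}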
@ -245,7 +255,7 @@ cluster peers.
Flags: []cli.Flag{},
Action: func(c *cli.Context) error {
pid := c.Args().First()
p, err := peer.IDB58Decode(pid)
p, err := peer.Decode(pid)
checkErr("parsing peer ID", err)
cerr := globalClient.PeerRm(ctx, p)
formatResponse(c, nil, cerr)
@ -405,7 +415,7 @@ content.
}
// Read arguments (paths)
paths := make([]string, c.NArg(), c.NArg())
paths := make([]string, c.NArg())
for i, path := range c.Args() {
paths[i] = path
}
@ -522,7 +532,7 @@ config). Positive values indicate how many peers should pin this content.
An optional allocations argument can be provided, allocations should be a
comma-separated list of peer IDs on which we want to pin. Peers in allocations
are prioritized over automatically-determined ones, but replication factors
would stil be respected.
would still be respected.
`,
ArgsUsage: "<CID|Path>",
Flags: []cli.Flag{
@ -550,6 +560,11 @@ would stil be respected.
Value: "",
Usage: "Sets a name for this pin",
},
cli.StringFlag{
Name: "mode",
Value: "recursive",
Usage: "Select a way to pin: recursive or direct",
},
cli.StringFlag{
Name: "expire-in",
Usage: "Duration after which pin should be unpinned automatically",
@ -604,6 +619,7 @@ would stil be respected.
ReplicationFactorMin: rplMin,
ReplicationFactorMax: rplMax,
Name: c.String("name"),
Mode: api.PinModeFromString(c.String("mode")),
UserAllocations: userAllocs,
ExpireAt: expireAt,
Metadata: parseMetadata(c.StringSlice("metadata")),
@ -684,9 +700,18 @@ existing item from the cluster. Please run "pin rm" for that.
`,
ArgsUsage: "<existing-CID> <new-CID|Path>",
Flags: []cli.Flag{
cli.StringFlag{
Name: "name, n",
Value: "",
Usage: "Sets a name for this updated pin",
},
cli.StringFlag{
Name: "expire-in",
Usage: "Duration after which the pin should be unpinned automatically after updating",
},
cli.BoolFlag{
Name: "no-status, ns",
Usage: "Prevents fetching pin status after unpinning (faster, quieter)",
Usage: "Prevents fetching pin status after updating (faster, quieter)",
},
cli.BoolFlag{
Name: "wait, w",
@ -705,8 +730,17 @@ existing item from the cluster. Please run "pin rm" for that.
fromCid, err := cid.Decode(from)
checkErr("parsing from Cid", err)
var expireAt time.Time
if expireIn := c.String("expire-in"); expireIn != "" {
d, err := time.ParseDuration(expireIn)
checkErr("parsing expire-in", err)
expireAt = time.Now().Add(d)
}
opts := api.PinOptions{
PinUpdate: fromCid,
Name: c.String("name"),
ExpireAt: expireAt,
}
pin, cerr := globalClient.PinPath(ctx, to, opts)
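
Both pin and pin update now accept --expire-in, which is converted into an absolute ExpireAt timestamp before being attached to the pin options. The conversion in isolation:

package main

import (
	"fmt"
	"time"
)

func main() {
	// "--expire-in 720h": a relative duration becomes an absolute expiry.
	d, err := time.ParseDuration("720h")
	if err != nil {
		panic(err)
	}
	expireAt := time.Now().Add(d)
	fmt.Println("pin would expire at:", expireAt.Format(time.RFC3339))
}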
@ -735,10 +769,10 @@ the cluster. For IPFS-status information about the pins, use "status".
The filter only takes effect when listing all pins. The possible values are:
- all
- pin
- meta-pin
- clusterdag-pin
- shard-pin
- pin (normal pins, recursive or direct)
- meta-pin (sharded pins)
- clusterdag-pin (sharding-dag root pins)
- shard-pin (individual shard pins)
`,
ArgsUsage: "[CID]",
Flags: []cli.Flag{
daemon, otherwise on all IPFS daemons.
app.Run(os.Args)
}
func parseFlag(t int) cli.IntFlag {
return cli.IntFlag{
Name: "parseAs",
Value: t,
Hidden: true,
}
}
func localFlag() cli.BoolFlag {
return cli.BoolFlag{
Name: "local",

View File

@ -1,15 +1,17 @@
# go source files
SRC := $(shell find .. -type f -name '*.go')
SRC := $(shell find ../.. -type f -name '*.go')
GOPATH := $(shell go env GOPATH)
GOFLAGS := "-trimpath"
all: ipfs-cluster-follow
ipfs-cluster-follow: $(SRC)
go build -mod=readonly -ldflags "-X main.commit=$(shell git rev-parse HEAD)"
go build $(GOFLAGS) -mod=readonly -ldflags "-X main.commit=$(shell git rev-parse HEAD)"
build: ipfs-cluster-follow
install:
go install -ldflags "-X main.commit=$(shell git rev-parse HEAD)"
go install $(GOFLAGS) -ldflags "-X main.commit=$(shell git rev-parse HEAD)"
clean:
rm -f ipfs-cluster-follow

View File

@ -22,6 +22,7 @@ import (
"github.com/ipfs/ipfs-cluster/monitor/pubsubmon"
"github.com/ipfs/ipfs-cluster/observations"
"github.com/ipfs/ipfs-cluster/pintracker/stateless"
"github.com/multiformats/go-multiaddr"
"github.com/pkg/errors"
cli "github.com/urfave/cli/v2"
)
@ -118,9 +119,12 @@ func infoCmd(c *cli.Context) error {
if err != nil {
if config.IsErrFetchingSource(err) {
url = fmt.Sprintf(
"failed retrieving configuration source: %s",
"failed retrieving configuration source (%s)",
cfgHelper.Manager().Source,
)
ipfsCfg := ipfshttp.Config{}
ipfsCfg.Default()
cfgHelper.Configs().Ipfshttp = &ipfsCfg
} else {
return cli.Exit(errors.Wrapf(err, "reading the configurations in %s", absPath), 1)
}
@ -277,15 +281,28 @@ func runCmd(c *cli.Context) error {
cfgHelper.Manager().Shutdown()
cfgs := cfgHelper.Configs()
stmgr, err := cmdutils.NewStateManager(cfgHelper.GetConsensus(), cfgHelper.Identity(), cfgs)
if err != nil {
return cli.Exit(errors.Wrap(err, "creating state manager"), 1)
}
store, err := stmgr.GetStore()
if err != nil {
return cli.Exit(errors.Wrap(err, "creating datastore"), 1)
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
host, pubsub, dht, err := ipfscluster.NewClusterHost(ctx, cfgHelper.Identity(), cfgs.Cluster)
host, pubsub, dht, err := ipfscluster.NewClusterHost(ctx, cfgHelper.Identity(), cfgs.Cluster, store)
if err != nil {
return cli.Exit(errors.Wrap(err, "error creating libp2p components"), 1)
}
// Always run followers in follower mode.
cfgs.Cluster.FollowerMode = true
// Do not let trusted peers GC this peer
// Defaults to Trusted otherwise.
cfgs.Cluster.RPCPolicy["Cluster.RepoGCLocal"] = ipfscluster.RPCClosed
// Discard API configurations and create our own
apiCfg := rest.Config{}
@ -295,11 +312,11 @@ func runCmd(c *cli.Context) error {
if err != nil {
return cli.Exit(err, 1)
}
apiCfg.HTTPListenAddr = listenSocket
apiCfg.HTTPListenAddr = []multiaddr.Multiaddr{listenSocket}
// Allow customization via env vars
err = apiCfg.ApplyEnvVars()
if err != nil {
return cli.Exit(errors.Wrap(err, "error applying enviromental variables to restapi configuration"), 1)
return cli.Exit(errors.Wrap(err, "error applying environmental variables to restapi configuration"), 1)
}
rest, err := rest.NewAPI(ctx, &apiCfg)
@ -318,16 +335,6 @@ func runCmd(c *cli.Context) error {
}
alloc := descendalloc.NewAllocator()
stmgr, err := cmdutils.NewStateManager(cfgHelper.GetConsensus(), cfgHelper.Identity(), cfgs)
if err != nil {
return cli.Exit(errors.Wrap(err, "creating state manager"), 1)
}
store, err := stmgr.GetStore()
if err != nil {
return cli.Exit(errors.Wrap(err, "creating datastore"), 1)
}
crdtcons, err := crdt.New(
host,
dht,
@ -392,7 +399,7 @@ func runCmd(c *cli.Context) error {
return cli.Exit(errors.Wrap(err, "error creating cluster peer"), 1)
}
return cmdutils.HandleSignals(ctx, cancel, cluster, host, dht)
return cmdutils.HandleSignals(ctx, cancel, cluster, host, dht, store)
}
// List
@ -400,17 +407,7 @@ func listCmd(c *cli.Context) error {
clusterName := c.String("clusterName")
absPath, configPath, identityPath := buildPaths(c, clusterName)
cfgHelper, err := cmdutils.NewLoadedConfigHelper(configPath, identityPath)
if err != nil {
fmt.Println("error loading configurations.")
if config.IsErrFetchingSource(err) {
fmt.Println("Make sure the source URL is reachable:")
}
return cli.Exit(err, 1)
}
cfgHelper.Manager().Shutdown()
err = printStatusOnline(absPath, clusterName)
err := printStatusOnline(absPath, clusterName)
if err != nil {
apiErr, ok := err.(*api.Error)
if ok && apiErr.Code != 0 {
@ -422,6 +419,15 @@ func listCmd(c *cli.Context) error {
), 1)
}
// Generate a default config just for the purpose of having
// a badger configuration that the state manager can use to
// open and read the database.
cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath, "crdt")
cfgHelper.Manager().Shutdown() // not needed
cfgHelper.Manager().Default() // we have a default crdt/Badger config
cfgHelper.Configs().Badger.SetBaseDir(absPath)
cfgHelper.Manager().ApplyEnvVars()
err := printStatusOffline(cfgHelper)
if err != nil {
return cli.Exit(errors.Wrap(err, "error obtaining the pinset"), 1)
@ -440,8 +446,8 @@ func printStatusOnline(absPath, clusterName string) error {
if err != nil {
return err
}
// do not return errors after this.
// do not return errors after this.
var pid string
for _, gpi := range gpis {
if pid == "" { // do this once
@ -452,29 +458,12 @@ func printStatusOnline(absPath, clusterName string) error {
}
}
pinInfo := gpi.PeerMap[pid]
// Get pin name
var name string
pin, err := client.Allocation(ctx, gpi.Cid)
if err != nil {
name = "(" + err.Error() + ")"
} else {
name = pin.Name
}
printPin(gpi.Cid, pinInfo.Status.String(), name, pinInfo.Error)
printPin(gpi.Cid, pinInfo.Status.String(), gpi.Name, pinInfo.Error)
}
return nil
}
func printStatusOffline(cfgHelper *cmdutils.ConfigHelper) error {
// The blockstore module loaded from ipfs-lite tends to print
// an error when the datastore is closed before the bloom
// filter cached has finished building. Could not find a way
// to avoid it other than disabling bloom chaching on offline
// ipfs-lite peers which is overkill. So we just hide it.
ipfscluster.SetFacilityLogLevel("blockstore", "critical")
mgr, err := cmdutils.NewStateManagerWithHelper(cfgHelper)
if err != nil {
return err

5
cmd/ipfs-cluster-follow/dist/LICENSE vendored Normal file
View File

@ -0,0 +1,5 @@
Dual-licensed under MIT and ASLv2, by way of the [Permissive License
Stack](https://protocol.ai/blog/announcing-the-permissive-license-stack/).
Apache-2.0: https://www.apache.org/licenses/license-2.0
MIT: https://www.opensource.org/licenses/mit

View File

@ -248,6 +248,13 @@ The peer will stay running in the foreground until manually stopped.
Name: "init",
Usage: "initialize cluster peer with the given URL before running",
},
&cli.StringFlag{
Name: "gateway",
Value: DefaultGateway,
Usage: "gateway URL",
EnvVars: []string{"IPFS_GATEWAY"},
Hidden: true,
},
},
},
{

View File

@ -1,15 +1,17 @@
# go source files
SRC := $(shell find .. -type f -name '*.go')
SRC := $(shell find ../.. -type f -name '*.go')
GOPATH := $(shell go env GOPATH)
GOFLAGS := "-trimpath"
all: ipfs-cluster-service
ipfs-cluster-service: $(SRC)
go build -mod=readonly -ldflags "-X main.commit=$(shell git rev-parse HEAD)"
go build $(GOFLAGS) -mod=readonly -ldflags "-X main.commit=$(shell git rev-parse HEAD)"
build: ipfs-cluster-service
install:
go install -ldflags "-X main.commit=$(shell git rev-parse HEAD)"
go install $(GOFLAGS) -ldflags "-X main.commit=$(shell git rev-parse HEAD)"
clean:
rm -f ipfs-cluster-service

View File

@ -23,7 +23,7 @@ import (
ds "github.com/ipfs/go-datastore"
host "github.com/libp2p/go-libp2p-core/host"
peer "github.com/libp2p/go-libp2p-core/peer"
dht "github.com/libp2p/go-libp2p-kad-dht"
dual "github.com/libp2p/go-libp2p-kad-dht/dual"
pubsub "github.com/libp2p/go-libp2p-pubsub"
ma "github.com/multiformats/go-multiaddr"
@ -87,10 +87,12 @@ func daemon(c *cli.Context) error {
cfgs.Cluster.LeaveOnShutdown = true
}
host, pubsub, dht, err := ipfscluster.NewClusterHost(ctx, cfgHelper.Identity(), cfgs.Cluster)
store := setupDatastore(cfgHelper)
host, pubsub, dht, err := ipfscluster.NewClusterHost(ctx, cfgHelper.Identity(), cfgs.Cluster, store)
checkErr("creating libp2p host", err)
cluster, err := createCluster(ctx, c, cfgHelper, host, pubsub, dht, raftStaging)
cluster, err := createCluster(ctx, c, cfgHelper, host, pubsub, dht, store, raftStaging)
checkErr("starting cluster", err)
// noop if no bootstraps
@ -100,7 +102,7 @@ func daemon(c *cli.Context) error {
// will realize).
go bootstrap(ctx, cluster, bootstraps)
return cmdutils.HandleSignals(ctx, cancel, cluster, host, dht)
return cmdutils.HandleSignals(ctx, cancel, cluster, host, dht, store)
}
// createCluster creates all the necessary things to produce the cluster
@ -112,14 +114,18 @@ func createCluster(
cfgHelper *cmdutils.ConfigHelper,
host host.Host,
pubsub *pubsub.PubSub,
dht *dht.IpfsDHT,
dht *dual.DHT,
store ds.Datastore,
raftStaging bool,
) (*ipfscluster.Cluster, error) {
cfgs := cfgHelper.Configs()
cfgMgr := cfgHelper.Manager()
cfgBytes, err := cfgMgr.ToDisplayJSON()
checkErr("getting configuration string", err)
logger.Debugf("Configuration:\n%s\n", cfgBytes)
ctx, err := tag.New(ctx, tag.Upsert(observations.HostKey, host.ID().Pretty()))
ctx, err = tag.New(ctx, tag.Upsert(observations.HostKey, host.ID().Pretty()))
checkErr("tag context with host id", err)
var apis []ipfscluster.API
@ -161,8 +167,6 @@ func createCluster(
tracer, err := observations.SetupTracing(cfgs.Tracing)
checkErr("setting up Tracing", err)
store := setupDatastore(cfgHelper)
cons, err := setupConsensus(
cfgHelper,
host,
@ -230,7 +234,7 @@ func setupDatastore(cfgHelper *cmdutils.ConfigHelper) ds.Datastore {
func setupConsensus(
cfgHelper *cmdutils.ConfigHelper,
h host.Host,
dht *dht.IpfsDHT,
dht *dual.DHT,
pubsub *pubsub.PubSub,
store ds.Datastore,
raftStaging bool,

5
cmd/ipfs-cluster-service/dist/LICENSE vendored Normal file
View File

@ -0,0 +1,5 @@
Dual-licensed under MIT and ASLv2, by way of the [Permissive License
Stack](https://protocol.ai/blog/announcing-the-permissive-license-stack/).
Apache-2.0: https://www.apache.org/licenses/license-2.0
MIT: https://www.opensource.org/licenses/mit

View File

@ -17,7 +17,7 @@ const lockFileName = "cluster.lock"
var locker *lock
// lock helps to coordinate procees via a lock file
// lock helps to coordinate proceeds via a lock file
type lock struct {
lockCloser io.Closer
path string

View File

@ -20,7 +20,7 @@ import (
ma "github.com/multiformats/go-multiaddr"
semver "github.com/blang/semver"
logging "github.com/ipfs/go-log"
logging "github.com/ipfs/go-log/v2"
cli "github.com/urfave/cli"
)
@ -328,12 +328,11 @@ the peer IDs in the given multiaddresses.
// Generate defaults for all registered components
err := cfgHelper.Manager().Default()
checkErr("generating default configuration", err)
if c.Bool("randomports") {
cfgs := cfgHelper.Configs()
for i := range cfgs.Cluster.ListenAddr {
cfgs.Cluster.ListenAddr[i], err = cmdutils.RandomizePorts(cfgs.Cluster.ListenAddr[i])
}
cfgs.Cluster.ListenAddr, err = cmdutils.RandomizePorts(cfgs.Cluster.ListenAddr)
checkErr("randomizing ports", err)
cfgs.Restapi.HTTPListenAddr, err = cmdutils.RandomizePorts(cfgs.Restapi.HTTPListenAddr)
checkErr("randomizing ports", err)

View File

@ -9,25 +9,30 @@ import (
)
func TestRandomPorts(t *testing.T) {
port := "9096"
m1, _ := ma.NewMultiaddr("/ip4/0.0.0.0/tcp/9096")
m2, _ := ma.NewMultiaddr("/ip4/0.0.0.0/tcp/9096")
m2, _ := ma.NewMultiaddr("/ip6/::/udp/9096")
m1, err := cmdutils.RandomizePorts(m1)
addresses, err := cmdutils.RandomizePorts([]ma.Multiaddr{m1, m2})
if err != nil {
t.Fatal(err)
}
v1, err := m1.ValueForProtocol(ma.P_TCP)
v1, err := addresses[0].ValueForProtocol(ma.P_TCP)
if err != nil {
t.Fatal(err)
}
v2, err := m2.ValueForProtocol(ma.P_TCP)
v2, err := addresses[1].ValueForProtocol(ma.P_UDP)
if err != nil {
t.Fatal(err)
}
if v1 == v2 {
t.Error("expected different ports")
if v1 == port {
t.Error("expected different ipv4 ports")
}
if v2 == port {
t.Error("expected different ipv6 ports")
}
}

View File

@ -5,66 +5,98 @@ package cmdutils
import (
"context"
"fmt"
"io"
"net"
"os"
"os/signal"
"strings"
"syscall"
"time"
"github.com/ipfs/go-datastore"
ipfscluster "github.com/ipfs/ipfs-cluster"
ipfshttp "github.com/ipfs/ipfs-cluster/ipfsconn/ipfshttp"
host "github.com/libp2p/go-libp2p-host"
dht "github.com/libp2p/go-libp2p-kad-dht"
host "github.com/libp2p/go-libp2p-core/host"
dual "github.com/libp2p/go-libp2p-kad-dht/dual"
ma "github.com/multiformats/go-multiaddr"
"github.com/pkg/errors"
"go.uber.org/multierr"
)
// RandomizePorts replaces TCP and UDP ports with random, but valid port
// values.
func RandomizePorts(m ma.Multiaddr) (ma.Multiaddr, error) {
var prev string
// values, on the given multiaddresses
func RandomizePorts(addrs []ma.Multiaddr) ([]ma.Multiaddr, error) {
results := make([]ma.Multiaddr, 0, len(addrs))
var err error
components := []ma.Multiaddr{}
ma.ForEach(m, func(c ma.Component) bool {
code := c.Protocol().Code
for _, m := range addrs {
var prev string
var err error
components := []ma.Multiaddr{}
ma.ForEach(m, func(c ma.Component) bool {
code := c.Protocol().Code
if code != ma.P_TCP && code != ma.P_UDP {
components = append(components, &c)
if code != ma.P_TCP && code != ma.P_UDP {
components = append(components, &c)
prev = c.Value()
return true
}
var ln io.Closer
var port int
ip := prev
if strings.Contains(ip, ":") { // ipv6 needs bracketing
ip = "[" + ip + "]"
}
if c.Protocol().Code == ma.P_UDP {
ln, port, err = listenUDP(c.Protocol().Name, ip)
} else {
ln, port, err = listenTCP(c.Protocol().Name, ip)
}
if err != nil {
return false
}
defer ln.Close()
var c1 *ma.Component
c1, err = ma.NewComponent(c.Protocol().Name, fmt.Sprintf("%d", port))
if err != nil {
return false
}
components = append(components, c1)
prev = c.Value()
return true
}
var ln net.Listener
ln, err = net.Listen(c.Protocol().Name, prev+":")
})
if err != nil {
return false
return results, err
}
defer ln.Close()
results = append(results, ma.Join(components...))
}
var c1 *ma.Component
c1, err = ma.NewComponent(c.Protocol().Name, fmt.Sprintf("%d", getPort(ln, code)))
if err != nil {
return false
}
components = append(components, c1)
prev = c.Value()
return true
})
return ma.Join(components...), err
return results, nil
}
func getPort(ln net.Listener, code int) int {
if code == ma.P_TCP {
return ln.Addr().(*net.TCPAddr).Port
// returns the listener so it can be closed later, and the port
func listenTCP(name, ip string) (io.Closer, int, error) {
ln, err := net.Listen(name, ip+":0")
if err != nil {
return nil, 0, err
}
if code == ma.P_UDP {
return ln.Addr().(*net.UDPAddr).Port
return ln, ln.Addr().(*net.TCPAddr).Port, nil
}
// returns the listener so it can be closed later, and the port
func listenUDP(name, ip string) (io.Closer, int, error) {
ln, err := net.ListenPacket(name, ip+":0")
if err != nil {
return nil, 0, err
}
return 0
return ln, ln.LocalAddr().(*net.UDPAddr).Port, nil
}
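RandomizePorts now takes a slice of multiaddresses and, for every TCP or UDP component, binds a temporary listener on the same IP with port 0 so the OS picks a free port. A minimal usage sketch, assuming the cmdutils package above is importable as github.com/ipfs/ipfs-cluster/cmdutils and using placeholder addresses:

package main

import (
	"fmt"
	"log"

	"github.com/ipfs/ipfs-cluster/cmdutils"
	ma "github.com/multiformats/go-multiaddr"
)

func main() {
	// Placeholder listen addresses; the 9096 ports will be replaced.
	m1, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/9096")
	m2, _ := ma.NewMultiaddr("/ip6/::1/udp/9096/quic")

	randomized, err := cmdutils.RandomizePorts([]ma.Multiaddr{m1, m2})
	if err != nil {
		log.Fatal(err)
	}
	for _, addr := range randomized {
		fmt.Println(addr) // e.g. /ip4/127.0.0.1/tcp/41832
	}
}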
// HandleSignals orderly shuts down an IPFS Cluster peer
@ -75,7 +107,8 @@ func HandleSignals(
cancel context.CancelFunc,
cluster *ipfscluster.Cluster,
host host.Host,
dht *dht.IpfsDHT,
dht *dual.DHT,
store datastore.Datastore,
) error {
signalChan := make(chan os.Signal, 20)
signal.Notify(
@ -93,9 +126,11 @@ func HandleSignals(
handleCtrlC(ctx, cluster, ctrlcCount)
case <-cluster.Done():
cancel()
dht.Close()
host.Close()
return nil
return multierr.Combine(
dht.Close(),
host.Close(),
store.Close(),
)
}
}
}
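With the datastore added to its signature, HandleSignals now aggregates the DHT, host and store close errors through go.uber.org/multierr instead of discarding them. A small standalone illustration of the combining behaviour (the error messages below are made up):

package main

import (
	"errors"
	"fmt"

	"go.uber.org/multierr"
)

func main() {
	// Combine returns nil if every argument is nil; otherwise it returns a
	// single error that wraps all non-nil ones.
	err := multierr.Combine(
		nil,                              // e.g. dht.Close() succeeded
		errors.New("host close failed"),  // e.g. host.Close() failed
		errors.New("store close failed"), // e.g. store.Close() failed
	)
	fmt.Println(err)                       // host close failed; store close failed
	fmt.Println(len(multierr.Errors(err))) // 2
}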
@ -133,7 +168,7 @@ func ErrorOut(m string, a ...interface{}) {
// WaitForIPFS hangs until IPFS API becomes available or the given context is
// cancelled. The IPFS API location is determined by the default ipfshttp
// component configuration and can be overriden using environment variables
// component configuration and can be overridden using environment variables
// that affect that configuration. Note that we have to do this in the blind,
// since we want to wait for IPFS before we even fetch the IPFS component
// configuration (because the configuration might be hosted on IPFS itself)
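For callers this typically means bounding the wait with a context. An illustrative sketch only, assuming a signature along the lines of WaitForIPFS(ctx context.Context) error (not confirmed by this diff):

package main

import (
	"context"
	"log"
	"time"

	"github.com/ipfs/ipfs-cluster/cmdutils"
)

func main() {
	// Give the IPFS daemon up to two minutes to expose its API before
	// the follower attempts to fetch its remote configuration.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()
	if err := cmdutils.WaitForIPFS(ctx); err != nil {
		log.Fatal(err)
	}
}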

View File

@ -37,7 +37,7 @@ func NewStateManager(consensus string, ident *config.Identity, cfgs *Configs) (S
case cfgs.Raft.ConfigKey():
return &raftStateManager{ident, cfgs}, nil
case cfgs.Crdt.ConfigKey():
return &crdtStateManager{ident, cfgs}, nil
return &crdtStateManager{cfgs}, nil
case "":
return nil, errors.New("could not determine the consensus component")
default:
@ -113,8 +113,7 @@ func (raftsm *raftStateManager) Clean() error {
}
type crdtStateManager struct {
ident *config.Identity
cfgs *Configs
cfgs *Configs
}
func (crdtsm *crdtStateManager) GetStore() (ds.Datastore, error) {

View File

@ -14,7 +14,7 @@ import (
"sync"
"time"
logging "github.com/ipfs/go-log"
logging "github.com/ipfs/go-log/v2"
)
var logger = logging.Logger("config")
@ -58,6 +58,8 @@ type ComponentConfig interface {
// Provides a channel to signal the Manager that the configuration
// should be persisted.
SaveCh() <-chan struct{}
// ToDisplayJSON returns a string representing the config excluding hidden fields.
ToDisplayJSON() ([]byte, error)
}
// These are the component configuration types
@ -512,7 +514,6 @@ func (cfg *Manager) ToJSON() ([]byte, error) {
if cfg.clusterConfig != nil {
cfg.clusterConfig.SetBaseDir(dir)
raw, err := cfg.clusterConfig.ToJSON()
if err != nil {
return nil, err
}
@ -541,6 +542,52 @@ func (cfg *Manager) ToJSON() ([]byte, error) {
return nil
}
err = cfg.applyUpdateJSONConfigs(jcfg, updateJSONConfigs)
if err != nil {
return nil, err
}
return DefaultJSONMarshal(jcfg)
}
// ToDisplayJSON returns a printable cluster configuration.
func (cfg *Manager) ToDisplayJSON() ([]byte, error) {
jcfg := &jsonConfig{}
if cfg.clusterConfig != nil {
raw, err := cfg.clusterConfig.ToDisplayJSON()
if err != nil {
return nil, err
}
jcfg.Cluster = new(json.RawMessage)
*jcfg.Cluster = raw
}
updateJSONConfigs := func(section Section, dest *jsonSection) error {
for k, v := range section {
j, err := v.ToDisplayJSON()
if err != nil {
return err
}
if *dest == nil {
*dest = make(jsonSection)
}
jsonSection := *dest
jsonSection[k] = new(json.RawMessage)
*jsonSection[k] = j
}
return nil
}
err := cfg.applyUpdateJSONConfigs(jcfg, updateJSONConfigs)
if err != nil {
return nil, err
}
return DefaultJSONMarshal(jcfg)
}
func (cfg *Manager) applyUpdateJSONConfigs(jcfg *jsonConfig, updateJSONConfigs func(section Section, dest *jsonSection) error) error {
for _, t := range SectionTypes() {
if t == Cluster {
continue
@ -548,11 +595,11 @@ func (cfg *Manager) ToJSON() ([]byte, error) {
jsection := jcfg.getSection(t)
err := updateJSONConfigs(cfg.sections[t], jsection)
if err != nil {
return nil, err
return err
}
}
return DefaultJSONMarshal(jcfg)
return nil
}
// IsLoadedFromJSON tells whether the given component belonging to

View File

@ -92,6 +92,14 @@ func (m *mockCfg) Validate() error {
return nil
}
func (m *mockCfg) ToDisplayJSON() ([]byte, error) {
return []byte(`
{
"a":"b"
}
`), nil
}
func setupConfigManager() *Manager {
cfg := NewManager()
mockCfg := &mockCfg{}
@ -176,3 +184,29 @@ func TestSaveWithSource(t *testing.T) {
t.Error("should have generated a source-only json")
}
}
func TestDefaultJSONMarshalWithoutHiddenFields(t *testing.T) {
type s struct {
A string `json:"a_key"`
B string `json:"b_key" hidden:"true"`
}
cfg := s{
A: "hi",
B: "there",
}
expected := `{
"a_key": "hi",
"b_key": "XXX_hidden_XXX"
}`
res, err := DisplayJSON(&cfg)
if err != nil {
t.Fatal(err)
}
if string(res) != expected {
t.Error("result does not match expected")
t.Error(string(res))
}
}

View File

@ -7,8 +7,8 @@ import (
"fmt"
"io/ioutil"
crypto "github.com/libp2p/go-libp2p-core/crypto"
peer "github.com/libp2p/go-libp2p-core/peer"
crypto "github.com/libp2p/go-libp2p-crypto"
"github.com/kelseyhightower/envconfig"
)
@ -122,7 +122,7 @@ func (ident *Identity) LoadJSON(raw []byte) error {
}
func (ident *Identity) applyIdentityJSON(jID *identityJSON) error {
pid, err := peer.IDB58Decode(jID.ID)
pid, err := peer.Decode(jID.ID)
if err != nil {
err = fmt.Errorf("error decoding cluster ID: %s", err)
return err

View File

@ -3,6 +3,8 @@ package config
import (
"encoding/json"
"fmt"
"reflect"
"strings"
"time"
)
@ -22,7 +24,7 @@ func (sv *Saver) NotifySave() {
select {
case sv.save <- struct{}{}:
default:
logger.Warning("configuration save channel full")
logger.Warn("configuration save channel full")
}
}
@ -114,3 +116,64 @@ func ParseDurations(component string, args ...*DurationOpt) error {
}
return nil
}
type hiddenField struct{}
func (hf hiddenField) MarshalJSON() ([]byte, error) {
return []byte(`"XXX_hidden_XXX"`), nil
}
func (hf hiddenField) UnmarshalJSON(b []byte) error { return nil }
// DisplayJSON takes a pointer to a JSON-friendly configuration struct and
// returns the JSON-encoded representation of it filtering out any struct
// fields marked with the tag `hidden:"true"`, but keeping fields marked
// with `"json:omitempty"`.
func DisplayJSON(cfg interface{}) ([]byte, error) {
cfg = reflect.Indirect(reflect.ValueOf(cfg)).Interface()
origStructT := reflect.TypeOf(cfg)
if origStructT.Kind() != reflect.Struct {
panic("the given argument should be a struct")
}
hiddenFieldT := reflect.TypeOf(hiddenField{})
// create a new struct type with same fields
// but setting hidden fields as hidden.
finalStructFields := []reflect.StructField{}
for i := 0; i < origStructT.NumField(); i++ {
f := origStructT.Field(i)
hidden := f.Tag.Get("hidden") == "true"
if f.PkgPath != "" { // skip unexported
continue
}
if hidden {
f.Type = hiddenFieldT
}
// remove omitempty from tag, ignore other tags except json
var jsonTags []string
for _, s := range strings.Split(f.Tag.Get("json"), ",") {
if s != "omitempty" {
jsonTags = append(jsonTags, s)
}
}
f.Tag = reflect.StructTag(fmt.Sprintf("json:\"%s\"", strings.Join(jsonTags, ",")))
finalStructFields = append(finalStructFields, f)
}
// Parse the original JSON into the new
// struct and re-convert it to JSON.
finalStructT := reflect.StructOf(finalStructFields)
finalValue := reflect.New(finalStructT)
data := finalValue.Interface()
origJSON, err := json.Marshal(cfg)
if err != nil {
return nil, err
}
err = json.Unmarshal(origJSON, data)
if err != nil {
return nil, err
}
return DefaultJSONMarshal(data)
}
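As a complement to the test above, this is roughly how a hypothetical component config would render through DisplayJSON: fields tagged hidden:"true" are masked and omitempty is stripped, so empty fields still show up. The struct, field names and values below are illustrative only:

package main

import (
	"fmt"

	"github.com/ipfs/ipfs-cluster/config"
)

// exampleCfg is a made-up JSON-friendly config struct.
type exampleCfg struct {
	ListenMultiaddress string `json:"listen_multiaddress"`
	Secret             string `json:"secret" hidden:"true"`
	Peername           string `json:"peername,omitempty"`
}

func main() {
	cfg := exampleCfg{
		ListenMultiaddress: "/ip4/0.0.0.0/tcp/9096",
		Secret:             "c0ffee",
	}
	out, err := config.DisplayJSON(&cfg)
	if err != nil {
		panic(err)
	}
	// "secret" prints as "XXX_hidden_XXX"; "peername" appears even though it
	// is empty, because omitempty is removed from the generated struct tags.
	fmt.Println(string(out))
}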

View File

@ -30,10 +30,10 @@ func (c *Cluster) ConnectGraph() (api.ConnectGraph, error) {
for _, member := range members {
// one of the entries is for itself, but that shouldn't hurt
cg.ClusterTrustLinks[peer.IDB58Encode(member)] = c.consensus.IsTrustedPeer(ctx, member)
cg.ClusterTrustLinks[peer.Encode(member)] = c.consensus.IsTrustedPeer(ctx, member)
}
peers := make([][]*api.ID, len(members), len(members))
peers := make([][]*api.ID, len(members))
ctxs, cancels := rpcutil.CtxsWithCancel(ctx, len(members))
defer rpcutil.MultiCancel(cancels)
@ -48,7 +48,7 @@ func (c *Cluster) ConnectGraph() (api.ConnectGraph, error) {
)
for i, err := range errs {
p := peer.IDB58Encode(members[i])
p := peer.Encode(members[i])
cg.ClusterLinks[p] = make([]peer.ID, 0)
if err != nil { // Only setting cluster connections when no error occurs
logger.Debugf("RPC error reaching cluster peer %s: %s", p, err.Error())
@ -59,7 +59,7 @@ func (c *Cluster) ConnectGraph() (api.ConnectGraph, error) {
cg.IDtoPeername[p] = pID.Peername
// IPFS connections
if !selfConnection {
logger.Warningf("cluster peer %s not its own peer. No ipfs info ", p)
logger.Warnf("cluster peer %s not its own peer. No ipfs info ", p)
continue
}
c.recordIPFSLinks(&cg, pID)
@ -76,7 +76,7 @@ func (c *Cluster) recordClusterLinks(cg *api.ConnectGraph, p string, peers []*ap
logger.Debugf("Peer %s errored connecting to its peer %s", p, id.ID.Pretty())
continue
}
if peer.IDB58Encode(id.ID) == p {
if peer.Encode(id.ID) == p {
selfConnection = true
pID = id
} else {
@ -89,15 +89,15 @@ func (c *Cluster) recordClusterLinks(cg *api.ConnectGraph, p string, peers []*ap
func (c *Cluster) recordIPFSLinks(cg *api.ConnectGraph, pID *api.ID) {
ipfsID := pID.IPFS.ID
if pID.IPFS.Error != "" { // Only setting ipfs connections when no error occurs
logger.Warningf("ipfs id: %s has error: %s. Skipping swarm connections", ipfsID.Pretty(), pID.IPFS.Error)
logger.Warnf("ipfs id: %s has error: %s. Skipping swarm connections", ipfsID.Pretty(), pID.IPFS.Error)
return
}
pid := peer.IDB58Encode(pID.ID)
ipfsPid := peer.IDB58Encode(ipfsID)
pid := peer.Encode(pID.ID)
ipfsPid := peer.Encode(ipfsID)
if _, ok := cg.IPFSLinks[pid]; ok {
logger.Warningf("ipfs id: %s already recorded, one ipfs daemon in use by multiple cluster peers", ipfsID.Pretty())
logger.Warnf("ipfs id: %s already recorded, one ipfs daemon in use by multiple cluster peers", ipfsID.Pretty())
}
cg.ClustertoIPFS[pid] = ipfsID
cg.IPFSLinks[ipfsPid] = make([]peer.ID, 0)
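This file, like several others in the diff, swaps the deprecated peer.IDB58Encode/peer.IDB58Decode helpers for peer.Encode/peer.Decode from go-libp2p-core. At present both pairs produce and accept the same base58 representation, so the change is mechanical; a tiny round-trip sketch (the peer ID string is just an example):

package main

import (
	"fmt"

	peer "github.com/libp2p/go-libp2p-core/peer"
)

func main() {
	// Any valid base58-encoded peer ID works here.
	s := "QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N"

	pid, err := peer.Decode(s) // replaces peer.IDB58Decode
	if err != nil {
		panic(err)
	}
	fmt.Println(peer.Encode(pid) == s) // replaces peer.IDB58Encode; prints true
}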

View File

@ -115,7 +115,7 @@ func (cfg *Config) applyJSONConfig(jcfg *jsonConfig) error {
cfg.TrustedPeers = []peer.ID{}
break
}
pid, err := peer.IDB58Decode(p)
pid, err := peer.Decode(p)
if err != nil {
return fmt.Errorf("error parsing trusted peers: %s", err)
}
@ -191,3 +191,8 @@ func (cfg *Config) ApplyEnvVars() error {
return cfg.applyJSONConfig(jcfg)
}
// ToDisplayJSON returns JSON config as a string.
func (cfg *Config) ToDisplayJSON() ([]byte, error) {
return config.DisplayJSON(cfg.toJSONConfig())
}

View File

@ -39,6 +39,10 @@ func TestLoadJSON(t *testing.T) {
"cluster_name": "test",
"trusted_peers": []
}`))
if err != nil {
t.Fatal(err)
}
if cfg.TrustAll {
t.Error("TrustAll is only enabled with '*'")
}

View File

@ -7,6 +7,7 @@ import (
"sync"
"time"
"github.com/ipfs/go-cid"
"github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/pstoremgr"
"github.com/ipfs/ipfs-cluster/state"
@ -17,12 +18,12 @@ import (
query "github.com/ipfs/go-datastore/query"
crdt "github.com/ipfs/go-ds-crdt"
dshelp "github.com/ipfs/go-ipfs-ds-help"
logging "github.com/ipfs/go-log"
logging "github.com/ipfs/go-log/v2"
host "github.com/libp2p/go-libp2p-core/host"
peer "github.com/libp2p/go-libp2p-core/peer"
peerstore "github.com/libp2p/go-libp2p-core/peerstore"
"github.com/libp2p/go-libp2p-core/routing"
rpc "github.com/libp2p/go-libp2p-gorpc"
dht "github.com/libp2p/go-libp2p-kad-dht"
pubsub "github.com/libp2p/go-libp2p-pubsub"
multihash "github.com/multiformats/go-multihash"
@ -64,7 +65,7 @@ type Consensus struct {
crdt *crdt.Datastore
ipfs *ipfslite.Peer
dht *dht.IpfsDHT
dht routing.Routing
pubsub *pubsub.PubSub
rpcClient *rpc.Client
@ -81,7 +82,7 @@ type Consensus struct {
// data and all will be prefixed with cfg.DatastoreNamespace.
func New(
host host.Host,
dht *dht.IpfsDHT,
dht routing.Routing,
pubsub *pubsub.PubSub,
cfg *Config,
store ds.Datastore,
@ -212,11 +213,17 @@ func (css *Consensus) setup() {
ctx, span := trace.StartSpan(css.ctx, "crdt/DeleteHook")
defer span.End()
c, err := dshelp.DsKeyToCid(k)
kb, err := dshelp.BinaryFromDsKey(k)
if err != nil {
logger.Error(err, k)
return
}
c, err := cid.Cast(kb)
if err != nil {
logger.Error(err, k)
return
}
pin := api.PinCid(c)
err = css.rpcClient.CallContext(
@ -316,7 +323,7 @@ func (css *Consensus) Ready(ctx context.Context) <-chan struct{} {
// IsTrustedPeer returns whether the given peer is taken into account
// when submitting updates to the consensus state.
func (css *Consensus) IsTrustedPeer(ctx context.Context, pid peer.ID) bool {
ctx, span := trace.StartSpan(ctx, "consensus/IsTrustedPeer")
_, span := trace.StartSpan(ctx, "consensus/IsTrustedPeer")
defer span.End()
if css.config.TrustAll {
@ -336,7 +343,7 @@ func (css *Consensus) IsTrustedPeer(ctx context.Context, pid peer.ID) bool {
// has the highest priority when the peerstore is saved, and its addresses
// are always remembered.
func (css *Consensus) Trust(ctx context.Context, pid peer.ID) error {
ctx, span := trace.StartSpan(ctx, "consensus/Trust")
_, span := trace.StartSpan(ctx, "consensus/Trust")
defer span.End()
css.trustedPeers.Store(pid, struct{}{})
@ -351,7 +358,7 @@ func (css *Consensus) Trust(ctx context.Context, pid peer.ID) error {
// Distrust removes a peer from the "trusted" set.
func (css *Consensus) Distrust(ctx context.Context, pid peer.ID) error {
ctx, span := trace.StartSpan(ctx, "consensus/Distrust")
_, span := trace.StartSpan(ctx, "consensus/Distrust")
defer span.End()
css.trustedPeers.Delete(pid)
@ -493,8 +500,7 @@ func OfflineState(cfg *Config, store ds.Datastore) (state.BatchingState, error)
opts := crdt.DefaultOptions()
opts.Logger = logger
var blocksDatastore ds.Batching
blocksDatastore = namespace.Wrap(
var blocksDatastore ds.Batching = namespace.Wrap(
batching,
ds.NewKey(cfg.DatastoreNamespace).ChildString(blocksNs),
)

View File

@ -11,15 +11,18 @@ import (
"github.com/ipfs/ipfs-cluster/test"
cid "github.com/ipfs/go-cid"
ipns "github.com/ipfs/go-ipns"
libp2p "github.com/libp2p/go-libp2p"
host "github.com/libp2p/go-libp2p-core/host"
peerstore "github.com/libp2p/go-libp2p-core/peerstore"
dht "github.com/libp2p/go-libp2p-kad-dht"
dual "github.com/libp2p/go-libp2p-kad-dht/dual"
pubsub "github.com/libp2p/go-libp2p-pubsub"
record "github.com/libp2p/go-libp2p-record"
routedhost "github.com/libp2p/go-libp2p/p2p/host/routed"
)
func makeTestingHost(t *testing.T) (host.Host, *pubsub.PubSub, *dht.IpfsDHT) {
func makeTestingHost(t *testing.T) (host.Host, *pubsub.PubSub, *dual.DHT) {
ctx := context.Background()
h, err := libp2p.New(
ctx,
@ -40,19 +43,13 @@ func makeTestingHost(t *testing.T) (host.Host, *pubsub.PubSub, *dht.IpfsDHT) {
t.Fatal(err)
}
idht, err := dht.New(ctx, h)
if err != nil {
h.Close()
t.Fatal(err)
}
btstrCfg := dht.BootstrapConfig{
Queries: 1,
Period: 200 * time.Millisecond,
Timeout: 100 * time.Millisecond,
}
err = idht.BootstrapWithConfig(ctx, btstrCfg)
idht, err := dual.New(ctx, h,
dual.DHTOption(dht.NamespacedValidator("pk", record.PublicKeyValidator{})),
dual.DHTOption(dht.NamespacedValidator("ipns", ipns.Validator{KeyBook: h.Peerstore()})),
dual.DHTOption(dht.Concurrency(10)),
dual.DHTOption(dht.RoutingTableRefreshPeriod(200*time.Millisecond)),
dual.DHTOption(dht.RoutingTableRefreshQueryTimeout(100*time.Millisecond)),
)
if err != nil {
h.Close()
t.Fatal(err)

View File

@ -199,7 +199,7 @@ func (cfg *Config) applyJSONConfig(jcfg *jsonConfig) error {
parseDuration := func(txt string) time.Duration {
d, _ := time.ParseDuration(txt)
if txt != "" && d == 0 {
logger.Warningf("%s is not a valid duration. Default will be used", txt)
logger.Warnf("%s is not a valid duration. Default will be used", txt)
}
return d
}
@ -266,7 +266,7 @@ func (cfg *Config) toJSONConfig() *jsonConfig {
}
if cfg.DatastoreNamespace != DefaultDatastoreNamespace {
jcfg.DatastoreNamespace = cfg.DatastoreNamespace
// otherwise leave empty so it gets ommitted.
// otherwise leave empty so it gets omitted.
}
return jcfg
}
@ -313,3 +313,8 @@ func (cfg *Config) GetDataFolder() string {
}
return cfg.DataFolder
}
// ToDisplayJSON returns JSON config as a string.
func (cfg *Config) ToDisplayJSON() ([]byte, error) {
return config.DisplayJSON(cfg.toJSONConfig())
}

View File

@ -15,13 +15,12 @@ import (
"github.com/ipfs/ipfs-cluster/state/dsstate"
ds "github.com/ipfs/go-datastore"
logging "github.com/ipfs/go-log"
logging "github.com/ipfs/go-log/v2"
consensus "github.com/libp2p/go-libp2p-consensus"
host "github.com/libp2p/go-libp2p-core/host"
peer "github.com/libp2p/go-libp2p-core/peer"
rpc "github.com/libp2p/go-libp2p-gorpc"
libp2praft "github.com/libp2p/go-libp2p-raft"
ma "github.com/multiformats/go-multiaddr"
"go.opencensus.io/tag"
"go.opencensus.io/trace"
@ -223,7 +222,7 @@ func (cc *Consensus) SetClient(c *rpc.Client) {
// Ready returns a channel which is signaled when the Consensus
// algorithm has finished bootstrapping and is ready to use
func (cc *Consensus) Ready(ctx context.Context) <-chan struct{} {
ctx, span := trace.StartSpan(ctx, "consensus/Ready")
_, span := trace.StartSpan(ctx, "consensus/Ready")
defer span.End()
return cc.readyCh
@ -263,7 +262,7 @@ func (cc *Consensus) redirectToLeader(method string, arg interface{}) (bool, err
// No leader, wait for one
if err != nil {
logger.Warning("there seems to be no leader. Waiting for one")
logger.Warn("there seems to be no leader. Waiting for one")
rctx, cancel := context.WithTimeout(
ctx,
cc.config.WaitForLeaderTimeout,
@ -276,7 +275,7 @@ func (cc *Consensus) redirectToLeader(method string, arg interface{}) (bool, err
if err != nil {
return false, fmt.Errorf("timed out waiting for leader: %s", err)
}
leader, err = peer.IDB58Decode(pidstr)
leader, err = peer.Decode(pidstr)
if err != nil {
return false, err
}
@ -409,7 +408,7 @@ func (cc *Consensus) AddPeer(ctx context.Context, pid peer.ID) error {
}
// Being here means we are the leader and can commit
cc.shutdownLock.RLock() // do not shutdown while committing
finalErr = cc.raft.AddPeer(ctx, peer.IDB58Encode(pid))
finalErr = cc.raft.AddPeer(ctx, peer.Encode(pid))
cc.shutdownLock.RUnlock()
if finalErr != nil {
@ -440,7 +439,7 @@ func (cc *Consensus) RmPeer(ctx context.Context, pid peer.ID) error {
}
// Being here means we are the leader and can commit
cc.shutdownLock.RLock() // do not shutdown while committing
finalErr = cc.raft.RemovePeer(ctx, peer.IDB58Encode(pid))
finalErr = cc.raft.RemovePeer(ctx, peer.Encode(pid))
cc.shutdownLock.RUnlock()
if finalErr != nil {
time.Sleep(cc.config.CommitRetryDelay)
@ -458,7 +457,7 @@ func (cc *Consensus) RmPeer(ctx context.Context, pid peer.ID) error {
// writes to the shared state should happen through the Consensus component
// methods.
func (cc *Consensus) State(ctx context.Context) (state.ReadOnly, error) {
ctx, span := trace.StartSpan(ctx, "consensus/State")
_, span := trace.StartSpan(ctx, "consensus/State")
defer span.End()
st, err := cc.consensus.GetLogHead()
@ -479,7 +478,7 @@ func (cc *Consensus) State(ctx context.Context) (state.ReadOnly, error) {
// Leader returns the peerID of the Leader of the
// cluster. It returns an error when there is no leader.
func (cc *Consensus) Leader(ctx context.Context) (peer.ID, error) {
ctx, span := trace.StartSpan(ctx, "consensus/Leader")
_, span := trace.StartSpan(ctx, "consensus/Leader")
defer span.End()
// Note the hard-dependency on raft here...
@ -489,7 +488,7 @@ func (cc *Consensus) Leader(ctx context.Context) (peer.ID, error) {
// Clean removes the Raft persisted state.
func (cc *Consensus) Clean(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "consensus/Clean")
_, span := trace.StartSpan(ctx, "consensus/Clean")
defer span.End()
cc.shutdownLock.RLock()
@ -532,7 +531,7 @@ func (cc *Consensus) Peers(ctx context.Context) ([]peer.ID, error) {
sort.Strings(raftPeers)
for _, p := range raftPeers {
id, err := peer.IDB58Decode(p)
id, err := peer.Decode(p)
if err != nil {
panic("could not decode peer")
}
@ -541,14 +540,6 @@ func (cc *Consensus) Peers(ctx context.Context) ([]peer.ID, error) {
return peers, nil
}
func parsePIDFromMultiaddr(addr ma.Multiaddr) string {
pidstr, err := addr.ValueForProtocol(ma.P_P2P)
if err != nil {
panic("peer badly encoded")
}
return pidstr
}
// OfflineState state returns a cluster state by reading the Raft data and
// writing it to the given datastore which is then wrapped as a state.State.
// Usually an in-memory datastore suffices. The given datastore should be

View File

@ -8,7 +8,7 @@ import (
"time"
hclog "github.com/hashicorp/go-hclog"
logging "github.com/ipfs/go-log"
logging "github.com/ipfs/go-log/v2"
)
const (
@ -52,6 +52,21 @@ func (log *hcLogToLogger) format(msg string, args []interface{}) string {
return name + msg + argstr
}
func (log *hcLogToLogger) Log(level hclog.Level, msg string, args ...interface{}) {
switch level {
case hclog.Trace, hclog.Debug:
log.Debug(msg, args)
case hclog.NoLevel, hclog.Info:
log.Info(msg, args)
case hclog.Warn:
log.Warn(msg, args)
case hclog.Error:
log.Error(msg, args)
default:
log.Warn(msg, args)
}
}
func (log *hcLogToLogger) Trace(msg string, args ...interface{}) {
raftLogger.Debug(log.format(msg, args))
}
@ -65,7 +80,7 @@ func (log *hcLogToLogger) Info(msg string, args ...interface{}) {
}
func (log *hcLogToLogger) Warn(msg string, args ...interface{}) {
raftLogger.Warning(log.format(msg, args))
raftLogger.Warn(log.format(msg, args))
}
func (log *hcLogToLogger) Error(msg string, args ...interface{}) {
@ -134,7 +149,7 @@ type logForwarder struct {
var raftStdLogger = log.New(&logForwarder{}, "", 0)
// Write forwards to our go-log logger.
// Write forwards to our go-log/v2 logger.
// According to https://golang.org/pkg/log/#Logger.Output
// it is called per line.
func (fw *logForwarder) Write(p []byte) (n int, e error) {
@ -206,7 +221,7 @@ func (fw *logForwarder) log(t int, msg string) {
case info:
raftLogger.Info(msg)
case warn:
raftLogger.Warning(msg)
raftLogger.Warn(msg)
case err:
raftLogger.Error(msg)
default:

View File

@ -20,10 +20,6 @@ import (
"go.opencensus.io/trace"
)
// errBadRaftState is returned when the consensus component cannot start
// because the cluster peers do not match the raft peers.
var errBadRaftState = errors.New("cluster peers do not match raft peers")
// ErrWaitingForSelf is returned when we are waiting for ourselves to depart
// the peer set, which won't happen
var errWaitingForSelf = errors.New("waiting for ourselves to depart")
@ -78,7 +74,7 @@ func newRaftWrapper(
raftW.host = host
raftW.staging = staging
// Set correct LocalID
cfg.RaftConfig.LocalID = hraft.ServerID(peer.IDB58Encode(host.ID()))
cfg.RaftConfig.LocalID = hraft.ServerID(peer.Encode(host.ID()))
df := cfg.GetDataFolder()
err := makeDataFolder(df)
@ -235,7 +231,7 @@ func makeServerConf(peers []peer.ID) hraft.Configuration {
// Servers are peers + self. We avoid duplicate entries below
for _, pid := range peers {
p := peer.IDB58Encode(pid)
p := peer.Encode(pid)
_, ok := sm[p]
if !ok { // avoid dups
sm[p] = struct{}{}
@ -277,7 +273,7 @@ func (rw *raftWrapper) WaitForVoter(ctx context.Context) error {
logger.Debug("waiting until we are promoted to a voter")
pid := hraft.ServerID(peer.IDB58Encode(rw.host.ID()))
pid := hraft.ServerID(peer.Encode(rw.host.ID()))
for {
select {
case <-ctx.Done():
@ -388,34 +384,29 @@ func (rw *raftWrapper) Snapshot() error {
func (rw *raftWrapper) snapshotOnShutdown() error {
var err error
for i := 0; i < maxShutdownSnapshotRetries; i++ {
done := false
ctx, cancel := context.WithTimeout(context.Background(), waitForUpdatesShutdownTimeout)
err := rw.WaitForUpdates(ctx)
err = rw.WaitForUpdates(ctx)
cancel()
if err != nil {
logger.Warning("timed out waiting for state updates before shutdown. Snapshotting may fail")
done = true // let's not wait for updates again
logger.Warn("timed out waiting for state updates before shutdown. Snapshotting may fail")
return rw.Snapshot()
}
err = rw.Snapshot()
if err != nil {
err = errors.New("could not snapshot raft: " + err.Error())
} else {
err = nil
done = true
if err == nil {
return nil // things worked
}
if done {
break
}
logger.Warningf("retrying to snapshot (%d/%d)...", i+1, maxShutdownSnapshotRetries)
// There was an error
err = errors.New("could not snapshot raft: " + err.Error())
logger.Warnf("retrying to snapshot (%d/%d)...", i+1, maxShutdownSnapshotRetries)
}
return err
}
// Shutdown shuts down Raft and closes the BoltDB.
func (rw *raftWrapper) Shutdown(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "consensus/raft/Shutdown")
_, span := trace.StartSpan(ctx, "consensus/raft/Shutdown")
defer span.End()
errMsgs := ""
@ -511,14 +502,14 @@ func (rw *raftWrapper) RemovePeer(ctx context.Context, peer string) error {
// Leader returns Raft's leader. It may be an empty string if
// there is no leader or it is unknown.
func (rw *raftWrapper) Leader(ctx context.Context) string {
ctx, span := trace.StartSpan(ctx, "consensus/raft/Leader")
_, span := trace.StartSpan(ctx, "consensus/raft/Leader")
defer span.End()
return string(rw.raft.Leader())
}
func (rw *raftWrapper) Peers(ctx context.Context) ([]string, error) {
ctx, span := trace.StartSpan(ctx, "consensus/raft/Peers")
_, span := trace.StartSpan(ctx, "consensus/raft/Peers")
defer span.End()
ids := make([]string, 0)
@ -594,8 +585,7 @@ func SnapshotSave(cfg *Config, newState state.State, pids []peer.ID) error {
}
// make a new raft snapshot
var raftSnapVersion hraft.SnapshotVersion
raftSnapVersion = 1 // As of hraft v1.0.0 this is always 1
var raftSnapVersion hraft.SnapshotVersion = 1 // As of hraft v1.0.0 this is always 1
configIndex := uint64(1)
var raftIndex uint64
var raftTerm uint64
@ -654,9 +644,9 @@ func CleanupRaft(cfg *Config) error {
dbh := newDataBackupHelper(dataFolder, keep)
err = dbh.makeBackup()
if err != nil {
logger.Warning(err)
logger.Warning("the state could not be cleaned properly")
logger.Warning("manual intervention may be needed before starting cluster again")
logger.Warn(err)
logger.Warn("the state could not be cleaned properly")
logger.Warn("manual intervention may be needed before starting cluster again")
}
return nil
}
@ -692,7 +682,7 @@ func (rw *raftWrapper) observePeers() {
case obs := <-obsCh:
pObs := obs.Data.(hraft.PeerObservation)
logger.Info("raft peer departed. Removing from peerstore: ", pObs.Peer.ID)
pID, err := peer.IDB58Decode(string(pObs.Peer.ID))
pID, err := peer.Decode(string(pObs.Peer.ID))
if err != nil {
logger.Error(err)
continue

View File

@ -22,13 +22,22 @@ const (
)
var (
// DefaultBadgerOptions has to be a var because badger.DefaultOptions is.
// DefaultBadgerOptions has to be a var because badger.DefaultOptions
// is. Values are customized during Init().
DefaultBadgerOptions badger.Options
)
func init() {
// Following go-ds-badger guidance
DefaultBadgerOptions = badger.DefaultOptions("")
DefaultBadgerOptions.CompactL0OnClose = false
DefaultBadgerOptions.Truncate = true
DefaultBadgerOptions.ValueLogLoadingMode = options.FileIO
// Explicitly set this to mmap. This doesn't use much memory anyways.
DefaultBadgerOptions.TableLoadingMode = options.MemoryMap
// Reduce this from 64MiB to 16MiB. That means badger will hold on to
// 20MiB by default instead of 80MiB.
DefaultBadgerOptions.MaxTableSize = 16 << 20
}
// Config is used to initialize a BadgerDB datastore. It implements the
@ -225,3 +234,8 @@ func (cfg *Config) GetFolder() string {
return filepath.Join(cfg.BaseDir, cfg.Folder)
}
// ToDisplayJSON returns JSON config as a string.
func (cfg *Config) ToDisplayJSON() ([]byte, error) {
return config.DisplayJSON(cfg.toJSONConfig())
}

119
go.mod
View File

@ -1,82 +1,79 @@
module github.com/ipfs/ipfs-cluster
require (
contrib.go.opencensus.io/exporter/jaeger v0.1.0
contrib.go.opencensus.io/exporter/prometheus v0.1.0
contrib.go.opencensus.io/exporter/jaeger v0.2.1
contrib.go.opencensus.io/exporter/prometheus v0.2.0
github.com/blang/semver v3.5.1+incompatible
github.com/dgraph-io/badger v1.6.0
github.com/dgraph-io/badger v1.6.2
github.com/dustin/go-humanize v1.0.0
github.com/gogo/protobuf v1.3.1
github.com/golang/protobuf v1.3.2
github.com/google/uuid v1.1.1
github.com/gorilla/handlers v1.4.2
github.com/gorilla/mux v1.7.3
github.com/hashicorp/go-hclog v0.10.0
github.com/golang/protobuf v1.4.3
github.com/google/uuid v1.1.2
github.com/gorilla/handlers v1.5.1
github.com/gorilla/mux v1.8.0
github.com/hashicorp/go-hclog v0.14.1
github.com/hashicorp/raft v1.1.1
github.com/hashicorp/raft-boltdb v0.0.0-20190605210249-ef2e128ed477
github.com/hsanjuan/ipfs-lite v0.1.6
github.com/imdario/mergo v0.3.7
github.com/hsanjuan/ipfs-lite v1.1.17
github.com/imdario/mergo v0.3.11
github.com/ipfs/go-block-format v0.0.2
github.com/ipfs/go-cid v0.0.3
github.com/ipfs/go-datastore v0.1.1
github.com/ipfs/go-ds-badger v0.0.7
github.com/ipfs/go-ds-crdt v0.1.6
github.com/ipfs/go-fs-lock v0.0.1
github.com/ipfs/go-ipfs-api v0.0.2
github.com/ipfs/go-ipfs-chunker v0.0.1
github.com/ipfs/go-ipfs-config v0.0.11
github.com/ipfs/go-ipfs-ds-help v0.0.1
github.com/ipfs/go-ipfs-files v0.0.6
github.com/ipfs/go-cid v0.0.7
github.com/ipfs/go-datastore v0.4.5
github.com/ipfs/go-ds-badger v0.2.6
github.com/ipfs/go-ds-crdt v0.1.17
github.com/ipfs/go-fs-lock v0.0.6
github.com/ipfs/go-ipfs-api v0.2.0
github.com/ipfs/go-ipfs-chunker v0.0.5
github.com/ipfs/go-ipfs-config v0.10.0
github.com/ipfs/go-ipfs-ds-help v1.0.0
github.com/ipfs/go-ipfs-files v0.0.8
github.com/ipfs/go-ipfs-pinner v0.0.4
github.com/ipfs/go-ipfs-posinfo v0.0.1
github.com/ipfs/go-ipfs-util v0.0.1
github.com/ipfs/go-ipld-cbor v0.0.3
github.com/ipfs/go-ipld-format v0.0.2
github.com/ipfs/go-log v0.0.1
github.com/ipfs/go-merkledag v0.2.3
github.com/ipfs/go-mfs v0.1.1
github.com/ipfs/go-path v0.0.7
github.com/ipfs/go-unixfs v0.2.2
github.com/ipfs/go-ipld-cbor v0.0.4
github.com/ipfs/go-ipld-format v0.2.0
github.com/ipfs/go-ipns v0.0.2
github.com/ipfs/go-log/v2 v2.1.1
github.com/ipfs/go-merkledag v0.3.2
github.com/ipfs/go-mfs v0.1.2
github.com/ipfs/go-path v0.0.8
github.com/ipfs/go-unixfs v0.2.4
github.com/kelseyhightower/envconfig v1.4.0
github.com/kishansagathiya/go-dot v0.1.0
github.com/lanzafame/go-libp2p-ocgorpc v0.1.1
github.com/libp2p/go-libp2p v0.4.1
github.com/libp2p/go-libp2p-autonat-svc v0.1.0
github.com/libp2p/go-libp2p-circuit v0.1.4
github.com/libp2p/go-libp2p-connmgr v0.1.1
github.com/libp2p/go-libp2p v0.11.0
github.com/libp2p/go-libp2p-circuit v0.3.1
github.com/libp2p/go-libp2p-connmgr v0.2.4
github.com/libp2p/go-libp2p-consensus v0.0.1
github.com/libp2p/go-libp2p-core v0.2.5
github.com/libp2p/go-libp2p-crypto v0.1.0
github.com/libp2p/go-libp2p-gorpc v0.1.0
github.com/libp2p/go-libp2p-gostream v0.2.0
github.com/libp2p/go-libp2p-host v0.1.0
github.com/libp2p/go-libp2p-http v0.1.4
github.com/libp2p/go-libp2p-kad-dht v0.2.1
github.com/libp2p/go-libp2p-peer v0.2.0
github.com/libp2p/go-libp2p-peerstore v0.1.4
github.com/libp2p/go-libp2p-pnet v0.1.0
github.com/libp2p/go-libp2p-protocol v0.1.0
github.com/libp2p/go-libp2p-pubsub v0.1.1
github.com/libp2p/go-libp2p-quic-transport v0.2.2
github.com/libp2p/go-libp2p-raft v0.1.4
github.com/libp2p/go-libp2p-secio v0.2.1
github.com/libp2p/go-libp2p-tls v0.1.2
github.com/libp2p/go-ws-transport v0.1.2
github.com/multiformats/go-multiaddr v0.1.2
github.com/libp2p/go-libp2p-core v0.6.1
github.com/libp2p/go-libp2p-gorpc v0.1.1
github.com/libp2p/go-libp2p-gostream v0.2.2
github.com/libp2p/go-libp2p-http v0.1.6
github.com/libp2p/go-libp2p-kad-dht v0.10.0
github.com/libp2p/go-libp2p-noise v0.1.2
github.com/libp2p/go-libp2p-peerstore v0.2.6
github.com/libp2p/go-libp2p-pubsub v0.3.6
github.com/libp2p/go-libp2p-quic-transport v0.8.2
github.com/libp2p/go-libp2p-raft v0.1.6
github.com/libp2p/go-libp2p-record v0.1.3
github.com/libp2p/go-libp2p-secio v0.2.2
github.com/libp2p/go-libp2p-tls v0.1.3
github.com/libp2p/go-ws-transport v0.3.1
github.com/multiformats/go-multiaddr v0.3.1
github.com/multiformats/go-multiaddr-dns v0.2.0
github.com/multiformats/go-multiaddr-net v0.1.1
github.com/multiformats/go-multicodec v0.1.6
github.com/multiformats/go-multihash v0.0.10
github.com/pkg/errors v0.8.1
github.com/prometheus/client_golang v1.1.0
github.com/multiformats/go-multihash v0.0.14
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.7.1
github.com/rs/cors v1.7.0
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926
github.com/ugorji/go/codec v1.1.7
github.com/urfave/cli v1.22.1
github.com/urfave/cli/v2 v2.0.0
go.opencensus.io v0.22.1
golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd
github.com/ugorji/go/codec v1.1.13
github.com/urfave/cli v1.22.4
github.com/urfave/cli/v2 v2.2.0
go.opencensus.io v0.22.5
go.uber.org/multierr v1.6.0
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
gonum.org/v1/gonum v0.0.0-20190926113837-94b2bbd8ac13
gonum.org/v1/plot v0.0.0-20190615073203-9aa86143727f
google.golang.org/protobuf v1.25.0
)
go 1.13
go 1.15

1224
go.sum

File diff suppressed because it is too large

View File

@ -127,3 +127,8 @@ func (cfg *Config) toJSONConfig() *jsonConfig {
MetricType: cfg.MetricType.String(),
}
}
// ToDisplayJSON returns JSON config as a string.
func (cfg *Config) ToDisplayJSON() ([]byte, error) {
return config.DisplayJSON(cfg.toJSONConfig())
}

View File

@ -8,7 +8,7 @@ import (
"github.com/ipfs/ipfs-cluster/api"
logging "github.com/ipfs/go-log"
logging "github.com/ipfs/go-log/v2"
rpc "github.com/libp2p/go-libp2p-gorpc"
"go.opencensus.io/trace"
@ -59,7 +59,7 @@ func (disk *Informer) SetClient(c *rpc.Client) {
// Shutdown is called on cluster shutdown. We just invalidate
// any metrics from this point.
func (disk *Informer) Shutdown(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "informer/disk/Shutdown")
_, span := trace.StartSpan(ctx, "informer/disk/Shutdown")
defer span.End()
disk.rpcClient = nil
@ -98,7 +98,13 @@ func (disk *Informer) GetMetric(ctx context.Context) *api.Metric {
} else {
switch disk.config.MetricType {
case MetricFreeSpace:
metric = repoStat.StorageMax - repoStat.RepoSize
size := repoStat.RepoSize
total := repoStat.StorageMax
if size < total {
metric = total - size
} else { // Make sure we don't underflow
metric = 0
}
case MetricRepoSize:
metric = repoStat.RepoSize
}
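The underflow guard above exists because the repo statistics are unsigned 64-bit integers: when the repo has grown beyond its configured StorageMax, the old subtraction wrapped around to an enormous free-space value instead of going negative. A tiny standalone illustration of the wrap-around and the clamp:

package main

import "fmt"

func main() {
	var total, size uint64 = 10, 11 // e.g. GiB; the repo exceeds its maximum

	fmt.Println(total - size) // 18446744073709551615: unsigned wrap-around

	var metric uint64
	if size < total {
		metric = total - size
	} else {
		metric = 0 // clamp to zero, as the informer now does
	}
	fmt.Println(metric) // 0
}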

View File

@ -95,3 +95,8 @@ func (cfg *Config) toJSONConfig() *jsonConfig {
MetricTTL: cfg.MetricTTL.String(),
}
}
// ToDisplayJSON returns JSON config as a string.
func (cfg *Config) ToDisplayJSON() ([]byte, error) {
return config.DisplayJSON(cfg.toJSONConfig())
}

View File

@ -44,7 +44,7 @@ func (npi *Informer) SetClient(c *rpc.Client) {
// Shutdown is called on cluster shutdown. We just invalidate
// any metrics from this point.
func (npi *Informer) Shutdown(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "informer/numpin/Shutdown")
_, span := trace.StartSpan(ctx, "informer/numpin/Shutdown")
defer span.End()
npi.rpcClient = nil

View File

@ -2,7 +2,7 @@
// allows to orchestrate pinning operations among several IPFS nodes.
//
// IPFS Cluster peers form a separate libp2p swarm. A Cluster peer uses
// multiple Cluster Componenets which perform different tasks like managing
// multiple Cluster Components which perform different tasks like managing
// the underlying IPFS daemons, or providing APIs for external control.
package ipfscluster
@ -56,7 +56,7 @@ type Consensus interface {
// IsTrustedPeer returns true if the given peer is "trusted".
// This will grant access to more rpc endpoints and a
// non-trusted one. This should be fast as it will be
// called repeteadly for every remote RPC request.
// called repeatedly for every remote RPC request.
IsTrustedPeer(context.Context, peer.ID) bool
// Trust marks a peer as "trusted".
Trust(context.Context, peer.ID) error
@ -77,7 +77,7 @@ type IPFSConnector interface {
ID(context.Context) (*api.IPFSID, error)
Pin(context.Context, *api.Pin) error
Unpin(context.Context, cid.Cid) error
PinLsCid(context.Context, cid.Cid) (api.IPFSPinStatus, error)
PinLsCid(context.Context, *api.Pin) (api.IPFSPinStatus, error)
PinLs(ctx context.Context, typeFilter string) (map[string]api.IPFSPinStatus, error)
// ConnectSwarms make sure this peer's IPFS daemon is connected to
// other peers IPFS daemons.

View File

@ -40,7 +40,9 @@ import (
peer "github.com/libp2p/go-libp2p-core/peer"
peerstore "github.com/libp2p/go-libp2p-core/peerstore"
dht "github.com/libp2p/go-libp2p-kad-dht"
dual "github.com/libp2p/go-libp2p-kad-dht/dual"
pubsub "github.com/libp2p/go-libp2p-pubsub"
routedhost "github.com/libp2p/go-libp2p/p2p/host/routed"
ma "github.com/multiformats/go-multiaddr"
)
@ -132,7 +134,7 @@ func TestMain(m *testing.M) {
}
func randomBytes() []byte {
bs := make([]byte, 64, 64)
bs := make([]byte, 64)
for i := 0; i < len(bs); i++ {
b := byte(rand.Int())
bs[i] = b
@ -144,7 +146,7 @@ func createComponents(
t *testing.T,
host host.Host,
pubsub *pubsub.PubSub,
dht *dht.IpfsDHT,
dht *dual.DHT,
i int,
staging bool,
) (
@ -181,9 +183,9 @@ func createComponents(
clusterCfg.LeaveOnShutdown = false
clusterCfg.SetBaseDir(filepath.Join(testsFolder, host.ID().Pretty()))
apiCfg.HTTPListenAddr = apiAddr
apiCfg.HTTPListenAddr = []ma.Multiaddr{apiAddr}
ipfsproxyCfg.ListenAddr = proxyAddr
ipfsproxyCfg.ListenAddr = []ma.Multiaddr{proxyAddr}
ipfsproxyCfg.NodeAddr = nodeAddr
ipfshttpCfg.NodeAddr = nodeAddr
@ -247,7 +249,7 @@ func makeStore(t *testing.T, badgerCfg *badger.Config) ds.Datastore {
}
}
func makeConsensus(t *testing.T, store ds.Datastore, h host.Host, psub *pubsub.PubSub, dht *dht.IpfsDHT, raftCfg *raft.Config, staging bool, crdtCfg *crdt.Config) Consensus {
func makeConsensus(t *testing.T, store ds.Datastore, h host.Host, psub *pubsub.PubSub, dht *dual.DHT, raftCfg *raft.Config, staging bool, crdtCfg *crdt.Config) Consensus {
switch consensus {
case "raft":
raftCon, err := raft.NewConsensus(h, raftCfg, store, staging)
@ -266,7 +268,7 @@ func makeConsensus(t *testing.T, store ds.Datastore, h host.Host, psub *pubsub.P
}
}
func createCluster(t *testing.T, host host.Host, dht *dht.IpfsDHT, clusterCfg *Config, store ds.Datastore, consensus Consensus, apis []API, ipfs IPFSConnector, tracker PinTracker, mon PeerMonitor, alloc PinAllocator, inf Informer, tracer Tracer) *Cluster {
func createCluster(t *testing.T, host host.Host, dht *dual.DHT, clusterCfg *Config, store ds.Datastore, consensus Consensus, apis []API, ipfs IPFSConnector, tracker PinTracker, mon PeerMonitor, alloc PinAllocator, inf Informer, tracer Tracer) *Cluster {
cl, err := NewCluster(context.Background(), host, dht, clusterCfg, store, consensus, apis, ipfs, tracker, mon, alloc, []Informer{inf}, tracer)
if err != nil {
t.Fatal(err)
@ -282,10 +284,10 @@ func createOnePeerCluster(t *testing.T, nth int, clusterSecret []byte) (*Cluster
return cl, mock
}
func createHosts(t *testing.T, clusterSecret []byte, nClusters int) ([]host.Host, []*pubsub.PubSub, []*dht.IpfsDHT) {
hosts := make([]host.Host, nClusters, nClusters)
pubsubs := make([]*pubsub.PubSub, nClusters, nClusters)
dhts := make([]*dht.IpfsDHT, nClusters, nClusters)
func createHosts(t *testing.T, clusterSecret []byte, nClusters int) ([]host.Host, []*pubsub.PubSub, []*dual.DHT) {
hosts := make([]host.Host, nClusters)
pubsubs := make([]*pubsub.PubSub, nClusters)
dhts := make([]*dual.DHT, nClusters)
tcpaddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0")
quicAddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/udp/0/quic")
@ -304,20 +306,15 @@ func createHosts(t *testing.T, clusterSecret []byte, nClusters int) ([]host.Host
return hosts, pubsubs, dhts
}
func createHost(t *testing.T, priv crypto.PrivKey, clusterSecret []byte, listen []ma.Multiaddr) (host.Host, *pubsub.PubSub, *dht.IpfsDHT) {
func createHost(t *testing.T, priv crypto.PrivKey, clusterSecret []byte, listen []ma.Multiaddr) (host.Host, *pubsub.PubSub, *dual.DHT) {
ctx := context.Background()
prot, err := newProtector(clusterSecret)
h, err := newHost(ctx, clusterSecret, priv, libp2p.ListenAddrs(listen...))
if err != nil {
t.Fatal(err)
}
h, err := newHost(ctx, prot, priv, libp2p.ListenAddrs(listen...))
if err != nil {
t.Fatal(err)
}
// DHT needs to be created BEFORE connecting the peers, but
// bootstrapped AFTER
// DHT needs to be created BEFORE connecting the peers
d, err := newTestDHT(ctx, h)
if err != nil {
t.Fatal(err)
@ -329,34 +326,33 @@ func createHost(t *testing.T, priv crypto.PrivKey, clusterSecret []byte, listen
if err != nil {
t.Fatal(err)
}
return routedHost(h, d), psub, d
return routedhost.Wrap(h, d), psub, d
}
func newTestDHT(ctx context.Context, h host.Host) (*dht.IpfsDHT, error) {
return newDHT(ctx, h)
// TODO: when new dht options are released
// return dht.New(ctx, h, dhtopts.Bootstrap(dhtopts.BootstrapConfig{
// Timeout: 300 * time.Millisecond,
// SelfQueryInterval: 300 * time.Millisecond,
// }))
func newTestDHT(ctx context.Context, h host.Host) (*dual.DHT, error) {
return newDHT(ctx, h, nil,
dual.DHTOption(dht.RoutingTableRefreshPeriod(600*time.Millisecond)),
dual.DHTOption(dht.RoutingTableRefreshQueryTimeout(300*time.Millisecond)),
)
}
func createClusters(t *testing.T) ([]*Cluster, []*test.IpfsMock) {
ctx := context.Background()
os.RemoveAll(testsFolder)
cfgs := make([]*Config, nClusters, nClusters)
stores := make([]ds.Datastore, nClusters, nClusters)
cons := make([]Consensus, nClusters, nClusters)
apis := make([][]API, nClusters, nClusters)
ipfss := make([]IPFSConnector, nClusters, nClusters)
trackers := make([]PinTracker, nClusters, nClusters)
mons := make([]PeerMonitor, nClusters, nClusters)
allocs := make([]PinAllocator, nClusters, nClusters)
infs := make([]Informer, nClusters, nClusters)
tracers := make([]Tracer, nClusters, nClusters)
ipfsMocks := make([]*test.IpfsMock, nClusters, nClusters)
cfgs := make([]*Config, nClusters)
stores := make([]ds.Datastore, nClusters)
cons := make([]Consensus, nClusters)
apis := make([][]API, nClusters)
ipfss := make([]IPFSConnector, nClusters)
trackers := make([]PinTracker, nClusters)
mons := make([]PeerMonitor, nClusters)
allocs := make([]PinAllocator, nClusters)
infs := make([]Informer, nClusters)
tracers := make([]Tracer, nClusters)
ipfsMocks := make([]*test.IpfsMock, nClusters)
clusters := make([]*Cluster, nClusters, nClusters)
clusters := make([]*Cluster, nClusters)
// Uncomment when testing with fixed ports
// clusterPeers := make([]ma.Multiaddr, nClusters, nClusters)
@ -398,17 +394,6 @@ func createClusters(t *testing.T) ([]*Cluster, []*test.IpfsMock) {
}
}
// // Bootstrap the DHTs
dhtCfg := dht.BootstrapConfig{
Queries: 1,
Period: 600 * time.Millisecond,
Timeout: 300 * time.Millisecond,
}
for _, d := range dhts {
d.BootstrapWithConfig(ctx, dhtCfg)
}
waitForLeader(t, clusters)
waitForClustersHealthy(t, clusters)
@ -433,6 +418,7 @@ func shutdownCluster(t *testing.T, c *Cluster, m *test.IpfsMock) {
}
func runF(t *testing.T, clusters []*Cluster, f func(*testing.T, *Cluster)) {
t.Helper()
var wg sync.WaitGroup
for _, c := range clusters {
wg.Add(1)
@ -623,7 +609,7 @@ func TestClustersPeersRetainOrder(t *testing.T) {
t.Fatal(err)
}
if bytes.Compare(peers1, peers2) != 0 {
if !bytes.Equal(peers1, peers2) {
t.Error("expected both results to be same")
}
}
@ -725,10 +711,10 @@ func TestClustersPinUpdate(t *testing.T) {
ttlDelay()
h, err := prefix.Sum(randomBytes()) // create random cid
h2, err := prefix.Sum(randomBytes()) // create random cid
h, _ := prefix.Sum(randomBytes()) // create random cid
h2, _ := prefix.Sum(randomBytes()) // create random cid
_, err = clusters[0].PinUpdate(ctx, h, h2, api.PinOptions{})
_, err := clusters[0].PinUpdate(ctx, h, h2, api.PinOptions{})
if err == nil || err != state.ErrNotFound {
t.Fatal("pin update should fail when from is not pinned")
}
@ -739,11 +725,12 @@ func TestClustersPinUpdate(t *testing.T) {
}
pinDelay()
expiry := time.Now().AddDate(1, 0, 0)
opts2 := api.PinOptions{
UserAllocations: []peer.ID{clusters[0].host.ID()}, // should not be used
PinUpdate: h,
Name: "new name",
ExpireAt: expiry,
}
_, err = clusters[0].Pin(ctx, h2, opts2) // should call PinUpdate
@ -766,13 +753,81 @@ func TestClustersPinUpdate(t *testing.T) {
if pinget.MaxDepth != -1 {
t.Error("updated pin should be recursive like pin1")
}
// We compare Unix seconds because our protobuf serde will have
// lost any sub-second precision.
if pinget.ExpireAt.Unix() != expiry.Unix() {
t.Errorf("Expiry didn't match. Expected: %s. Got: %s", expiry, pinget.ExpireAt)
}
if pinget.Name != "new name" {
t.Error("name should be kept")
}
}
runF(t, clusters, f)
}
func TestClustersPinDirect(t *testing.T) {
ctx := context.Background()
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
prefix := test.Cid1.Prefix()
ttlDelay()
h, _ := prefix.Sum(randomBytes()) // create random cid
_, err := clusters[0].Pin(ctx, h, api.PinOptions{Mode: api.PinModeDirect})
if err != nil {
t.Fatal(err)
}
pinDelay()
f := func(t *testing.T, c *Cluster, mode api.PinMode) {
pinget, err := c.PinGet(ctx, h)
if err != nil {
t.Fatal(err)
}
if pinget.Mode != mode {
t.Error("pin should be pinned in direct mode")
}
if pinget.MaxDepth != mode.ToPinDepth() {
t.Errorf("pin should have max-depth %d but has %d", mode.ToPinDepth(), pinget.MaxDepth)
}
pInfo := c.StatusLocal(ctx, h)
if pInfo.Error != "" {
t.Error(pInfo.Error)
}
if pInfo.Status != api.TrackerStatusPinned {
t.Error(pInfo.Error)
t.Error("the status should show the hash as pinned")
}
}
runF(t, clusters, func(t *testing.T, c *Cluster) {
f(t, c, api.PinModeDirect)
})
// Convert into a recursive mode
_, err = clusters[0].Pin(ctx, h, api.PinOptions{Mode: api.PinModeRecursive})
if err != nil {
t.Fatal(err)
}
pinDelay()
runF(t, clusters, func(t *testing.T, c *Cluster) {
f(t, c, api.PinModeRecursive)
})
// This should fail as we cannot convert back to direct
_, err = clusters[0].Pin(ctx, h, api.PinOptions{Mode: api.PinModeDirect})
if err == nil {
t.Error("a recursive pin cannot be converted back to direct pin")
}
}
func TestClustersStatusAll(t *testing.T) {
@ -780,7 +835,7 @@ func TestClustersStatusAll(t *testing.T) {
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
h := test.Cid1
clusters[0].Pin(ctx, h, api.PinOptions{})
clusters[0].Pin(ctx, h, api.PinOptions{Name: "test"})
pinDelay()
// Global status
f := func(t *testing.T, c *Cluster) {
@ -794,12 +849,17 @@ func TestClustersStatusAll(t *testing.T) {
if !statuses[0].Cid.Equals(h) {
t.Error("bad cid in status")
}
if statuses[0].Name != "test" {
t.Error("globalPinInfo should have the name")
}
info := statuses[0].PeerMap
if len(info) != nClusters {
t.Error("bad info in status")
}
pid := peer.IDB58Encode(c.host.ID())
pid := peer.Encode(c.host.ID())
if info[pid].Status != api.TrackerStatusPinned {
t.Error("the hash should have been pinned")
}
@ -827,7 +887,7 @@ func TestClustersStatusAllWithErrors(t *testing.T) {
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
h := test.Cid1
clusters[0].Pin(ctx, h, api.PinOptions{})
clusters[0].Pin(ctx, h, api.PinOptions{Name: "test"})
pinDelay()
// shutdown 1 cluster peer
@ -849,6 +909,14 @@ func TestClustersStatusAllWithErrors(t *testing.T) {
t.Fatal("bad status. Expected one item")
}
if !statuses[0].Cid.Equals(h) {
t.Error("wrong Cid in globalPinInfo")
}
if statuses[0].Name != "test" {
t.Error("wrong Name in globalPinInfo")
}
// Raft and CRDT behave differently here
switch consensus {
case "raft":
@ -860,13 +928,9 @@ func TestClustersStatusAllWithErrors(t *testing.T) {
t.Error("bad number of peers in status")
}
pid := peer.IDB58Encode(clusters[1].id)
pid := peer.Encode(clusters[1].id)
errst := stts.PeerMap[pid]
if !errst.Cid.Equals(h) {
t.Error("errored pinInfo should have a good cid")
}
if errst.Status != api.TrackerStatusClusterError {
t.Error("erroring status should be set to ClusterError:", errst.Status)
}
@ -882,10 +946,6 @@ func TestClustersStatusAllWithErrors(t *testing.T) {
if pinfo.Status != api.TrackerStatusClusterError {
t.Error("erroring status should be ClusterError:", pinfo.Status)
}
if !pinfo.Cid.Equals(h) {
t.Error("errored status should have a good cid")
}
case "crdt":
// CRDT will not have contacted the offline peer because
// its metric expired and therefore is not in the
@ -917,20 +977,20 @@ func TestClustersRecoverLocal(t *testing.T) {
pinDelay()
f := func(t *testing.T, c *Cluster) {
info, err := c.RecoverLocal(ctx, h)
_, err := c.RecoverLocal(ctx, h)
if err != nil {
t.Fatal(err)
}
// Wait for queue to be processed
delay()
info = c.StatusLocal(ctx, h)
info := c.StatusLocal(ctx, h)
if info.Status != api.TrackerStatusPinError {
t.Errorf("element is %s and not PinError", info.Status)
}
// Recover good ID
info, err = c.RecoverLocal(ctx, h2)
info, _ = c.RecoverLocal(ctx, h2)
if info.Status != api.TrackerStatusPinned {
t.Error("element should be in Pinned state")
}
@ -972,7 +1032,7 @@ func TestClustersRecover(t *testing.T) {
t.Fatal(err)
}
pinfo, ok := ginfo.PeerMap[peer.IDB58Encode(clusters[j].host.ID())]
pinfo, ok := ginfo.PeerMap[peer.Encode(clusters[j].host.ID())]
if !ok {
t.Fatal("should have info for this host")
}
@ -981,7 +1041,7 @@ func TestClustersRecover(t *testing.T) {
}
for _, c := range clusters {
inf, ok := ginfo.PeerMap[peer.IDB58Encode(c.host.ID())]
inf, ok := ginfo.PeerMap[peer.Encode(c.host.ID())]
if !ok {
t.Fatal("GlobalPinInfo should not be empty for this host")
}
@ -1006,7 +1066,7 @@ func TestClustersRecover(t *testing.T) {
}
for _, c := range clusters {
inf, ok := ginfo.PeerMap[peer.IDB58Encode(c.host.ID())]
inf, ok := ginfo.PeerMap[peer.Encode(c.host.ID())]
if !ok {
t.Fatal("GlobalPinInfo should have this cluster")
}
@ -1355,7 +1415,7 @@ func TestClustersReplicationFactorMin(t *testing.T) {
t.Error("Pin should have failed as rplMin cannot be satisfied")
}
t.Log(err)
if !strings.Contains(err.Error(), fmt.Sprintf("not enough peers to allocate CID")) {
if !strings.Contains(err.Error(), "not enough peers to allocate CID") {
t.Fatal(err)
}
}
@ -1693,7 +1753,7 @@ func TestClustersRebalanceOnPeerDown(t *testing.T) {
// kill the local pinner
for _, c := range clusters {
clid := peer.IDB58Encode(c.id)
clid := peer.Encode(c.id)
if clid == localPinner {
c.Shutdown(ctx)
} else if clid == remotePinner {
@ -1730,7 +1790,7 @@ func validateClusterGraph(t *testing.T, graph api.ConnectGraph, clusterIDs map[s
// Make lookup index for peers connected to id1
peerIndex := make(map[string]struct{})
for _, p := range peers {
peerIndex[peer.IDB58Encode(p)] = struct{}{}
peerIndex[peer.Encode(p)] = struct{}{}
}
for id2 := range clusterIDs {
if _, ok := peerIndex[id2]; id1 != id2 && !ok {
@ -1757,7 +1817,7 @@ func validateClusterGraph(t *testing.T, graph api.ConnectGraph, clusterIDs map[s
if len(graph.IPFSLinks) != 1 {
t.Error("Expected exactly one ipfs peer for all cluster nodes, the mocked peer")
}
links, ok := graph.IPFSLinks[peer.IDB58Encode(test.PeerID1)]
links, ok := graph.IPFSLinks[peer.Encode(test.PeerID1)]
if !ok {
t.Error("Expected the mocked ipfs peer to be a node in the graph")
} else {
@ -1799,7 +1859,7 @@ func TestClustersGraphConnected(t *testing.T) {
clusterIDs := make(map[string]struct{})
for _, c := range clusters {
id := peer.IDB58Encode(c.ID(ctx).ID)
id := peer.Encode(c.ID(ctx).ID)
clusterIDs[id] = struct{}{}
}
validateClusterGraph(t, graph, clusterIDs, nClusters)
@ -1848,7 +1908,7 @@ func TestClustersGraphUnhealthy(t *testing.T) {
if i == discon1 || i == discon2 {
continue
}
id := peer.IDB58Encode(c.ID(ctx).ID)
id := peer.Encode(c.ID(ctx).ID)
clusterIDs[id] = struct{}{}
}
peerNum := nClusters

View File

@ -21,7 +21,7 @@ const (
DefaultNodeAddr = "/ip4/127.0.0.1/tcp/5001"
DefaultConnectSwarmsDelay = 30 * time.Second
DefaultIPFSRequestTimeout = 5 * time.Minute
DefaultPinTimeout = 24 * time.Hour
DefaultPinTimeout = 2 * time.Minute
DefaultUnpinTimeout = 3 * time.Hour
DefaultRepoGCTimeout = 24 * time.Hour
DefaultUnpinDisable = false
@ -205,3 +205,13 @@ func (cfg *Config) toJSONConfig() (jcfg *jsonConfig, err error) {
return
}
// ToDisplayJSON returns JSON config as a string.
func (cfg *Config) ToDisplayJSON() ([]byte, error) {
jcfg, err := cfg.toJSONConfig()
if err != nil {
return nil, err
}
return config.DisplayJSON(jcfg)
}
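A minimal usage sketch for the new method, assuming a caller already holds a populated *Config from this package and that "fmt" is imported; the helper name is hypothetical and only the call shape is illustrated.
// Hypothetical helper: print the connector configuration in display form.
func dumpIPFSHTTPConfig(cfg *Config) error {
	raw, err := cfg.ToDisplayJSON()
	if err != nil {
		return err
	}
	fmt.Println(string(raw)) // human-readable JSON rendering of the config
	return nil
}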

View File

@ -12,7 +12,7 @@ var cfgJSON = []byte(`
"node_multiaddress": "/ip4/127.0.0.1/tcp/5001",
"connect_swarms_delay": "7s",
"ipfs_request_timeout": "5m0s",
"pin_timeout": "24h",
"pin_timeout": "2m",
"unpin_timeout": "3h",
"repogc_timeout": "24h"
}

View File

@ -21,12 +21,14 @@ import (
cid "github.com/ipfs/go-cid"
files "github.com/ipfs/go-ipfs-files"
logging "github.com/ipfs/go-log"
pinner "github.com/ipfs/go-ipfs-pinner"
logging "github.com/ipfs/go-log/v2"
gopath "github.com/ipfs/go-path"
peer "github.com/libp2p/go-libp2p-core/peer"
rpc "github.com/libp2p/go-libp2p-gorpc"
madns "github.com/multiformats/go-multiaddr-dns"
manet "github.com/multiformats/go-multiaddr-net"
manet "github.com/multiformats/go-multiaddr/net"
"github.com/multiformats/go-multihash"
"go.opencensus.io/plugin/ochttp"
"go.opencensus.io/plugin/ochttp/propagation/tracecontext"
@ -44,10 +46,6 @@ var logger = logging.Logger("ipfshttp")
// only the 10th will trigger a SendInformerMetrics call.
var updateMetricMod = 10
// progressTick sets how often we check progress when doing refs and pins
// requests.
var progressTick = 5 * time.Second
// Connector implements the IPFSConnector interface
// and provides a component which is used to perform
// on-demand requests against the configured IPFS daemon
@ -73,9 +71,20 @@ type Connector struct {
}
type ipfsError struct {
path string
code int
Message string
}
func (ie ipfsError) Error() string {
return fmt.Sprintf(
"IPFS request unsuccessful (%s). Code: %d. Message: %s",
ie.path,
ie.code,
ie.Message,
)
}
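An illustrative sketch of how the new typed error reads once checkResponse (further down) fills in the request path and status code; the function name and the concrete values are made up for illustration.
// Sketch only: shows the Error() output format of the typed error.
func exampleIPFSError() string {
	err := ipfsError{
		path:    "pin/ls?arg=<cid>&type=recursive", // hypothetical request path
		code:    500,                               // hypothetical HTTP status
		Message: "merkledag: not found",            // hypothetical daemon message
	}
	// Yields: IPFS request unsuccessful (pin/ls?arg=<cid>&type=recursive). Code: 500. Message: merkledag: not found
	return err.Error()
}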
type ipfsPinType struct {
Type string
}
@ -98,11 +107,6 @@ type ipfsRepoGCResp struct {
Error string
}
type ipfsRefsResp struct {
Ref string
Err string
}
type ipfsPinsResp struct {
Pins []string
Progress int
@ -112,12 +116,13 @@ type ipfsSwarmPeersResp struct {
Peers []ipfsPeer
}
type ipfsPeer struct {
Peer string
type ipfsBlockPutResp struct {
Key string
Size int
}
type ipfsStream struct {
Protocol string
type ipfsPeer struct {
Peer string
}
// NewConnector creates the component and leaves it ready to be started
@ -216,7 +221,7 @@ func (ipfs *Connector) SetClient(c *rpc.Client) {
// Shutdown stops any listeners and stops the component from taking
// any requests.
func (ipfs *Connector) Shutdown(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/Shutdown")
_, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/Shutdown")
defer span.End()
ipfs.shutdownLock.Lock()
@ -260,7 +265,7 @@ func (ipfs *Connector) ID(ctx context.Context) (*api.IPFSID, error) {
return nil, err
}
pID, err := peer.IDB58Decode(res.ID)
pID, err := peer.Decode(res.ID)
if err != nil {
return nil, err
}
@ -269,7 +274,7 @@ func (ipfs *Connector) ID(ctx context.Context) (*api.IPFSID, error) {
ID: pID,
}
mAddrs := make([]api.Multiaddr, len(res.Addresses), len(res.Addresses))
mAddrs := make([]api.Multiaddr, len(res.Addresses))
for i, strAddr := range res.Addresses {
mAddr, err := api.NewMultiaddr(strAddr)
if err != nil {
@ -282,7 +287,7 @@ func (ipfs *Connector) ID(ctx context.Context) (*api.IPFSID, error) {
return id, nil
}
func pinArgs(maxDepth int) string {
func pinArgs(maxDepth api.PinDepth) string {
q := url.Values{}
switch {
case maxDepth < 0:
@ -291,7 +296,7 @@ func pinArgs(maxDepth int) string {
q.Set("recursive", "false")
default:
q.Set("recursive", "true")
q.Set("max-depth", strconv.Itoa(maxDepth))
q.Set("max-depth", strconv.Itoa(int(maxDepth)))
}
return q.Encode()
}
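A test-style sketch of the query strings pinArgs is expected to produce; the depth-0 branch is elided from this hunk, so treating 0 as the direct-pin case is an assumption, and url.Values.Encode sorting keys alphabetically accounts for the parameter order. The test name is hypothetical.
func TestPinArgsSketch(t *testing.T) {
	// Bounded recursive pin: recursive=true plus an explicit max-depth.
	if got := pinArgs(2); got != "max-depth=2&recursive=true" {
		t.Errorf("unexpected query for depth 2: %s", got)
	}
	// Direct pin (assumed to be depth 0): recursive=false, no max-depth.
	if got := pinArgs(0); got != "recursive=false" {
		t.Errorf("unexpected query for depth 0: %s", got)
	}
}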
@ -305,7 +310,7 @@ func (ipfs *Connector) Pin(ctx context.Context, pin *api.Pin) error {
hash := pin.Cid
maxDepth := pin.MaxDepth
pinStatus, err := ipfs.PinLsCid(ctx, hash)
pinStatus, err := ipfs.PinLsCid(ctx, pin)
if err != nil {
return err
}
@ -324,7 +329,8 @@ func (ipfs *Connector) Pin(ctx context.Context, pin *api.Pin) error {
// is pinned recursively, then do pin/update.
// Otherwise do a normal pin.
if from := pin.PinUpdate; from != cid.Undef {
pinStatus, _ := ipfs.PinLsCid(ctx, from)
fromPin := api.PinWithOpts(from, pin.PinOptions)
pinStatus, _ := ipfs.PinLsCid(ctx, fromPin)
if pinStatus.IsPinned(-1) { // pinned recursively.
// As a side note, if PinUpdate == pin.Cid, we are
// somehow pinning an already pinned thing and we'd
@ -377,7 +383,7 @@ func (ipfs *Connector) Pin(ctx context.Context, pin *api.Pin) error {
// pinProgress pins an item and sends fetched node's progress on a
// channel. Blocks until done or error. pinProgress will always close the out
// channel. pinProgress will not block on sending to the channel if it is full.
func (ipfs *Connector) pinProgress(ctx context.Context, hash cid.Cid, maxDepth int, out chan<- int) error {
func (ipfs *Connector) pinProgress(ctx context.Context, hash cid.Cid, maxDepth api.PinDepth, out chan<- int) error {
defer close(out)
ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/pinsProgress")
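A minimal sketch of the non-blocking send that the pinProgress comment above promises; the function and parameter names are assumptions, not part of this change.
func sendProgress(out chan<- int, progress int) {
	select {
	case out <- progress:
		// the receiver is keeping up; progress delivered
	default:
		// channel full: drop this update rather than block the pin request
	}
}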
@ -440,31 +446,31 @@ func (ipfs *Connector) Unpin(ctx context.Context, hash cid.Cid) error {
ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/Unpin")
defer span.End()
pinStatus, err := ipfs.PinLsCid(ctx, hash)
if err != nil {
return err
if ipfs.config.UnpinDisable {
return errors.New("ipfs unpinning is disallowed by configuration on this peer")
}
if pinStatus.IsPinned(-1) {
if ipfs.config.UnpinDisable {
return errors.New("ipfs unpinning is disallowed by configuration on this peer")
}
defer ipfs.updateInformerMetric(ctx)
defer ipfs.updateInformerMetric(ctx)
path := fmt.Sprintf("pin/rm?arg=%s", hash)
path := fmt.Sprintf("pin/rm?arg=%s", hash)
ctx, cancel := context.WithTimeout(ctx, ipfs.config.UnpinTimeout)
defer cancel()
ctx, cancel := context.WithTimeout(ctx, ipfs.config.UnpinTimeout)
defer cancel()
_, err := ipfs.postCtx(ctx, path, "", nil)
if err != nil {
// We will call unpin in any case, if the CID is not pinned,
// then we ignore the error (although this is a bit flaky).
_, err := ipfs.postCtx(ctx, path, "", nil)
if err != nil {
ipfsErr, ok := err.(ipfsError)
if !ok || ipfsErr.Message != pinner.ErrNotPinned.Error() {
return err
}
logger.Info("IPFS Unpin request succeeded:", hash)
stats.Record(ctx, observations.Pins.M(-1))
logger.Debug("IPFS object is already unpinned: ", hash)
return nil
}
logger.Debug("IPFS object is already unpinned: ", hash)
logger.Info("IPFS Unpin request succeeded:", hash)
stats.Record(ctx, observations.Pins.M(-1))
return nil
}
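For clarity, a sketch of the error-matching rule applied above: a pin/rm failure is swallowed only when the daemon reports the CID as not pinned, compared against go-ipfs-pinner's ErrNotPinned. The helper name is hypothetical.
func ignoreNotPinned(err error) error {
	if err == nil {
		return nil
	}
	ipfsErr, ok := err.(ipfsError)
	if ok && ipfsErr.Message == pinner.ErrNotPinned.Error() {
		return nil // already unpinned on the IPFS daemon: treat as success
	}
	return err // any other failure still propagates
}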
@ -498,34 +504,21 @@ func (ipfs *Connector) PinLs(ctx context.Context, typeFilter string) (map[string
return statusMap, nil
}
// PinLsCid performs a "pin ls <hash>" request. It first tries with
// "type=recursive" and then, if not found, with "type=direct". It returns an
// api.IPFSPinStatus for that hash.
func (ipfs *Connector) PinLsCid(ctx context.Context, hash cid.Cid) (api.IPFSPinStatus, error) {
// PinLsCid performs a "pin ls <hash>" request. It will use "type=recursive" or
// "type=direct" (or other) depending on the given pin's MaxDepth setting.
// It returns an api.IPFSPinStatus for that hash.
func (ipfs *Connector) PinLsCid(ctx context.Context, pin *api.Pin) (api.IPFSPinStatus, error) {
ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/PinLsCid")
defer span.End()
pinLsType := func(pinType string) ([]byte, error) {
ctx, cancel := context.WithTimeout(ctx, ipfs.config.IPFSRequestTimeout)
defer cancel()
lsPath := fmt.Sprintf("pin/ls?arg=%s&type=%s", hash, pinType)
return ipfs.postCtx(ctx, lsPath, "", nil)
}
ctx, cancel := context.WithTimeout(ctx, ipfs.config.IPFSRequestTimeout)
defer cancel()
var body []byte
var err error
// FIXME: Sharding may need to check more pin types here.
for _, pinType := range []string{"recursive", "direct"} {
body, err = pinLsType(pinType)
// Network error, daemon down
if body == nil && err != nil {
return api.IPFSPinStatusError, err
}
// Pin found. Do not keep looking.
if err == nil {
break
}
pinType := pin.MaxDepth.ToPinMode().String()
lsPath := fmt.Sprintf("pin/ls?arg=%s&type=%s", pin.Cid, pinType)
body, err := ipfs.postCtx(ctx, lsPath, "", nil)
if body == nil && err != nil { // Network error, daemon down
return api.IPFSPinStatusError, err
}
if err != nil { // we could not find the pin
@ -544,7 +537,7 @@ func (ipfs *Connector) PinLsCid(ctx context.Context, hash cid.Cid) (api.IPFSPinS
// we parse as CID. There should only be one returned key.
for k, pinObj := range res.Keys {
c, err := cid.Decode(k)
if err != nil || !c.Equals(hash) {
if err != nil || !c.Equals(pin.Cid) {
continue
}
return api.IPFSPinStatusFromString(pinObj.Type), nil
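A caller-side sketch of the reworked PinLsCid: the pin itself now carries the depth used to pick the ls type. The helper name is hypothetical, and IsPinned is assumed to accept the same depth value.
func isPinnedOnIPFS(ctx context.Context, ipfs *Connector, pin *api.Pin) (bool, error) {
	// The ls type (recursive/direct) is derived from pin.MaxDepth internally.
	status, err := ipfs.PinLsCid(ctx, pin)
	if err != nil {
		return false, err
	}
	return status.IsPinned(pin.MaxDepth), nil
}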
@ -582,12 +575,9 @@ func checkResponse(path string, res *http.Response) ([]byte, error) {
if err == nil {
var ipfsErr ipfsError
if err := json.Unmarshal(body, &ipfsErr); err == nil {
return body, fmt.Errorf(
"IPFS request unsuccessful (%s). Code: %d. Message: %s",
path,
res.StatusCode,
ipfsErr.Message,
)
ipfsErr.code = res.StatusCode
ipfsErr.path = path
return body, ipfsErr
}
}
@ -622,24 +612,6 @@ func (ipfs *Connector) postCtx(ctx context.Context, path string, contentType str
return body, nil
}
// postDiscardBodyCtx makes a POST requests but discards the body
// of the response directly after reading it.
func (ipfs *Connector) postDiscardBodyCtx(ctx context.Context, path string) error {
res, err := ipfs.doPostCtx(ctx, ipfs.client, ipfs.apiURL(), path, "", nil)
if err != nil {
return err
}
defer res.Body.Close()
_, err = checkResponse(path, res)
if err != nil {
return err
}
_, err = io.Copy(ioutil.Discard, res.Body)
return err
}
// apiURL is a short-hand for building the url of the IPFS
// daemon API.
func (ipfs *Connector) apiURL() string {
@ -730,9 +702,8 @@ func getConfigValue(path []string, cfg map[string]interface{}) (interface{}, err
return value, nil
}
switch value.(type) {
switch v := value.(type) {
case map[string]interface{}:
v := value.(map[string]interface{})
return getConfigValue(path[1:], v)
default:
return nil, errors.New("invalid path")
@ -859,7 +830,7 @@ func (ipfs *Connector) SwarmPeers(ctx context.Context) ([]peer.ID, error) {
swarm := make([]peer.ID, len(peersRaw.Peers))
for i, p := range peersRaw.Peers {
pID, err := peer.IDB58Decode(p.Peer)
pID, err := peer.Decode(p.Peer)
if err != nil {
logger.Error(err)
return swarm, err
@ -887,14 +858,58 @@ func (ipfs *Connector) BlockPut(ctx context.Context, b *api.NodeWithMeta) error
)
multiFileR := files.NewMultiFileReader(mapDir, true)
if b.Format == "" {
b.Format = "v0"
q := make(url.Values, 3)
prefix := b.Cid.Prefix()
format, ok := cid.CodecToStr[prefix.Codec]
if !ok {
return fmt.Errorf("cannot find name for the blocks' CID codec: %x", prefix.Codec)
}
url := "block/put?f=" + b.Format
mhType, ok := multihash.Codes[prefix.MhType]
if !ok {
return fmt.Errorf("cannot find name for the blocks' Multihash type: %x", prefix.MhType)
}
// IPFS behaves differently when using v0 or protobuf which are
// actually the same.
if prefix.Version == 0 {
q.Set("format", "v0")
} else {
q.Set("format", format)
}
q.Set("mhtype", mhType)
q.Set("mhlen", strconv.Itoa(prefix.MhLength))
url := "block/put?" + q.Encode()
contentType := "multipart/form-data; boundary=" + multiFileR.Boundary()
_, err := ipfs.postCtx(ctx, url, contentType, multiFileR)
return err
body, err := ipfs.postCtx(ctx, url, contentType, multiFileR)
if err != nil {
return err
}
var res ipfsBlockPutResp
err = json.Unmarshal(body, &res)
if err != nil {
return err
}
logger.Debug("block/put response CID", res.Key)
respCid, err := cid.Decode(res.Key)
if err != nil {
logger.Error("cannot parse CID from BlockPut response")
return err
}
// IPFS is too brittle here. CIDv0 != CIDv1. Sending "protobuf" format
// returns CidV1. Sending "v0" format (which maps to protobuf)
// returns CidV0. Leaving this as warning.
if !respCid.Equals(b.Cid) {
logger.Warnf("blockPut response CID (%s) does not match the block sent (%s)", respCid, b.Cid)
}
return nil
}
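A condensed, self-contained sketch of the query-building step above; it mirrors the logic rather than adding anything new, the helper name is hypothetical, and the example URL in the comment assumes a 32-byte sha2-256 multihash.
func blockPutURL(c cid.Cid) (string, error) {
	prefix := c.Prefix()
	format, ok := cid.CodecToStr[prefix.Codec]
	if !ok {
		return "", fmt.Errorf("unknown CID codec: %x", prefix.Codec)
	}
	if prefix.Version == 0 {
		// CIDv0 must be requested as "v0", even though it maps to protobuf.
		format = "v0"
	}
	mhType, ok := multihash.Codes[prefix.MhType]
	if !ok {
		return "", fmt.Errorf("unknown multihash type: %x", prefix.MhType)
	}
	q := make(url.Values, 3)
	q.Set("format", format)
	q.Set("mhtype", mhType)
	q.Set("mhlen", strconv.Itoa(prefix.MhLength))
	// e.g. "block/put?format=raw&mhlen=32&mhtype=sha2-256" for a CIDv1 raw block.
	return "block/put?" + q.Encode(), nil
}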
// BlockGet retrieves an ipfs block with the given cid
@ -925,7 +940,7 @@ func (ipfs *Connector) BlockGet(ctx context.Context, c cid.Cid) ([]byte, error)
// if err != nil {
// return err
// }
// logger.Debugf("refs for %s sucessfully fetched", c)
// logger.Debugf("refs for %s successfully fetched", c)
// return nil
// }

View File

@ -7,7 +7,7 @@ import (
"testing"
"time"
logging "github.com/ipfs/go-log"
logging "github.com/ipfs/go-log/v2"
ma "github.com/multiformats/go-multiaddr"
merkledag "github.com/ipfs/go-merkledag"
@ -56,14 +56,14 @@ func TestIPFSID(t *testing.T) {
if id.ID != test.PeerID1 {
t.Error("expected testPeerID")
}
if len(id.Addresses) != 1 {
t.Error("expected 1 address")
if len(id.Addresses) != 2 {
t.Error("expected 2 address")
}
if id.Error != "" {
t.Error("expected no error")
}
mock.Close()
id, err = ipfs.ID(ctx)
_, err = ipfs.ID(ctx)
if err == nil {
t.Error("expected an error")
}
@ -75,12 +75,12 @@ func TestPin(t *testing.T) {
defer mock.Close()
defer ipfs.Shutdown(ctx)
c := test.Cid1
err := ipfs.Pin(ctx, api.PinCid(c))
pin := api.PinCid(test.Cid1)
err := ipfs.Pin(ctx, pin)
if err != nil {
t.Error("expected success pinning cid:", err)
}
pinSt, err := ipfs.PinLsCid(ctx, c)
pinSt, err := ipfs.PinLsCid(ctx, pin)
if err != nil {
t.Fatal("expected success doing ls:", err)
}
@ -88,8 +88,8 @@ func TestPin(t *testing.T) {
t.Error("cid should have been pinned")
}
c2 := test.ErrorCid
err = ipfs.Pin(ctx, api.PinCid(c2))
pin2 := api.PinCid(test.ErrorCid)
err = ipfs.Pin(ctx, pin2)
if err == nil {
t.Error("expected error pinning cid")
}
@ -178,8 +178,9 @@ func TestIPFSPinLsCid(t *testing.T) {
c := test.Cid1
c2 := test.Cid2
ipfs.Pin(ctx, api.PinCid(c))
ips, err := ipfs.PinLsCid(ctx, c)
pin := api.PinCid(c)
ipfs.Pin(ctx, pin)
ips, err := ipfs.PinLsCid(ctx, pin)
if err != nil {
t.Error(err)
}
@ -188,7 +189,7 @@ func TestIPFSPinLsCid(t *testing.T) {
t.Error("c should appear pinned")
}
ips, err = ipfs.PinLsCid(ctx, c2)
ips, err = ipfs.PinLsCid(ctx, api.PinCid(c2))
if err != nil || ips != api.IPFSPinStatusUnpinned {
t.Error("c2 should appear unpinned")
}
@ -201,8 +202,9 @@ func TestIPFSPinLsCid_DifferentEncoding(t *testing.T) {
defer ipfs.Shutdown(ctx)
c := test.Cid4 // ipfs mock treats this specially
ipfs.Pin(ctx, api.PinCid(c))
ips, err := ipfs.PinLsCid(ctx, c)
pin := api.PinCid(c)
ipfs.Pin(ctx, pin)
ips, err := ipfs.PinLsCid(ctx, pin)
if err != nil {
t.Error(err)
}
@ -288,14 +290,22 @@ func TestBlockPut(t *testing.T) {
defer mock.Close()
defer ipfs.Shutdown(ctx)
data := []byte(test.Cid4Data)
// CidV1
err := ipfs.BlockPut(ctx, &api.NodeWithMeta{
Data: data,
Cid: test.Cid4,
Format: "raw",
Data: []byte(test.Cid4Data),
Cid: test.Cid4,
})
if err != nil {
t.Fatal(err)
t.Error(err)
}
// CidV0
err = ipfs.BlockPut(ctx, &api.NodeWithMeta{
Data: []byte(test.Cid5Data),
Cid: test.Cid5,
})
if err != nil {
t.Error(err)
}
}
@ -314,9 +324,8 @@ func TestBlockGet(t *testing.T) {
// Put and then successfully get
err = ipfs.BlockPut(ctx, &api.NodeWithMeta{
Data: test.ShardData,
Cid: test.ShardCid,
Format: "cbor",
Data: test.ShardData,
Cid: test.ShardCid,
})
if err != nil {
t.Fatal(err)
@ -395,6 +404,9 @@ func TestConfigKey(t *testing.T) {
}
v, err = ipfs.ConfigKey("Datastore")
if err != nil {
t.Fatal(err)
}
_, ok = v.(map[string]interface{})
if !ok {
t.Error("should have returned the whole Datastore config object")

View File

@ -1,26 +1,11 @@
package ipfscluster
import (
logging "github.com/ipfs/go-log"
logging "github.com/ipfs/go-log/v2"
)
var logger = logging.Logger("cluster")
var (
ansiGray = "\033[0;37m"
ansiYellow = "\033[0;33m"
)
func init() {
// The whole purpose of this is to print the facility name in yellow
// color in the logs because the current blue is very hard to read.
logging.LogFormats["color"] = ansiGray +
"%{time:15:04:05.000} %{color}%{level:5.5s} " +
ansiYellow + "%{module:10.10s}: %{color:reset}%{message} " +
ansiGray + "%{shortfile}%{color:reset}"
logging.SetupLogging()
}
// LoggingFacilities provides a list of logging identifiers
// used by cluster and their default logging level.
var LoggingFacilities = map[string]string{

View File

@ -38,8 +38,6 @@ type Checker struct {
metrics *Store
threshold float64
alertThreshold int
failedPeersMu sync.Mutex
failedPeers map[peer.ID]map[string]int
}
@ -93,20 +91,6 @@ func (mc *Checker) CheckAll() error {
return nil
}
func (mc *Checker) alertIfExpired(metric *api.Metric) error {
if !metric.Expired() {
return nil
}
err := mc.alert(metric.Peer, metric.Name)
if err != nil {
return err
}
metric.Valid = false
mc.metrics.Add(metric) // invalidate so we don't alert again
return nil
}
func (mc *Checker) alert(pid peer.ID, metricName string) error {
mc.failedPeersMu.Lock()
defer mc.failedPeersMu.Unlock()

Some files were not shown because too many files have changed in this diff.