2018-03-13 17:16:15 +00:00
|
|
|
package ipfscluster
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"encoding/hex"
|
2023-01-27 17:01:24 +00:00
|
|
|
"time"
|
2018-03-13 17:16:15 +00:00
|
|
|
|
2022-06-20 16:23:50 +00:00
|
|
|
config "github.com/ipfs-cluster/ipfs-cluster/config"
|
2021-06-11 16:43:54 +00:00
|
|
|
ds "github.com/ipfs/go-datastore"
|
2021-05-03 15:16:35 +00:00
|
|
|
namespace "github.com/ipfs/go-datastore/namespace"
|
2020-04-06 22:00:51 +00:00
|
|
|
ipns "github.com/ipfs/go-ipns"
|
2018-03-13 17:16:15 +00:00
|
|
|
libp2p "github.com/libp2p/go-libp2p"
|
2022-11-14 08:40:29 +00:00
|
|
|
dht "github.com/libp2p/go-libp2p-kad-dht"
|
|
|
|
dual "github.com/libp2p/go-libp2p-kad-dht/dual"
|
|
|
|
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
|
|
|
record "github.com/libp2p/go-libp2p-record"
|
2022-09-06 14:57:17 +00:00
|
|
|
crypto "github.com/libp2p/go-libp2p/core/crypto"
|
|
|
|
host "github.com/libp2p/go-libp2p/core/host"
|
|
|
|
network "github.com/libp2p/go-libp2p/core/network"
|
2023-01-27 17:01:24 +00:00
|
|
|
peer "github.com/libp2p/go-libp2p/core/peer"
|
2022-09-06 14:57:17 +00:00
|
|
|
corepnet "github.com/libp2p/go-libp2p/core/pnet"
|
|
|
|
routing "github.com/libp2p/go-libp2p/core/routing"
|
2023-01-27 17:01:24 +00:00
|
|
|
"github.com/libp2p/go-libp2p/p2p/host/autorelay"
|
2022-06-20 16:23:50 +00:00
|
|
|
connmgr "github.com/libp2p/go-libp2p/p2p/net/connmgr"
|
2019-11-06 08:54:51 +00:00
|
|
|
identify "github.com/libp2p/go-libp2p/p2p/protocol/identify"
|
2022-06-20 16:23:50 +00:00
|
|
|
noise "github.com/libp2p/go-libp2p/p2p/security/noise"
|
2022-06-20 19:14:45 +00:00
|
|
|
libp2ptls "github.com/libp2p/go-libp2p/p2p/security/tls"
|
2022-06-20 16:23:50 +00:00
|
|
|
libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
|
|
|
|
tcp "github.com/libp2p/go-libp2p/p2p/transport/tcp"
|
|
|
|
websocket "github.com/libp2p/go-libp2p/p2p/transport/websocket"
|
2018-03-13 17:16:15 +00:00
|
|
|
)
|
|
|
|
|
2020-04-01 18:15:48 +00:00
|
|
|
// dhtNamespace is the datastore key prefix under which DHT records are
// persisted (see newDHT).
const dhtNamespace = "dht"
|
|
|
|
|
2020-04-02 14:12:45 +00:00
|
|
|
// Reference the QUIC transport so its import is retained even though it
// is not enabled as a transport (see the TODO in baseOpts: quic does not
// support private networks).
var _ = libp2pquic.NewTransport
|
|
|
|
|
2019-11-06 08:54:51 +00:00
|
|
|
// init lowers the libp2p identify activation threshold so that observed
// addresses are advertised immediately.
func init() {
	// Cluster peers should advertise their public IPs as soon as they
	// learn about them. Default for this is 4, which prevents clusters
	// with less than 4 peers to advertise an external address they know
	// of, therefore they cannot be remembered by other peers asap. This
	// affects dockerized setups mostly. This may announce non-dialable
	// NATed addresses too eagerly, but they should progressively be
	// cleaned up.
	identify.ActivationThresh = 1
}
|
|
|
|
|
2019-11-05 11:47:06 +00:00
|
|
|
// NewClusterHost creates a fully-featured libp2p Host with the options from
|
|
|
|
// the provided cluster configuration. Using that host, it creates pubsub and
|
2020-04-01 18:15:48 +00:00
|
|
|
// a DHT instances (persisting to the given datastore), for shared use by all
|
|
|
|
// cluster components. The returned host uses the DHT for routing. Relay and
|
|
|
|
// NATService are additionally setup for this host.
|
2019-02-20 14:24:25 +00:00
|
|
|
func NewClusterHost(
	ctx context.Context,
	ident *config.Identity,
	cfg *Config,
	ds ds.Datastore,
) (host.Host, *pubsub.PubSub, *dual.DHT, error) {

	// Set the default dial timeout for all libp2p connections. It is not
	// very good to touch this global variable here, but the alternative
	// is to use a modified context everywhere, even if the user supplies
	// it.
	network.DialPeerTimeout = cfg.DialPeerTimeout

	// Connection manager with low/high watermarks and grace period from
	// the cluster configuration.
	connman, err := connmgr.NewConnManager(cfg.ConnMgr.LowWater, cfg.ConnMgr.HighWater, connmgr.WithGracePeriod(cfg.ConnMgr.GracePeriod))
	if err != nil {
		return nil, nil, nil, err
	}

	var h host.Host
	var idht *dual.DHT
	// a channel to wait until these variables have been set
	// (or left unset on errors). Mostly to avoid reading while writing.
	hostAndDHTReady := make(chan struct{})
	defer close(hostAndDHTReady)

	// hostGetter blocks until this function returns (the deferred close
	// above), then yields the host — possibly nil if we errored out.
	hostGetter := func() host.Host {
		<-hostAndDHTReady // closed when we finish NewClusterHost
		return h
	}

	// dhtGetter mirrors hostGetter for the DHT instance.
	dhtGetter := func() *dual.DHT {
		<-hostAndDHTReady // closed when we finish NewClusterHost
		return idht
	}

	opts := []libp2p.Option{
		libp2p.ListenAddrs(cfg.ListenAddr...),
		libp2p.NATPortMap(),
		libp2p.ConnectionManager(connman),
		libp2p.Routing(func(h host.Host) (routing.PeerRouting, error) {
			// The DHT is created during host construction; capture it
			// in idht so it can be returned to the caller.
			idht, err = newDHT(ctx, h, ds)
			return idht, err
		}),
		libp2p.EnableNATService(),
		libp2p.EnableRelay(),
		// Relay candidates are discovered via DHT lookups performed by
		// newPeerSource, polled at most once per minute.
		libp2p.EnableAutoRelay(autorelay.WithPeerSource(newPeerSource(hostGetter, dhtGetter), time.Minute)),
		libp2p.EnableHolePunching(),
	}

	if cfg.EnableRelayHop {
		// Additionally act as a relay service for other peers.
		opts = append(opts, libp2p.EnableRelayService())
	}

	h, err = newHost(
		ctx,
		cfg.Secret,
		ident.PrivateKey,
		opts...,
	)
	if err != nil {
		return nil, nil, nil, err
	}

	psub, err := newPubSub(ctx, h)
	if err != nil {
		// Do not leak the host on pubsub failure.
		h.Close()
		return nil, nil, nil, err
	}

	return h, psub, idht, nil
}
|
|
|
|
|
2019-11-05 11:47:06 +00:00
|
|
|
// newHost creates a base cluster host without dht, pubsub, relay or nat etc.
|
|
|
|
// mostly used for testing.
|
2020-03-13 20:40:02 +00:00
|
|
|
func newHost(ctx context.Context, psk corepnet.PSK, priv crypto.PrivKey, opts ...libp2p.Option) (host.Host, error) {
|
2019-11-05 11:47:06 +00:00
|
|
|
finalOpts := []libp2p.Option{
|
|
|
|
libp2p.Identity(priv),
|
2018-03-13 17:16:15 +00:00
|
|
|
}
|
2020-03-13 20:40:02 +00:00
|
|
|
finalOpts = append(finalOpts, baseOpts(psk)...)
|
2019-11-05 11:47:06 +00:00
|
|
|
finalOpts = append(finalOpts, opts...)
|
2018-08-15 10:30:00 +00:00
|
|
|
|
2019-09-29 08:23:13 +00:00
|
|
|
h, err := libp2p.New(
|
2019-11-05 11:47:06 +00:00
|
|
|
finalOpts...,
|
2018-03-13 17:16:15 +00:00
|
|
|
)
|
2019-09-29 08:23:13 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2019-11-05 11:47:06 +00:00
|
|
|
return h, nil
|
|
|
|
}
|
|
|
|
|
2020-03-13 20:40:02 +00:00
|
|
|
func baseOpts(psk corepnet.PSK) []libp2p.Option {
|
2019-11-05 11:47:06 +00:00
|
|
|
return []libp2p.Option{
|
2020-03-13 20:40:02 +00:00
|
|
|
libp2p.PrivateNetwork(psk),
|
2020-04-06 22:00:51 +00:00
|
|
|
libp2p.EnableNATService(),
|
2020-08-28 21:02:01 +00:00
|
|
|
libp2p.Security(noise.ID, noise.New),
|
2020-08-30 12:17:13 +00:00
|
|
|
libp2p.Security(libp2ptls.ID, libp2ptls.New),
|
2020-03-13 20:40:02 +00:00
|
|
|
// TODO: quic does not support private networks
|
2021-11-30 05:34:13 +00:00
|
|
|
// libp2p.DefaultTransports,
|
|
|
|
libp2p.NoTransports,
|
|
|
|
libp2p.Transport(tcp.NewTCPTransport),
|
|
|
|
libp2p.Transport(websocket.New),
|
2019-09-29 08:23:13 +00:00
|
|
|
}
|
2019-11-05 11:47:06 +00:00
|
|
|
}
|
2019-09-29 08:23:13 +00:00
|
|
|
|
2021-06-11 16:43:54 +00:00
|
|
|
func newDHT(ctx context.Context, h host.Host, store ds.Datastore, extraopts ...dual.Option) (*dual.DHT, error) {
|
2020-09-02 10:06:47 +00:00
|
|
|
opts := []dual.Option{
|
|
|
|
dual.DHTOption(dht.NamespacedValidator("pk", record.PublicKeyValidator{})),
|
|
|
|
dual.DHTOption(dht.NamespacedValidator("ipns", ipns.Validator{KeyBook: h.Peerstore()})),
|
|
|
|
dual.DHTOption(dht.Concurrency(10)),
|
2020-04-06 22:00:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
opts = append(opts, extraopts...)
|
2020-04-01 18:15:48 +00:00
|
|
|
|
2021-06-11 16:43:54 +00:00
|
|
|
if batchingDs, ok := store.(ds.Batching); ok {
|
|
|
|
dhtDatastore := namespace.Wrap(batchingDs, ds.NewKey(dhtNamespace))
|
2020-09-02 10:06:47 +00:00
|
|
|
opts = append(opts, dual.DHTOption(dht.Datastore(dhtDatastore)))
|
2020-04-14 21:47:09 +00:00
|
|
|
logger.Debug("enabling DHT record persistence to datastore")
|
2020-04-01 18:15:48 +00:00
|
|
|
}
|
|
|
|
|
2020-04-14 20:03:24 +00:00
|
|
|
return dual.New(ctx, h, opts...)
|
2019-02-20 14:24:25 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func newPubSub(ctx context.Context, h host.Host) (*pubsub.PubSub, error) {
|
|
|
|
return pubsub.NewGossipSub(
|
|
|
|
ctx,
|
|
|
|
h,
|
|
|
|
pubsub.WithMessageSigning(true),
|
|
|
|
pubsub.WithStrictSignatureVerification(true),
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2023-01-27 17:01:24 +00:00
|
|
|
// peerSourceF is the peer-source signature used with
// autorelay.WithPeerSource: given a desired number of peers, it returns
// a channel of candidate relay peers.
type peerSourceF func(ctx context.Context, numPeers int) <-chan peer.AddrInfo
|
|
|
|
|
|
|
|
// Inspired in Kubo's
|
|
|
|
// https://github.com/ipfs/go-ipfs/blob/9327ee64ce96ca6da29bb2a099e0e0930b0d9e09/core/node/libp2p/relay.go#L79-L103
|
|
|
|
// and https://github.com/ipfs/go-ipfs/blob/9327ee64ce96ca6da29bb2a099e0e0930b0d9e09/core/node/libp2p/routing.go#L242-L317
|
|
|
|
// but simplified and adapted:
|
|
|
|
// - Everytime we need peers for relays we do a DHT lookup.
|
|
|
|
// - We return the peers from that lookup.
|
|
|
|
// - No need to do it async, since we have to wait for the full lookup to
|
|
|
|
// return anyways. We put them on a buffered channel and be done.
|
|
|
|
func newPeerSource(hostGetter func() host.Host, dhtGetter func() *dual.DHT) peerSourceF {
|
|
|
|
return func(ctx context.Context, numPeers int) <-chan peer.AddrInfo {
|
|
|
|
// make a channel to return, and put items from numPeers on
|
|
|
|
// that channel up to numPeers. Then close it.
|
|
|
|
r := make(chan peer.AddrInfo, numPeers)
|
|
|
|
defer close(r)
|
|
|
|
|
|
|
|
// Because the Host, DHT are initialized after relay, we need to
|
|
|
|
// obtain them indirectly this way.
|
|
|
|
h := hostGetter()
|
|
|
|
if h == nil { // context canceled etc.
|
|
|
|
return r
|
|
|
|
}
|
|
|
|
idht := dhtGetter()
|
|
|
|
if idht == nil { // context canceled etc.
|
|
|
|
return r
|
|
|
|
}
|
|
|
|
|
|
|
|
// length of closest peers is K.
|
|
|
|
closestPeers, err := idht.WAN.GetClosestPeers(ctx, h.ID().String())
|
|
|
|
if err != nil { // Bail out. Usually a "no peers found".
|
|
|
|
return r
|
|
|
|
}
|
|
|
|
|
|
|
|
logger.Debug("peerSource: %d closestPeers for %d requested", len(closestPeers), numPeers)
|
|
|
|
|
|
|
|
for _, p := range closestPeers {
|
|
|
|
addrs := h.Peerstore().Addrs(p)
|
|
|
|
if len(addrs) == 0 {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
dhtPeer := peer.AddrInfo{ID: p, Addrs: addrs}
|
|
|
|
// Attempt to put peers on r if we have space,
|
|
|
|
// otherwise return (we reached numPeers)
|
|
|
|
select {
|
|
|
|
case r <- dhtPeer:
|
|
|
|
case <-ctx.Done():
|
|
|
|
return r
|
|
|
|
default:
|
|
|
|
return r
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// We are here if numPeers > closestPeers
|
|
|
|
return r
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-13 17:16:15 +00:00
|
|
|
// EncodeProtectorKey converts a byte slice to its hex string representation.
|
|
|
|
// EncodeProtectorKey converts a byte slice to its hex string representation.
func EncodeProtectorKey(secretBytes []byte) string {
	encoded := hex.EncodeToString(secretBytes)
	return encoded
}
|