Merge pull request #46 from ipfs/41-replication

Add replication factor support
Hector Sanjuan 2017-02-15 13:36:45 +01:00 committed by GitHub
commit c04df7ecf3
41 changed files with 3468 additions and 1622 deletions

View File

@ -6,10 +6,7 @@ install:
- go get github.com/mattn/goveralls
- make deps
script:
- make test
- make service
- make ctl
- "$GOPATH/bin/goveralls -coverprofile=coverage.out -service=travis-ci -repotoken $COVERALLS_TOKEN"
- make service && make ctl && ./coverage.sh
env:
global:
secure: M3K3y9+D933tCda7+blW3qqVV8fA6PBDRdJoQvmQc1f0XYbWinJ+bAziFp6diKkF8sMQ+cPwLMONYJuaNT2h7/PkG+sIwF0PuUo5VVCbhGmSDrn2qOjmSnfawNs8wW31f44FQA8ICka1EFZcihohoIMf0e5xZ0tXA9jqw+ngPJiRnv4zyzC3r6t4JMAZcbS9w4KTYpIev5Yj72eCvk6lGjadSVCDVXo2sVs27tNt+BSgtMXiH6Sv8GLOnN2kFspGITgivHgB/jtU6QVtFXB+cbBJJAs3lUYnzmQZ5INecbjweYll07ilwFiCVNCX67+L15gpymKGJbQggloIGyTWrAOa2TMaB/bvblzwwQZ8wE5P3Rss5L0TFkUAcdU+3BUHM+TwV4e8F9x10v1PjgWNBRJQzd1sjKKgGUBCeyCY7VeYDKn9AXI5llISgY/AAfCZwm2cbckMHZZJciMjm+U3Q1FCF+rfhlvUcMG1VEj8r9cGpmWIRjFYVm0NmpUDDNjlC3/lUfTCOOJJyM254EUw63XxabbK6EtDN1yQe8kYRcXH//2rtEwgtMBgqHVY+OOkekzGz8Ra3EBkh6jXrAQL3zKu/GwRlK7/a1OU5MQ7dWcTjbx1AQ6Zfyjg5bZ+idqPgMbqM9Zn2+OaSby8HEEXS0QeZVooDVf/6wdYO4MQ/0A=

View File

@ -55,7 +55,7 @@ deps: gx
$(gx_bin) --verbose install --global
$(gx-go_bin) rewrite
test: deps
go test -tags silent -v -covermode count -coverprofile=coverage.out .
go test -tags silent -v ./...
rw: gx
$(gx-go_bin) rewrite
rwundo: gx

View File

@ -0,0 +1,93 @@
// Package numpinalloc implements an ipfscluster.Allocator based on the "numpin"
// Informer. It is a simple example of how an allocator is implemented.
package numpinalloc
import (
"sort"
"strconv"
"github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/informer/numpin"
rpc "github.com/hsanjuan/go-libp2p-gorpc"
cid "github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log"
peer "github.com/libp2p/go-libp2p-peer"
)
var logger = logging.Logger("numpinalloc")
// Allocator implements the ipfscluster.PinAllocator interface.
type Allocator struct{}
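// NewAllocator returns a new Allocator.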
func NewAllocator() *Allocator {
return &Allocator{}
}
// SetClient does nothing in this allocator
func (alloc *Allocator) SetClient(c *rpc.Client) {}
// Shutdown does nothing in this allocator
func (alloc *Allocator) Shutdown() error { return nil }
// Allocate returns where to allocate a pin request based on "numpin"-Informer
// metrics. In this simple case, we do not pay attention to the metrics
// of the current allocations; we just need to sort the candidates by their number of pins.
func (alloc *Allocator) Allocate(c *cid.Cid, current, candidates map[peer.ID]api.Metric) ([]peer.ID, error) {
// sort our metrics
numpins := newMetricsSorter(candidates)
sort.Sort(numpins)
return numpins.peers, nil
}
// metricsSorter attaches sort.Interface methods to our metrics and sorts
// a slice of peers in the way that interests us
type metricsSorter struct {
peers []peer.ID
m map[peer.ID]int
}
func newMetricsSorter(m map[peer.ID]api.Metric) *metricsSorter {
vMap := make(map[peer.ID]int)
peers := make([]peer.ID, 0, len(m))
for k, v := range m {
if v.Name != numpin.MetricName || v.Discard() {
continue
}
val, err := strconv.Atoi(v.Value)
if err != nil {
continue
}
peers = append(peers, k)
vMap[k] = val
}
sorter := &metricsSorter{
m: vMap,
peers: peers,
}
return sorter
}
// Len returns the number of metrics
func (s metricsSorter) Len() int {
return len(s.peers)
}
// Less reports if the element in position i is less than the element in j
func (s metricsSorter) Less(i, j int) bool {
peeri := s.peers[i]
peerj := s.peers[j]
x := s.m[peeri]
y := s.m[peerj]
return x < y
}
// Swap swaps the elements in positions i and j
func (s metricsSorter) Swap(i, j int) {
s.peers[i], s.peers[j] = s.peers[j], s.peers[i]
}
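For reference, here is a minimal usage sketch of this allocator on its own (the peer IDs and pin counts below are invented for illustration, and the CID argument is passed as nil since this allocator ignores it):

package main

import (
	"fmt"
	"time"

	"github.com/ipfs/ipfs-cluster/allocator/numpinalloc"
	"github.com/ipfs/ipfs-cluster/api"
	"github.com/ipfs/ipfs-cluster/informer/numpin"

	peer "github.com/libp2p/go-libp2p-peer"
)

func main() {
	alloc := numpinalloc.NewAllocator()
	expire := time.Now().Add(time.Minute).Format(time.RFC1123)
	// peerB reports fewer pins, so it should be returned first.
	candidates := map[peer.ID]api.Metric{
		peer.ID("peerA"): {Name: numpin.MetricName, Value: "10", Expire: expire, Valid: true},
		peer.ID("peerB"): {Name: numpin.MetricName, Value: "2", Expire: expire, Valid: true},
	}
	allocations, err := alloc.Allocate(nil, nil, candidates)
	if err != nil {
		panic(err)
	}
	for _, p := range allocations {
		fmt.Println(string(p)) // prints peerB first, then peerA
	}
}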

View File

@ -0,0 +1,134 @@
package numpinalloc
import (
"testing"
"time"
"github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/informer/numpin"
cid "github.com/ipfs/go-cid"
peer "github.com/libp2p/go-libp2p-peer"
)
type testcase struct {
candidates map[peer.ID]api.Metric
current map[peer.ID]api.Metric
expected []peer.ID
}
var (
peer0 = peer.ID("QmUQ6Nsejt1SuZAu8yL8WgqQZHHAYreLVYYa4VPsLUCed7")
peer1 = peer.ID("QmUZ13osndQ5uL4tPWHXe3iBgBgq9gfewcBMSCAuMBsDJ6")
peer2 = peer.ID("QmPrSBATWGAN56fiiEWEhKX3L1F3mTghEQR7vQwaeo7zHi")
peer3 = peer.ID("QmPGDFvBkgWhvzEK9qaTWrWurSwqXNmhnK3hgELPdZZNPa")
testCid, _ = cid.Decode("QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmq")
)
var inAMinute = time.Now().Add(time.Minute).Format(time.RFC1123)
var testCases = []testcase{
{ // regular sort
candidates: map[peer.ID]api.Metric{
peer0: api.Metric{
Name: numpin.MetricName,
Value: "5",
Expire: inAMinute,
Valid: true,
},
peer1: api.Metric{
Name: numpin.MetricName,
Value: "1",
Expire: inAMinute,
Valid: true,
},
peer2: api.Metric{
Name: numpin.MetricName,
Value: "3",
Expire: inAMinute,
Valid: true,
},
peer3: api.Metric{
Name: numpin.MetricName,
Value: "2",
Expire: inAMinute,
Valid: true,
},
},
current: map[peer.ID]api.Metric{},
expected: []peer.ID{peer1, peer3, peer2, peer0},
},
{ // filter invalid
candidates: map[peer.ID]api.Metric{
peer0: api.Metric{
Name: numpin.MetricName,
Value: "1",
Expire: inAMinute,
Valid: false,
},
peer1: api.Metric{
Name: numpin.MetricName,
Value: "5",
Expire: inAMinute,
Valid: true,
},
},
current: map[peer.ID]api.Metric{},
expected: []peer.ID{peer1},
},
{ // filter bad metric name
candidates: map[peer.ID]api.Metric{
peer0: api.Metric{
Name: "lalala",
Value: "1",
Expire: inAMinute,
Valid: true,
},
peer1: api.Metric{
Name: numpin.MetricName,
Value: "5",
Expire: inAMinute,
Valid: true,
},
},
current: map[peer.ID]api.Metric{},
expected: []peer.ID{peer1},
},
{ // filter bad value
candidates: map[peer.ID]api.Metric{
peer0: api.Metric{
Name: numpin.MetricName,
Value: "abc",
Expire: inAMinute,
Valid: true,
},
peer1: api.Metric{
Name: numpin.MetricName,
Value: "5",
Expire: inAMinute,
Valid: true,
},
},
current: map[peer.ID]api.Metric{},
expected: []peer.ID{peer1},
},
}
func Test(t *testing.T) {
alloc := &Allocator{}
for i, tc := range testCases {
t.Logf("Test case %d", i)
res, err := alloc.Allocate(testCid, tc.current, tc.candidates)
if err != nil {
t.Fatal(err)
}
if len(res) == 0 {
t.Fatal("0 allocations")
}
for i, r := range res {
if e := tc.expected[i]; r != e {
t.Errorf("Expect r[%d]=%s but got %s", i, r, e)
}
}
}
}

425
api/types.go Normal file
View File

@ -0,0 +1,425 @@
// Package api holds declarations for types used in ipfs-cluster APIs to make
// them re-usable across different tools. This includes RPC API "Serial[izable]"
// versions of the types. The Go API uses native types, while the RPC API,
// REST APIs etc. use serializable types (i.e. JSON format). Conversion methods
// exist between the types.
//
// Note that all conversion methods ignore any parsing errors. All values must
// be validated first before initializing any of the types defined here.
package api
import (
"time"
cid "github.com/ipfs/go-cid"
peer "github.com/libp2p/go-libp2p-peer"
protocol "github.com/libp2p/go-libp2p-protocol"
ma "github.com/multiformats/go-multiaddr"
)
// TrackerStatus values
const (
// A TrackerStatus should never take this value
TrackerStatusBug = iota
// The cluster node is offline or not responding
TrackerStatusClusterError
// An error occurred pinning
TrackerStatusPinError
// An error occurred unpinning
TrackerStatusUnpinError
// The IPFS daemon has pinned the item
TrackerStatusPinned
// The IPFS daemon is currently pinning the item
TrackerStatusPinning
// The IPFS daemon is currently unpinning the item
TrackerStatusUnpinning
// The IPFS daemon is not pinning the item
TrackerStatusUnpinned
// The IPFS daemon is not pinning the item but it is being tracked
TrackerStatusRemote
)
// TrackerStatus represents the status of a tracked Cid in the PinTracker
type TrackerStatus int
var trackerStatusString = map[TrackerStatus]string{
TrackerStatusBug: "bug",
TrackerStatusClusterError: "cluster_error",
TrackerStatusPinError: "pin_error",
TrackerStatusUnpinError: "unpin_error",
TrackerStatusPinned: "pinned",
TrackerStatusPinning: "pinning",
TrackerStatusUnpinning: "unpinning",
TrackerStatusUnpinned: "unpinned",
TrackerStatusRemote: "remote",
}
// String converts a TrackerStatus into a readable string.
func (st TrackerStatus) String() string {
return trackerStatusString[st]
}
// TrackerStatusFromString parses a string and returns the matching
// TrackerStatus value.
func TrackerStatusFromString(str string) TrackerStatus {
for k, v := range trackerStatusString {
if v == str {
return k
}
}
return TrackerStatusBug
}
// IPFSPinStatus values
const (
IPFSPinStatusBug = iota
IPFSPinStatusError
IPFSPinStatusDirect
IPFSPinStatusRecursive
IPFSPinStatusIndirect
IPFSPinStatusUnpinned
)
// IPFSPinStatus represents the status of a pin in IPFS (direct, recursive etc.)
type IPFSPinStatus int
// IPFSPinStatusFromString parses a string and returns the matching
// IPFSPinStatus.
func IPFSPinStatusFromString(t string) IPFSPinStatus {
// TODO: This is only used in the http_connector to parse
// ipfs-daemon-returned values. Maybe it should be extended.
switch {
case t == "indirect":
return IPFSPinStatusIndirect
case t == "direct":
return IPFSPinStatusDirect
case t == "recursive":
return IPFSPinStatusRecursive
default:
return IPFSPinStatusBug
}
}
// IsPinned returns true if the status is Direct or Recursive
func (ips IPFSPinStatus) IsPinned() bool {
return ips == IPFSPinStatusDirect || ips == IPFSPinStatusRecursive
}
// GlobalPinInfo contains cluster-wide status information about a tracked Cid,
// indexed by cluster peer.
type GlobalPinInfo struct {
Cid *cid.Cid
PeerMap map[peer.ID]PinInfo
}
// GlobalPinInfoSerial is the serializable version of GlobalPinInfo.
type GlobalPinInfoSerial struct {
Cid string `json:"cid"`
PeerMap map[string]PinInfoSerial `json:"peer_map"`
}
// ToSerial converts a GlobalPinInfo to its serializable version.
func (gpi GlobalPinInfo) ToSerial() GlobalPinInfoSerial {
s := GlobalPinInfoSerial{}
s.Cid = gpi.Cid.String()
s.PeerMap = make(map[string]PinInfoSerial)
for k, v := range gpi.PeerMap {
s.PeerMap[peer.IDB58Encode(k)] = v.ToSerial()
}
return s
}
// ToGlobalPinInfo converts a GlobalPinInfoSerial to its native version.
func (gpis GlobalPinInfoSerial) ToGlobalPinInfo() GlobalPinInfo {
c, _ := cid.Decode(gpis.Cid)
gpi := GlobalPinInfo{
Cid: c,
PeerMap: make(map[peer.ID]PinInfo),
}
for k, v := range gpis.PeerMap {
p, _ := peer.IDB58Decode(k)
gpi.PeerMap[p] = v.ToPinInfo()
}
return gpi
}
// PinInfo holds information about local pins. PinInfo is
// serialized when requesting the Global status, therefore
// we cannot use *cid.Cid.
type PinInfo struct {
Cid *cid.Cid
Peer peer.ID
Status TrackerStatus
TS time.Time
Error string
}
// PinInfoSerial is a serializable version of PinInfo.
type PinInfoSerial struct {
Cid string `json:"cid"`
Peer string `json:"peer"`
Status string `json:"status"`
TS string `json:"timestamp"`
Error string `json:"error"`
}
// ToSerial converts a PinInfo to its serializable version.
func (pi PinInfo) ToSerial() PinInfoSerial {
return PinInfoSerial{
Cid: pi.Cid.String(),
Peer: peer.IDB58Encode(pi.Peer),
Status: pi.Status.String(),
TS: pi.TS.Format(time.RFC1123),
Error: pi.Error,
}
}
// ToPinInfo converts a PinInfoSerial to its native version.
func (pis PinInfoSerial) ToPinInfo() PinInfo {
c, _ := cid.Decode(pis.Cid)
p, _ := peer.IDB58Decode(pis.Peer)
ts, _ := time.Parse(time.RFC1123, pis.TS)
return PinInfo{
Cid: c,
Peer: p,
Status: TrackerStatusFromString(pis.Status),
TS: ts,
Error: pis.Error,
}
}
// Version holds version information
type Version struct {
Version string `json:"Version"`
}
// IPFSID is used to store information about the underlying IPFS daemon
type IPFSID struct {
ID peer.ID
Addresses []ma.Multiaddr
Error string
}
// IPFSIDSerial is the serializable IPFSID for RPC requests
type IPFSIDSerial struct {
ID string `json:"id"`
Addresses MultiaddrsSerial `json:"addresses"`
Error string `json:"error"`
}
// ToSerial converts an IPFSID to its Go-serializable version
func (id *IPFSID) ToSerial() IPFSIDSerial {
return IPFSIDSerial{
ID: peer.IDB58Encode(id.ID),
Addresses: MultiaddrsToSerial(id.Addresses),
Error: id.Error,
}
}
// ToIPFSID converts an IPFSIDSerial to IPFSID
func (ids *IPFSIDSerial) ToIPFSID() IPFSID {
id := IPFSID{}
if pID, err := peer.IDB58Decode(ids.ID); err == nil {
id.ID = pID
}
id.Addresses = ids.Addresses.ToMultiaddrs()
id.Error = ids.Error
return id
}
// ID holds information about the Cluster peer
type ID struct {
ID peer.ID
Addresses []ma.Multiaddr
ClusterPeers []ma.Multiaddr
Version string
Commit string
RPCProtocolVersion protocol.ID
Error string
IPFS IPFSID
//PublicKey crypto.PubKey
}
// IDSerial is the serializable ID counterpart for RPC requests
type IDSerial struct {
ID string `json:"id"`
Addresses MultiaddrsSerial `json:"addresses"`
ClusterPeers MultiaddrsSerial `json:"cluster_peers"`
Version string `json:"version"`
Commit string `json:"commit"`
RPCProtocolVersion string `json:"rpc_protocol_version"`
Error string `json:"error"`
IPFS IPFSIDSerial `json:"ipfs"`
//PublicKey []byte
}
// ToSerial converts an ID to its Go-serializable version
func (id ID) ToSerial() IDSerial {
//var pkey []byte
//if id.PublicKey != nil {
// pkey, _ = id.PublicKey.Bytes()
//}
return IDSerial{
ID: peer.IDB58Encode(id.ID),
//PublicKey: pkey,
Addresses: MultiaddrsToSerial(id.Addresses),
ClusterPeers: MultiaddrsToSerial(id.ClusterPeers),
Version: id.Version,
Commit: id.Commit,
RPCProtocolVersion: string(id.RPCProtocolVersion),
Error: id.Error,
IPFS: id.IPFS.ToSerial(),
}
}
// ToID converts an IDSerial object to ID.
// It will ignore any errors when parsing the fields.
func (ids IDSerial) ToID() ID {
id := ID{}
p, _ := peer.IDB58Decode(ids.ID)
id.ID = p
//if pkey, err := crypto.UnmarshalPublicKey(ids.PublicKey); err == nil {
// id.PublicKey = pkey
//}
id.Addresses = ids.Addresses.ToMultiaddrs()
id.ClusterPeers = ids.ClusterPeers.ToMultiaddrs()
id.Version = ids.Version
id.Commit = ids.Commit
id.RPCProtocolVersion = protocol.ID(ids.RPCProtocolVersion)
id.Error = ids.Error
id.IPFS = ids.IPFS.ToIPFSID()
return id
}
// MultiaddrSerial is a Multiaddress in a serializable form
type MultiaddrSerial string
// MultiaddrsSerial is an array of Multiaddresses in serializable form
type MultiaddrsSerial []MultiaddrSerial
// MultiaddrToSerial converts a Multiaddress to its serializable form
func MultiaddrToSerial(addr ma.Multiaddr) MultiaddrSerial {
return MultiaddrSerial(addr.String())
}
// ToMultiaddr converts a serializable Multiaddress to its original type.
// All errors are ignored.
func (addrS MultiaddrSerial) ToMultiaddr() ma.Multiaddr {
a, _ := ma.NewMultiaddr(string(addrS))
return a
}
// MultiaddrsToSerial converts a slice of Multiaddresses to its
// serializable form.
func MultiaddrsToSerial(addrs []ma.Multiaddr) MultiaddrsSerial {
addrsS := make([]MultiaddrSerial, len(addrs), len(addrs))
for i, a := range addrs {
addrsS[i] = MultiaddrToSerial(a)
}
return addrsS
}
// ToMultiaddrs converts MultiaddrsSerial back to a slice of Multiaddresses
func (addrsS MultiaddrsSerial) ToMultiaddrs() []ma.Multiaddr {
addrs := make([]ma.Multiaddr, len(addrsS), len(addrsS))
for i, addrS := range addrsS {
addrs[i] = addrS.ToMultiaddr()
}
return addrs
}
// CidArg is an argument that carries a Cid. It may carry more things in the
// future.
type CidArg struct {
Cid *cid.Cid
Allocations []peer.ID
Everywhere bool
}
// CidArgCid is a shortcut to create a CidArg only with a Cid.
func CidArgCid(c *cid.Cid) CidArg {
return CidArg{
Cid: c,
}
}
// CidArgSerial is a serializable version of CidArg
type CidArgSerial struct {
Cid string `json:"cid"`
Allocations []string `json:"allocations"`
Everywhere bool `json:"everywhere"`
}
// ToSerial converts a CidArg to CidArgSerial.
func (carg CidArg) ToSerial() CidArgSerial {
lenAllocs := len(carg.Allocations)
allocs := make([]string, lenAllocs, lenAllocs)
for i, p := range carg.Allocations {
allocs[i] = peer.IDB58Encode(p)
}
return CidArgSerial{
Cid: carg.Cid.String(),
Allocations: allocs,
Everywhere: carg.Everywhere,
}
}
// ToCidArg converts a CidArgSerial to its native form.
func (cargs CidArgSerial) ToCidArg() CidArg {
c, _ := cid.Decode(cargs.Cid)
lenAllocs := len(cargs.Allocations)
allocs := make([]peer.ID, lenAllocs, lenAllocs)
for i, p := range cargs.Allocations {
allocs[i], _ = peer.IDB58Decode(p)
}
return CidArg{
Cid: c,
Allocations: allocs,
Everywhere: cargs.Everywhere,
}
}
// Metric transports information about a peer.ID. It is used to decide
// pin allocations by a PinAllocator. IPFS cluster is agnostic to
// the Value, which should be interpreted by the PinAllocator.
type Metric struct {
Name string
Peer peer.ID // filled-in by Cluster.
Value string
Expire string // RFC1123
Valid bool // if the metric is not valid it will be discarded
}
// SetTTL sets the Metric to expire after the given number of seconds
func (m *Metric) SetTTL(seconds int) {
exp := time.Now().Add(time.Duration(seconds) * time.Second)
m.Expire = exp.Format(time.RFC1123)
}
// GetTTL returns the time left before the Metric expires
func (m *Metric) GetTTL() time.Duration {
exp, _ := time.Parse(time.RFC1123, m.Expire)
return exp.Sub(time.Now())
}
// Expired reports whether the Metric has expired
func (m *Metric) Expired() bool {
exp, _ := time.Parse(time.RFC1123, m.Expire)
return time.Now().After(exp)
}
// Discard returns true if the metric is invalid or has expired
func (m *Metric) Discard() bool {
return !m.Valid || m.Expired()
}
// Alert carries alerting information about a peer. WIP.
type Alert struct {
Peer peer.ID
MetricName string
}
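Since the conversion helpers above ignore parsing errors (see the package comment), here is a small sketch of what that means in practice; the bogus multiaddress string is made up and the metric values are illustrative:

package main

import (
	"fmt"

	"github.com/ipfs/ipfs-cluster/api"
)

func main() {
	// An invalid multiaddress silently converts to a nil Multiaddr,
	// so inputs must be validated before using these helpers.
	bad := api.MultiaddrSerial("not a multiaddress")
	fmt.Println(bad.ToMultiaddr() == nil) // true

	// Metrics expire via the RFC1123 Expire string; Discard combines
	// the Valid flag with the expiration check.
	m := api.Metric{Name: "numpin", Value: "3", Valid: true}
	fmt.Println(m.Discard()) // true: no TTL has been set, so it counts as expired
	m.SetTTL(10)
	fmt.Println(m.Discard()) // false: valid and not yet expired
}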

195
api/types_test.go Normal file
View File

@ -0,0 +1,195 @@
package api
import (
"testing"
"time"
cid "github.com/ipfs/go-cid"
peer "github.com/libp2p/go-libp2p-peer"
ma "github.com/multiformats/go-multiaddr"
)
var testTime = time.Date(2017, 12, 31, 15, 45, 50, 0, time.UTC)
var testMAddr, _ = ma.NewMultiaddr("/ip4/1.2.3.4")
var testCid1, _ = cid.Decode("QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmq")
var testPeerID1, _ = peer.IDB58Decode("QmXZrtE5jQwXNqCJMfHUTQkvhQ4ZAnqMnmzFMJfLewuabc")
var testPeerID2, _ = peer.IDB58Decode("QmXZrtE5jQwXNqCJMfHUTQkvhQ4ZAnqMnmzFMJfLewuabd")
func TestTrackerFromString(t *testing.T) {
testcases := []string{"bug", "cluster_error", "pin_error", "unpin_error", "pinned", "pinning", "unpinning", "unpinned", "remote"}
for i, tc := range testcases {
if TrackerStatusFromString(tc).String() != TrackerStatus(i).String() {
t.Errorf("%s does not match %s", tc, i)
}
}
}
func TestIPFSPinStatusFromString(t *testing.T) {
testcases := []string{"direct", "recursive", "indirect"}
for i, tc := range testcases {
if IPFSPinStatusFromString(tc) != IPFSPinStatus(i+2) {
t.Errorf("%s does not match IPFSPinStatus %d", tc, i+2)
}
}
}
func TestGlobalPinInfoConv(t *testing.T) {
defer func() {
if r := recover(); r != nil {
t.Fatal("paniced")
}
}()
gpi := GlobalPinInfo{
Cid: testCid1,
PeerMap: map[peer.ID]PinInfo{
testPeerID1: {
Cid: testCid1,
Peer: testPeerID1,
Status: TrackerStatusPinned,
TS: testTime,
},
},
}
newgpi := gpi.ToSerial().ToGlobalPinInfo()
if gpi.Cid.String() != newgpi.Cid.String() {
t.Error("mismatching CIDs")
}
if gpi.PeerMap[testPeerID1].Cid.String() != newgpi.PeerMap[testPeerID1].Cid.String() {
t.Error("mismatching PinInfo CIDs")
}
if !gpi.PeerMap[testPeerID1].TS.Equal(newgpi.PeerMap[testPeerID1].TS) {
t.Error("bad time")
}
}
func TestIDConv(t *testing.T) {
defer func() {
if r := recover(); r != nil {
t.Fatal("paniced")
}
}()
id := ID{
ID: testPeerID1,
Addresses: []ma.Multiaddr{testMAddr},
ClusterPeers: []ma.Multiaddr{testMAddr},
Version: "testv",
Commit: "ab",
RPCProtocolVersion: "testp",
Error: "teste",
IPFS: IPFSID{
ID: testPeerID2,
Addresses: []ma.Multiaddr{testMAddr},
Error: "abc",
},
}
newid := id.ToSerial().ToID()
if id.ID != newid.ID {
t.Error("mismatching Peer IDs")
}
if !id.Addresses[0].Equal(newid.Addresses[0]) {
t.Error("mismatching addresses")
}
if !id.ClusterPeers[0].Equal(newid.ClusterPeers[0]) {
t.Error("mismatching clusterPeers")
}
if id.Version != newid.Version ||
id.Commit != newid.Commit ||
id.RPCProtocolVersion != newid.RPCProtocolVersion ||
id.Error != newid.Error {
t.Error("some field didn't survive")
}
if id.IPFS.ID != newid.IPFS.ID {
t.Error("ipfs daemon id mismatch")
}
if !id.IPFS.Addresses[0].Equal(newid.IPFS.Addresses[0]) {
t.Error("mismatching addresses")
}
if id.IPFS.Error != newid.IPFS.Error {
t.Error("ipfs error mismatch")
}
}
func TestMultiaddrConv(t *testing.T) {
defer func() {
if r := recover(); r != nil {
t.Fatal("paniced")
}
}()
addrs := []ma.Multiaddr{testMAddr}
converted := MultiaddrsToSerial(addrs).ToMultiaddrs()
if !addrs[0].Equal(converted[0]) {
t.Error("mismatch")
}
}
func TestCidArgConv(t *testing.T) {
defer func() {
if r := recover(); r != nil {
t.Fatal("paniced")
}
}()
c := CidArg{
Cid: testCid1,
Allocations: []peer.ID{testPeerID1},
Everywhere: true,
}
newc := c.ToSerial().ToCidArg()
if c.Cid.String() != newc.Cid.String() ||
c.Allocations[0] != newc.Allocations[0] ||
c.Everywhere != newc.Everywhere {
t.Error("mismatch")
}
}
func TestMetric(t *testing.T) {
m := Metric{
Name: "hello",
Value: "abc",
}
if !m.Expired() {
t.Error("metric should be expire")
}
m.SetTTL(1)
if m.Expired() {
t.Error("metric should not be expired")
}
// let it expire
time.Sleep(1500 * time.Millisecond)
if !m.Expired() {
t.Error("metric should be expired")
}
m.SetTTL(30)
m.Valid = true
if m.Discard() {
t.Error("metric should be valid")
}
m.Valid = false
if !m.Discard() {
t.Error("metric should be invalid")
}
ttl := m.GetTTL()
if ttl > 30*time.Second || ttl < 29*time.Second {
t.Error("looks like a bad ttl")
}
}

View File

@ -2,10 +2,13 @@ package ipfscluster
import (
"context"
"errors"
"fmt"
"sync"
"time"
"github.com/ipfs/ipfs-cluster/api"
rpc "github.com/hsanjuan/go-libp2p-gorpc"
cid "github.com/ipfs/go-cid"
host "github.com/libp2p/go-libp2p-host"
@ -19,7 +22,8 @@ import (
// Cluster is the main IPFS cluster component. It provides
// the go-API for it and orchestrates the components that make up the system.
type Cluster struct {
ctx context.Context
ctx context.Context
cancel func()
id peer.ID
config *Config
@ -33,10 +37,12 @@ type Cluster struct {
ipfs IPFSConnector
state State
tracker PinTracker
monitor PeerMonitor
allocator PinAllocator
informer Informer
shutdownLock sync.Mutex
shutdown bool
shutdownCh chan struct{}
doneCh chan struct{}
readyCh chan struct{}
wg sync.WaitGroup
@ -50,8 +56,17 @@ type Cluster struct {
// The new cluster peer may still be performing initialization tasks when
// this call returns (consensus may still be bootstrapping). Use Cluster.Ready()
// if you need to wait until the peer is fully up.
func NewCluster(cfg *Config, api API, ipfs IPFSConnector, state State, tracker PinTracker) (*Cluster, error) {
ctx := context.Background()
func NewCluster(
cfg *Config,
api API,
ipfs IPFSConnector,
state State,
tracker PinTracker,
monitor PeerMonitor,
allocator PinAllocator,
informer Informer) (*Cluster, error) {
ctx, cancel := context.WithCancel(context.Background())
host, err := makeHost(ctx, cfg)
if err != nil {
return nil, err
@ -63,17 +78,20 @@ func NewCluster(cfg *Config, api API, ipfs IPFSConnector, state State, tracker P
}
c := &Cluster{
ctx: ctx,
id: host.ID(),
config: cfg,
host: host,
api: api,
ipfs: ipfs,
state: state,
tracker: tracker,
shutdownCh: make(chan struct{}, 1),
doneCh: make(chan struct{}, 1),
readyCh: make(chan struct{}, 1),
ctx: ctx,
cancel: cancel,
id: host.ID(),
config: cfg,
host: host,
api: api,
ipfs: ipfs,
state: state,
tracker: tracker,
monitor: monitor,
allocator: allocator,
informer: informer,
doneCh: make(chan struct{}),
readyCh: make(chan struct{}),
}
c.setupPeerManager()
@ -89,7 +107,17 @@ func NewCluster(cfg *Config, api API, ipfs IPFSConnector, state State, tracker P
return nil, err
}
c.setupRPCClients()
c.run()
c.bootstrap()
ok := c.bootstrap()
if !ok {
logger.Error("Bootstrap unsuccessful")
c.Shutdown()
return nil, errors.New("bootstrap unsuccessful")
}
go func() {
c.ready()
c.run()
}()
return c, nil
}
@ -106,7 +134,7 @@ func (c *Cluster) setupPeerManager() {
func (c *Cluster) setupRPC() error {
rpcServer := rpc.NewServer(c.host, RPCProtocol)
err := rpcServer.RegisterName("Cluster", &RPCAPI{cluster: c})
err := rpcServer.RegisterName("Cluster", &RPCAPI{c})
if err != nil {
return err
}
@ -142,8 +170,12 @@ func (c *Cluster) setupRPCClients() {
c.ipfs.SetClient(c.rpcClient)
c.api.SetClient(c.rpcClient)
c.consensus.SetClient(c.rpcClient)
c.monitor.SetClient(c.rpcClient)
c.allocator.SetClient(c.rpcClient)
c.informer.SetClient(c.rpcClient)
}
// stateSyncWatcher loops and triggers StateSync from time to time
func (c *Cluster) stateSyncWatcher() {
stateSyncTicker := time.NewTicker(
time.Duration(c.config.StateSyncSeconds) * time.Second)
@ -158,30 +190,52 @@ func (c *Cluster) stateSyncWatcher() {
}
}
// pushInformerMetrics loops and pushes the local informer's metrics to the leader's monitor
func (c *Cluster) pushInformerMetrics() {
timer := time.NewTimer(0) // fire immediately first
for {
select {
case <-c.ctx.Done():
return
case <-timer.C:
// wait
}
leader, err := c.consensus.Leader()
if err != nil {
// retry in 1 second
timer.Stop()
timer.Reset(1 * time.Second)
continue
}
metric := c.informer.GetMetric()
metric.Peer = c.id
err = c.rpcClient.Call(
leader,
"Cluster", "PeerMonitorLogMetric",
metric, &struct{}{})
if err != nil {
logger.Errorf("error pushing metric to %s", leader.Pretty())
}
logger.Debugf("pushed metric %s to %s", metric.Name, metric.Peer.Pretty())
timer.Stop() // no need to drain C if we are here
timer.Reset(metric.GetTTL() / 2)
}
}
// run provides a cancellable context and launches some goroutines
// before signaling readyCh
func (c *Cluster) run() {
c.wg.Add(1)
// cancellable context
go func() {
defer c.wg.Done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
c.ctx = ctx
go c.stateSyncWatcher()
go c.bootstrapAndReady()
<-c.shutdownCh
}()
go c.stateSyncWatcher()
go c.pushInformerMetrics()
}
func (c *Cluster) bootstrapAndReady() {
ok := c.bootstrap()
if !ok {
logger.Error("Bootstrap unsuccessful")
c.Shutdown()
return
}
func (c *Cluster) ready() {
// We bootstrapped first because, with a dirty state, consensus
// may have a peerset but no leader, so we cannot wait
// for it.
@ -197,8 +251,6 @@ func (c *Cluster) bootstrapAndReady() {
}
// Cluster is ready.
c.readyCh <- struct{}{}
logger.Info("IPFS Cluster is ready")
logger.Info("Cluster Peers (not including ourselves):")
peers := c.peerManager.peersAddrs()
if len(peers) == 0 {
@ -207,6 +259,8 @@ func (c *Cluster) bootstrapAndReady() {
for _, a := range c.peerManager.peersAddrs() {
logger.Infof(" - %s", a)
}
close(c.readyCh)
logger.Info("IPFS Cluster is ready")
}
func (c *Cluster) bootstrap() bool {
@ -256,6 +310,9 @@ func (c *Cluster) Shutdown() error {
c.peerManager.resetPeers()
}
// Cancel contexts
c.cancel()
if con := c.consensus; con != nil {
if err := con.Shutdown(); err != nil {
logger.Errorf("error stopping consensus: %s", err)
@ -278,7 +335,6 @@ func (c *Cluster) Shutdown() error {
logger.Errorf("error stopping PinTracker: %s", err)
return err
}
c.shutdownCh <- struct{}{}
c.wg.Wait()
c.host.Close() // Shutdown all network services
c.shutdown = true
@ -293,7 +349,7 @@ func (c *Cluster) Done() <-chan struct{} {
}
// ID returns information about the Cluster peer
func (c *Cluster) ID() ID {
func (c *Cluster) ID() api.ID {
// ignore error since it is included in response object
ipfsID, _ := c.ipfs.ID()
var addrs []ma.Multiaddr
@ -301,9 +357,9 @@ func (c *Cluster) ID() ID {
addrs = append(addrs, multiaddrJoin(addr, c.host.ID()))
}
return ID{
ID: c.host.ID(),
PublicKey: c.host.Peerstore().PubKey(c.host.ID()),
return api.ID{
ID: c.host.ID(),
//PublicKey: c.host.Peerstore().PubKey(c.host.ID()),
Addresses: addrs,
ClusterPeers: c.peerManager.peersAddrs(),
Version: Version,
@ -319,7 +375,7 @@ func (c *Cluster) ID() ID {
// consensus and will receive the shared state (including the
// list of peers). The new peer should be a single-peer cluster,
// preferable without any relevant state.
func (c *Cluster) PeerAdd(addr ma.Multiaddr) (ID, error) {
func (c *Cluster) PeerAdd(addr ma.Multiaddr) (api.ID, error) {
// Starting 10 nodes on the same box for testing
// causes deadlocks; a global lock here
// seems to help.
@ -328,7 +384,7 @@ func (c *Cluster) PeerAdd(addr ma.Multiaddr) (ID, error) {
logger.Debugf("peerAdd called with %s", addr)
pid, decapAddr, err := multiaddrSplit(addr)
if err != nil {
id := ID{
id := api.ID{
Error: err.Error(),
}
return id, err
@ -340,18 +396,18 @@ func (c *Cluster) PeerAdd(addr ma.Multiaddr) (ID, error) {
err = c.peerManager.addPeer(remoteAddr)
if err != nil {
logger.Error(err)
id := ID{ID: pid, Error: err.Error()}
id := api.ID{ID: pid, Error: err.Error()}
return id, err
}
// Figure out our address to that peer. This also
// ensures that it is reachable
var addrSerial MultiaddrSerial
var addrSerial api.MultiaddrSerial
err = c.rpcClient.Call(pid, "Cluster",
"RemoteMultiaddrForPeer", c.host.ID(), &addrSerial)
if err != nil {
logger.Error(err)
id := ID{ID: pid, Error: err.Error()}
id := api.ID{ID: pid, Error: err.Error()}
c.peerManager.rmPeer(pid, false)
return id, err
}
@ -360,7 +416,7 @@ func (c *Cluster) PeerAdd(addr ma.Multiaddr) (ID, error) {
err = c.consensus.LogAddPeer(remoteAddr)
if err != nil {
logger.Error(err)
id := ID{ID: pid, Error: err.Error()}
id := api.ID{ID: pid, Error: err.Error()}
c.peerManager.rmPeer(pid, false)
return id, err
}
@ -371,7 +427,7 @@ func (c *Cluster) PeerAdd(addr ma.Multiaddr) (ID, error) {
err = c.rpcClient.Call(pid,
"Cluster",
"PeerManagerAddFromMultiaddrs",
MultiaddrsToSerial(clusterPeers),
api.MultiaddrsToSerial(clusterPeers),
&struct{}{})
if err != nil {
logger.Error(err)
@ -438,11 +494,11 @@ func (c *Cluster) Join(addr ma.Multiaddr) error {
// Note that PeerAdd() on the remote peer will
// figure out what our real address is (obviously not
// ClusterAddr).
var myID IDSerial
var myID api.IDSerial
err = c.rpcClient.Call(pid,
"Cluster",
"PeerAdd",
MultiaddrToSerial(multiaddrJoin(c.config.ClusterAddr, c.host.ID())),
api.MultiaddrToSerial(multiaddrJoin(c.config.ClusterAddr, c.host.ID())),
&myID)
if err != nil {
logger.Error(err)
@ -465,39 +521,33 @@ func (c *Cluster) Join(addr ma.Multiaddr) error {
// StateSync syncs the consensus state to the Pin Tracker, ensuring
// that every Cid that should be tracked is tracked. It returns
// PinInfo for Cids which were added or deleted.
func (c *Cluster) StateSync() ([]PinInfo, error) {
func (c *Cluster) StateSync() ([]api.PinInfo, error) {
cState, err := c.consensus.State()
if err != nil {
return nil, err
}
logger.Debug("syncing state to tracker")
clusterPins := cState.ListPins()
clusterPins := cState.List()
var changed []*cid.Cid
// For the moment we run everything in parallel.
// The PinTracker should probably decide whether it can
// pin in parallel, or whether it queues everything and does it
// one by one.
// Track items which are not tracked
for _, h := range clusterPins {
if c.tracker.Status(h).Status == TrackerStatusUnpinned {
changed = append(changed, h)
go c.tracker.Track(h)
for _, carg := range clusterPins {
if c.tracker.Status(carg.Cid).Status == api.TrackerStatusUnpinned {
changed = append(changed, carg.Cid)
go c.tracker.Track(carg)
}
}
// Untrack items which should not be tracked
for _, p := range c.tracker.StatusAll() {
h, _ := cid.Decode(p.CidStr)
if !cState.HasPin(h) {
changed = append(changed, h)
go c.tracker.Untrack(h)
if !cState.Has(p.Cid) {
changed = append(changed, p.Cid)
go c.tracker.Untrack(p.Cid)
}
}
var infos []PinInfo
var infos []api.PinInfo
for _, h := range changed {
infos = append(infos, c.tracker.Status(h))
}
@ -506,13 +556,13 @@ func (c *Cluster) StateSync() ([]PinInfo, error) {
// StatusAll returns the GlobalPinInfo for all tracked Cids. If an error
// happens, the slice will contain as much information as could be fetched.
func (c *Cluster) StatusAll() ([]GlobalPinInfo, error) {
func (c *Cluster) StatusAll() ([]api.GlobalPinInfo, error) {
return c.globalPinInfoSlice("TrackerStatusAll")
}
// Status returns the GlobalPinInfo for a given Cid. If an error happens,
// the GlobalPinInfo should contain as much information as could be fetched.
func (c *Cluster) Status(h *cid.Cid) (GlobalPinInfo, error) {
func (c *Cluster) Status(h *cid.Cid) (api.GlobalPinInfo, error) {
return c.globalPinInfoCid("TrackerStatus", h)
}
@ -521,14 +571,13 @@ func (c *Cluster) Status(h *cid.Cid) (GlobalPinInfo, error) {
//
// SyncAllLocal returns the list of PinInfo that were updated because of
// the operation, along with those in error states.
func (c *Cluster) SyncAllLocal() ([]PinInfo, error) {
func (c *Cluster) SyncAllLocal() ([]api.PinInfo, error) {
syncedItems, err := c.tracker.SyncAll()
// Despite errors, the tracker provides synced items that we can return.
// They encapsulate the error.
if err != nil {
logger.Error("tracker.Sync() returned with error: ", err)
logger.Error("Is the ipfs daemon running?")
logger.Error("LocalSync returning without attempting recovers")
}
return syncedItems, err
}
@ -536,7 +585,7 @@ func (c *Cluster) SyncAllLocal() ([]PinInfo, error) {
// SyncLocal performs a local sync operation for the given Cid. This will
// tell the tracker to verify the status of the Cid against the IPFS daemon.
// It returns the updated PinInfo for the Cid.
func (c *Cluster) SyncLocal(h *cid.Cid) (PinInfo, error) {
func (c *Cluster) SyncLocal(h *cid.Cid) (api.PinInfo, error) {
var err error
pInfo, err := c.tracker.Sync(h)
// Despite errors, the tracker provides an updated PinInfo, so
@ -549,37 +598,38 @@ func (c *Cluster) SyncLocal(h *cid.Cid) (PinInfo, error) {
}
// SyncAll triggers LocalSync() operations in all cluster peers.
func (c *Cluster) SyncAll() ([]GlobalPinInfo, error) {
func (c *Cluster) SyncAll() ([]api.GlobalPinInfo, error) {
return c.globalPinInfoSlice("SyncAllLocal")
}
// Sync triggers a LocalSyncCid() operation for a given Cid
// in all cluster peers.
func (c *Cluster) Sync(h *cid.Cid) (GlobalPinInfo, error) {
func (c *Cluster) Sync(h *cid.Cid) (api.GlobalPinInfo, error) {
return c.globalPinInfoCid("SyncLocal", h)
}
// RecoverLocal triggers a recover operation for a given Cid
func (c *Cluster) RecoverLocal(h *cid.Cid) (PinInfo, error) {
func (c *Cluster) RecoverLocal(h *cid.Cid) (api.PinInfo, error) {
return c.tracker.Recover(h)
}
// Recover triggers a recover operation for a given Cid in all
// cluster peers.
func (c *Cluster) Recover(h *cid.Cid) (GlobalPinInfo, error) {
func (c *Cluster) Recover(h *cid.Cid) (api.GlobalPinInfo, error) {
return c.globalPinInfoCid("TrackerRecover", h)
}
// Pins returns the list of Cids managed by Cluster and which are part
// of the current global state. This is the source of truth as to which
// pins are managed, but does not indicate if the item is successfully pinned.
func (c *Cluster) Pins() []*cid.Cid {
func (c *Cluster) Pins() []api.CidArg {
cState, err := c.consensus.State()
if err != nil {
logger.Error(err)
return []*cid.Cid{}
return []api.CidArg{}
}
return cState.ListPins()
return cState.List()
}
// Pin makes the cluster Pin a Cid. This implies adding the Cid
@ -592,7 +642,26 @@ func (c *Cluster) Pins() []*cid.Cid {
// of underlying IPFS daemon pinning operations.
func (c *Cluster) Pin(h *cid.Cid) error {
logger.Info("pinning:", h)
err := c.consensus.LogPin(h)
cidArg := api.CidArg{
Cid: h,
}
rpl := c.config.ReplicationFactor
switch {
case rpl == 0:
return errors.New("replication factor is 0")
case rpl < 0:
cidArg.Everywhere = true
case rpl > 0:
allocs, err := c.allocate(h)
if err != nil {
return err
}
cidArg.Allocations = allocs
}
err := c.consensus.LogPin(cidArg)
if err != nil {
return err
}
@ -607,7 +676,12 @@ func (c *Cluster) Pin(h *cid.Cid) error {
// of underlying IPFS daemon unpinning operations.
func (c *Cluster) Unpin(h *cid.Cid) error {
logger.Info("unpinning:", h)
err := c.consensus.LogUnpin(h)
carg := api.CidArg{
Cid: h,
}
err := c.consensus.LogUnpin(carg)
if err != nil {
return err
}
@ -620,10 +694,10 @@ func (c *Cluster) Version() string {
}
// Peers returns the IDs of the members of this Cluster
func (c *Cluster) Peers() []ID {
func (c *Cluster) Peers() []api.ID {
members := c.peerManager.peers()
peersSerial := make([]IDSerial, len(members), len(members))
peers := make([]ID, len(members), len(members))
peersSerial := make([]api.IDSerial, len(members), len(members))
peers := make([]api.ID, len(members), len(members))
errs := c.multiRPC(members, "Cluster", "ID", struct{}{},
copyIDSerialsToIfaces(peersSerial))
@ -697,25 +771,32 @@ func (c *Cluster) multiRPC(dests []peer.ID, svcName, svcMethod string, args inte
}
func (c *Cluster) globalPinInfoCid(method string, h *cid.Cid) (GlobalPinInfo, error) {
pin := GlobalPinInfo{
func (c *Cluster) globalPinInfoCid(method string, h *cid.Cid) (api.GlobalPinInfo, error) {
pin := api.GlobalPinInfo{
Cid: h,
PeerMap: make(map[peer.ID]PinInfo),
PeerMap: make(map[peer.ID]api.PinInfo),
}
members := c.peerManager.peers()
replies := make([]PinInfo, len(members), len(members))
args := NewCidArg(h)
errs := c.multiRPC(members, "Cluster", method, args, copyPinInfoToIfaces(replies))
replies := make([]api.PinInfoSerial, len(members), len(members))
arg := api.CidArg{
Cid: h,
}
errs := c.multiRPC(members,
"Cluster",
method, arg.ToSerial(),
copyPinInfoSerialToIfaces(replies))
for i, r := range replies {
if e := errs[i]; e != nil { // This error must come from not being able to contact that cluster member
logger.Errorf("%s: error in broadcast response from %s: %s ", c.host.ID(), members[i], e)
if r.Status == TrackerStatusBug {
r = PinInfo{
CidStr: h.String(),
for i, rserial := range replies {
r := rserial.ToPinInfo()
if e := errs[i]; e != nil {
if r.Status == api.TrackerStatusBug {
// This error must come from not being able to contact that cluster member
logger.Errorf("%s: error in broadcast response from %s: %s ", c.host.ID(), members[i], e)
r = api.PinInfo{
Cid: r.Cid,
Peer: members[i],
Status: TrackerStatusClusterError,
Status: api.TrackerStatusClusterError,
TS: time.Now(),
Error: e.Error(),
}
@ -729,22 +810,25 @@ func (c *Cluster) globalPinInfoCid(method string, h *cid.Cid) (GlobalPinInfo, er
return pin, nil
}
func (c *Cluster) globalPinInfoSlice(method string) ([]GlobalPinInfo, error) {
var infos []GlobalPinInfo
fullMap := make(map[string]GlobalPinInfo)
func (c *Cluster) globalPinInfoSlice(method string) ([]api.GlobalPinInfo, error) {
var infos []api.GlobalPinInfo
fullMap := make(map[string]api.GlobalPinInfo)
members := c.peerManager.peers()
replies := make([][]PinInfo, len(members), len(members))
errs := c.multiRPC(members, "Cluster", method, struct{}{}, copyPinInfoSliceToIfaces(replies))
replies := make([][]api.PinInfoSerial, len(members), len(members))
errs := c.multiRPC(members,
"Cluster",
method, struct{}{},
copyPinInfoSerialSliceToIfaces(replies))
mergePins := func(pins []PinInfo) {
for _, p := range pins {
item, ok := fullMap[p.CidStr]
c, _ := cid.Decode(p.CidStr)
mergePins := func(pins []api.PinInfoSerial) {
for _, pserial := range pins {
p := pserial.ToPinInfo()
item, ok := fullMap[pserial.Cid]
if !ok {
fullMap[p.CidStr] = GlobalPinInfo{
Cid: c,
PeerMap: map[peer.ID]PinInfo{
fullMap[pserial.Cid] = api.GlobalPinInfo{
Cid: p.Cid,
PeerMap: map[peer.ID]api.PinInfo{
p.Peer: p,
},
}
@ -766,11 +850,12 @@ func (c *Cluster) globalPinInfoSlice(method string) ([]GlobalPinInfo, error) {
// Merge any errors
for p, msg := range erroredPeers {
for c := range fullMap {
fullMap[c].PeerMap[p] = PinInfo{
CidStr: c,
for cidStr := range fullMap {
c, _ := cid.Decode(cidStr)
fullMap[cidStr].PeerMap[p] = api.PinInfo{
Cid: c,
Peer: p,
Status: TrackerStatusClusterError,
Status: api.TrackerStatusClusterError,
TS: time.Now(),
Error: msg,
}
@ -784,8 +869,8 @@ func (c *Cluster) globalPinInfoSlice(method string) ([]GlobalPinInfo, error) {
return infos, nil
}
func (c *Cluster) getIDForPeer(pid peer.ID) (ID, error) {
idSerial := ID{ID: pid}.ToSerial()
func (c *Cluster) getIDForPeer(pid peer.ID) (api.ID, error) {
idSerial := api.ID{ID: pid}.ToSerial()
err := c.rpcClient.Call(
pid, "Cluster", "ID", struct{}{}, &idSerial)
id := idSerial.ToID()
@ -795,3 +880,102 @@ func (c *Cluster) getIDForPeer(pid peer.ID) (ID, error) {
}
return id, err
}
// allocate finds peers to allocate a hash, using the informer and the monitor.
// It should only be used with a positive replication factor.
func (c *Cluster) allocate(hash *cid.Cid) ([]peer.ID, error) {
if c.config.ReplicationFactor <= 0 {
return nil, errors.New("cannot decide allocation for replication factor <= 0")
}
// Figure out who is currently holding this
var currentlyAllocatedPeers []peer.ID
st, err := c.consensus.State()
if err != nil {
// no state: we assume it is empty. If there were some other
// problem, we would fail to commit anyway.
currentlyAllocatedPeers = []peer.ID{}
} else {
carg := st.Get(hash)
currentlyAllocatedPeers = carg.Allocations
}
// initialize a candidate metrics map with all current clusterPeers
// (albeit with invalid metrics)
clusterPeers := c.peerManager.peers()
metricsMap := make(map[peer.ID]api.Metric)
for _, cp := range clusterPeers {
metricsMap[cp] = api.Metric{Valid: false}
}
// Request latest metrics logged by informers from the leader
metricName := c.informer.Name()
l, err := c.consensus.Leader()
if err != nil {
return nil, errors.New("cannot determine leading Monitor")
}
var metrics []api.Metric
err = c.rpcClient.Call(l,
"Cluster", "PeerMonitorLastMetrics",
metricName,
&metrics)
if err != nil {
return nil, err
}
// put metrics in the metricsMap if they belong to a current clusterPeer
for _, m := range metrics {
_, ok := metricsMap[m.Peer]
if !ok {
continue
}
metricsMap[m.Peer] = m
}
// Remove any invalid metric. This will clear any cluster peers
// for which we did not receive metrics.
for p, m := range metricsMap {
if m.Discard() {
delete(metricsMap, p)
}
}
// Move metrics from currentlyAllocatedPeers to a new map
currentlyAllocatedPeersMetrics := make(map[peer.ID]api.Metric)
for _, p := range currentlyAllocatedPeers {
m, ok := metricsMap[p]
if !ok {
continue
}
currentlyAllocatedPeersMetrics[p] = m
delete(metricsMap, p)
}
// how many allocations do we need (note we will re-allocate if we did
// not receive good metrics for currently allocated peers)
needed := c.config.ReplicationFactor - len(currentlyAllocatedPeersMetrics)
// if we are already good (note invalid metrics would trigger
// re-allocations as they are not included in currentlyAllocatedPeersMetrics)
if needed <= 0 {
return nil, fmt.Errorf("CID is already correctly allocated to %s", currentlyAllocatedPeers)
}
// Allocate is called with currentlyAllocatedPeersMetrics, which contains
// only those currentlyAllocatedPeers that have provided valid metrics.
candidateAllocs, err := c.allocator.Allocate(hash, currentlyAllocatedPeersMetrics, metricsMap)
if err != nil {
return nil, logError(err.Error())
}
// we don't have enough peers to pin
if len(candidateAllocs) < needed {
err = logError("cannot find enough allocations for this CID: needed: %d. Got: %s",
needed, candidateAllocs)
return nil, err
}
// return as many as needed
return candidateAllocs[0:needed], nil
}
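As a worked example with illustrative numbers: with ReplicationFactor = 3 and two currently allocated peers still reporting valid metrics, needed = 3 - 2 = 1, so one extra peer is taken from the candidates returned by the allocator. If none of the current allocations reported valid metrics, needed = 3 and the CID is re-allocated from scratch, while needed <= 0 aborts with the "already correctly allocated" error.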

View File

@ -4,6 +4,12 @@ import (
"errors"
"testing"
"github.com/ipfs/ipfs-cluster/allocator/numpinalloc"
"github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/informer/numpin"
"github.com/ipfs/ipfs-cluster/state/mapstate"
"github.com/ipfs/ipfs-cluster/test"
rpc "github.com/hsanjuan/go-libp2p-gorpc"
cid "github.com/ipfs/go-cid"
)
@ -30,12 +36,12 @@ type mockConnector struct {
mockComponent
}
func (ipfs *mockConnector) ID() (IPFSID, error) {
func (ipfs *mockConnector) ID() (api.IPFSID, error) {
if ipfs.returnError {
return IPFSID{}, errors.New("")
return api.IPFSID{}, errors.New("")
}
return IPFSID{
ID: testPeerID,
return api.IPFSID{
ID: test.TestPeerID1,
}, nil
}
@ -53,27 +59,30 @@ func (ipfs *mockConnector) Unpin(c *cid.Cid) error {
return nil
}
func (ipfs *mockConnector) PinLsCid(c *cid.Cid) (IPFSPinStatus, error) {
func (ipfs *mockConnector) PinLsCid(c *cid.Cid) (api.IPFSPinStatus, error) {
if ipfs.returnError {
return IPFSPinStatusError, errors.New("")
return api.IPFSPinStatusError, errors.New("")
}
return IPFSPinStatusRecursive, nil
return api.IPFSPinStatusRecursive, nil
}
func (ipfs *mockConnector) PinLs() (map[string]IPFSPinStatus, error) {
func (ipfs *mockConnector) PinLs(filter string) (map[string]api.IPFSPinStatus, error) {
if ipfs.returnError {
return nil, errors.New("")
}
m := make(map[string]IPFSPinStatus)
m := make(map[string]api.IPFSPinStatus)
return m, nil
}
func testingCluster(t *testing.T) (*Cluster, *mockAPI, *mockConnector, *MapState, *MapPinTracker) {
func testingCluster(t *testing.T) (*Cluster, *mockAPI, *mockConnector, *mapstate.MapState, *MapPinTracker) {
api := &mockAPI{}
ipfs := &mockConnector{}
cfg := testingConfig()
st := NewMapState()
st := mapstate.NewMapState()
tracker := NewMapPinTracker(cfg)
mon := NewStdPeerMonitor(5)
alloc := numpinalloc.NewAllocator()
inf := numpin.NewInformer()
cl, err := NewCluster(
cfg,
@ -81,7 +90,9 @@ func testingCluster(t *testing.T) (*Cluster, *mockAPI, *mockConnector, *MapState
ipfs,
st,
tracker,
)
mon,
alloc,
inf)
if err != nil {
t.Fatal("cannot create cluster:", err)
}
@ -109,10 +120,10 @@ func TestClusterStateSync(t *testing.T) {
defer cl.Shutdown()
_, err := cl.StateSync()
if err == nil {
t.Error("expected an error as there is no state to sync")
t.Fatal("expected an error as there is no state to sync")
}
c, _ := cid.Decode(testCid)
c, _ := cid.Decode(test.TestCid1)
err = cl.Pin(c)
if err != nil {
t.Fatal("pin should have worked:", err)
@ -125,7 +136,7 @@ func TestClusterStateSync(t *testing.T) {
// Modify state on the side so the sync does not
// happen on an empty state
st.RmPin(c)
st.Rm(c)
_, err = cl.StateSync()
if err != nil {
t.Fatal("sync with recover should have worked:", err)
@ -146,9 +157,9 @@ func TestClusterID(t *testing.T) {
if id.Version != Version {
t.Error("version should match current version")
}
if id.PublicKey == nil {
t.Error("publicKey should not be empty")
}
//if id.PublicKey == nil {
// t.Error("publicKey should not be empty")
//}
}
func TestClusterPin(t *testing.T) {
@ -156,7 +167,7 @@ func TestClusterPin(t *testing.T) {
defer cleanRaft()
defer cl.Shutdown()
c, _ := cid.Decode(testCid)
c, _ := cid.Decode(test.TestCid1)
err := cl.Pin(c)
if err != nil {
t.Fatal("pin should have worked:", err)
@ -175,7 +186,7 @@ func TestClusterUnpin(t *testing.T) {
defer cleanRaft()
defer cl.Shutdown()
c, _ := cid.Decode(testCid)
c, _ := cid.Decode(test.TestCid1)
err := cl.Unpin(c)
if err != nil {
t.Fatal("pin should have worked:", err)

View File

@ -69,6 +69,9 @@ type Config struct {
// Number of seconds between StateSync() operations
StateSyncSeconds int
// ReplicationFactor is the number of copies we keep for each pin
ReplicationFactor int
// if a config has been loaded from disk, track the path
// so it can be saved to the same place.
path string
@ -125,6 +128,12 @@ type JSONConfig struct {
// tracker state. Normally states are synced anyway, but this helps
// when new nodes are joining the cluster
StateSyncSeconds int `json:"state_sync_seconds"`
// ReplicationFactor indicates the number of nodes that must pin content.
// For example, a replication_factor of 2 will prompt the cluster to choose
// two nodes for each pinned hash. A replication_factor of -1 will
// use every available node for each pin.
ReplicationFactor int `json:"replication_factor"`
}
// ToJSONConfig converts a Config object to its JSON representation which
@ -164,6 +173,7 @@ func (cfg *Config) ToJSONConfig() (j *JSONConfig, err error) {
IPFSNodeMultiaddress: cfg.IPFSNodeAddr.String(),
ConsensusDataFolder: cfg.ConsensusDataFolder,
StateSyncSeconds: cfg.StateSyncSeconds,
ReplicationFactor: cfg.ReplicationFactor,
}
return
}
@ -232,6 +242,11 @@ func (jcfg *JSONConfig) ToConfig() (c *Config, err error) {
return
}
if jcfg.ReplicationFactor == 0 {
logger.Warning("Replication factor set to -1 (pin everywhere)")
jcfg.ReplicationFactor = -1
}
if jcfg.StateSyncSeconds <= 0 {
jcfg.StateSyncSeconds = DefaultStateSyncSeconds
}
@ -248,6 +263,7 @@ func (jcfg *JSONConfig) ToConfig() (c *Config, err error) {
IPFSNodeAddr: ipfsNodeAddr,
ConsensusDataFolder: jcfg.ConsensusDataFolder,
StateSyncSeconds: jcfg.StateSyncSeconds,
ReplicationFactor: jcfg.ReplicationFactor,
}
return
}
@ -331,5 +347,6 @@ func NewDefaultConfig() (*Config, error) {
IPFSNodeAddr: ipfsNodeAddr,
ConsensusDataFolder: "ipfscluster-data",
StateSyncSeconds: DefaultStateSyncSeconds,
ReplicationFactor: -1,
}, nil
}
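As a quick sketch of how the new setting surfaces in the JSON configuration (this assumes the repository root is importable as github.com/ipfs/ipfs-cluster, matching the subpackage paths above; only ReplicationFactor is set and the other fields are left at their zero values):

package main

import (
	"encoding/json"
	"fmt"

	ipfscluster "github.com/ipfs/ipfs-cluster"
)

func main() {
	// Marshal a JSONConfig to see the key used in the configuration file.
	// A negative replication_factor means "pin everywhere"; a value of 0
	// is rewritten to -1 when the configuration is loaded.
	j := ipfscluster.JSONConfig{ReplicationFactor: 2}
	out, _ := json.MarshalIndent(j, "", "  ")
	fmt.Println(string(out)) // contains: "replication_factor": 2
}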

View File

@ -6,8 +6,9 @@ import (
"sync"
"time"
"github.com/ipfs/ipfs-cluster/api"
rpc "github.com/hsanjuan/go-libp2p-gorpc"
cid "github.com/ipfs/go-cid"
consensus "github.com/libp2p/go-libp2p-consensus"
host "github.com/libp2p/go-libp2p-host"
peer "github.com/libp2p/go-libp2p-peer"
@ -15,14 +16,6 @@ import (
ma "github.com/multiformats/go-multiaddr"
)
// Type of pin operation
const (
LogOpPin = iota + 1
LogOpUnpin
LogOpAddPeer
LogOpRmPeer
)
// LeaderTimeout specifies how long to wait before failing an operation
// because there is no leader
var LeaderTimeout = 15 * time.Second
@ -31,95 +24,6 @@ var LeaderTimeout = 15 * time.Second
// we give up
var CommitRetries = 2
type clusterLogOpType int
// clusterLogOp represents an operation for the OpLogConsensus system.
// It implements the consensus.Op interface.
type clusterLogOp struct {
Arg string
Type clusterLogOpType
ctx context.Context
rpcClient *rpc.Client
}
// ApplyTo applies the operation to the State
func (op *clusterLogOp) ApplyTo(cstate consensus.State) (consensus.State, error) {
state, ok := cstate.(State)
var err error
if !ok {
// Should never be here
panic("received unexpected state type")
}
switch op.Type {
case LogOpPin:
c, err := cid.Decode(op.Arg)
if err != nil {
panic("could not decode a CID we ourselves encoded")
}
err = state.AddPin(c)
if err != nil {
goto ROLLBACK
}
// Async, we let the PinTracker take care of any problems
op.rpcClient.Go("",
"Cluster",
"Track",
NewCidArg(c),
&struct{}{},
nil)
case LogOpUnpin:
c, err := cid.Decode(op.Arg)
if err != nil {
panic("could not decode a CID we ourselves encoded")
}
err = state.RmPin(c)
if err != nil {
goto ROLLBACK
}
// Async, we let the PinTracker take care of any problems
op.rpcClient.Go("",
"Cluster",
"Untrack",
NewCidArg(c),
&struct{}{},
nil)
case LogOpAddPeer:
addr, err := ma.NewMultiaddr(op.Arg)
if err != nil {
panic("could not decode a multiaddress we ourselves encoded")
}
op.rpcClient.Call("",
"Cluster",
"PeerManagerAddPeer",
MultiaddrToSerial(addr),
&struct{}{})
// TODO rebalance ops
case LogOpRmPeer:
pid, err := peer.IDB58Decode(op.Arg)
if err != nil {
panic("could not decode a PID we ourselves encoded")
}
op.rpcClient.Call("",
"Cluster",
"PeerManagerRmPeer",
pid,
&struct{}{})
// TODO rebalance ops
default:
logger.Error("unknown clusterLogOp type. Ignoring")
}
return state, nil
ROLLBACK:
// We failed to apply the operation to the state
// and therefore we need to request a rollback to the
// cluster to the previous state. This operation can only be performed
// by the cluster leader.
logger.Error("Rollbacks are not implemented")
return nil, errors.New("a rollback may be necessary. Reason: " + err.Error())
}
// Consensus handles the work of keeping a shared-state between
// the peers of an IPFS Cluster, as well as modifying that state and
// applying any updates in a thread-safe manner.
@ -130,7 +34,7 @@ type Consensus struct {
consensus consensus.OpLogConsensus
actor consensus.Actor
baseOp *clusterLogOp
baseOp *LogOp
raft *Raft
rpcClient *rpc.Client
@ -148,7 +52,7 @@ type Consensus struct {
// is discarded.
func NewConsensus(clusterPeers []peer.ID, host host.Host, dataFolder string, state State) (*Consensus, error) {
ctx := context.Background()
op := &clusterLogOp{
op := &LogOp{
ctx: context.Background(),
}
@ -231,13 +135,13 @@ func (cc *Consensus) finishBootstrap() {
if err != nil {
logger.Debug("skipping state sync: ", err)
} else {
var pInfo []PinInfo
var pInfoSerial []api.PinInfoSerial
cc.rpcClient.Go(
"",
"Cluster",
"StateSync",
struct{}{},
&pInfo,
&pInfoSerial,
nil)
}
cc.readyCh <- struct{}{}
@ -295,22 +199,21 @@ func (cc *Consensus) Ready() <-chan struct{} {
return cc.readyCh
}
func (cc *Consensus) op(argi interface{}, t clusterLogOpType) *clusterLogOp {
var arg string
func (cc *Consensus) op(argi interface{}, t LogOpType) *LogOp {
switch argi.(type) {
case *cid.Cid:
arg = argi.(*cid.Cid).String()
case peer.ID:
arg = peer.IDB58Encode(argi.(peer.ID))
case api.CidArg:
return &LogOp{
Cid: argi.(api.CidArg).ToSerial(),
Type: t,
}
case ma.Multiaddr:
arg = argi.(ma.Multiaddr).String()
return &LogOp{
Peer: api.MultiaddrToSerial(argi.(ma.Multiaddr)),
Type: t,
}
default:
panic("bad type")
}
return &clusterLogOp{
Arg: arg,
Type: t,
}
}
// returns true if the operation was redirected to the leader
@ -337,11 +240,12 @@ func (cc *Consensus) redirectToLeader(method string, arg interface{}) (bool, err
return true, err
}
func (cc *Consensus) logOpCid(rpcOp string, opType clusterLogOpType, c *cid.Cid) error {
func (cc *Consensus) logOpCid(rpcOp string, opType LogOpType, carg api.CidArg) error {
var finalErr error
for i := 0; i < CommitRetries; i++ {
logger.Debugf("Try %d", i)
redirected, err := cc.redirectToLeader(rpcOp, NewCidArg(c))
redirected, err := cc.redirectToLeader(
rpcOp, carg.ToSerial())
if err != nil {
finalErr = err
continue
@ -353,8 +257,7 @@ func (cc *Consensus) logOpCid(rpcOp string, opType clusterLogOpType, c *cid.Cid)
// It seems WE are the leader.
// Create pin operation for the log
op := cc.op(c, opType)
op := cc.op(carg, opType)
_, err = cc.consensus.CommitOp(op)
if err != nil {
// This means the op did not make it to the log
@ -371,21 +274,21 @@ func (cc *Consensus) logOpCid(rpcOp string, opType clusterLogOpType, c *cid.Cid)
switch opType {
case LogOpPin:
logger.Infof("pin committed to global state: %s", c)
logger.Infof("pin committed to global state: %s", carg.Cid)
case LogOpUnpin:
logger.Infof("unpin committed to global state: %s", c)
logger.Infof("unpin committed to global state: %s", carg.Cid)
}
return nil
}
// LogPin submits a Cid to the shared state of the cluster. It will forward
// the operation to the leader if this is not it.
func (cc *Consensus) LogPin(c *cid.Cid) error {
func (cc *Consensus) LogPin(c api.CidArg) error {
return cc.logOpCid("ConsensusLogPin", LogOpPin, c)
}
// LogUnpin removes a Cid from the shared state of the cluster.
func (cc *Consensus) LogUnpin(c *cid.Cid) error {
func (cc *Consensus) LogUnpin(c api.CidArg) error {
return cc.logOpCid("ConsensusLogUnpin", LogOpUnpin, c)
}
@ -395,7 +298,8 @@ func (cc *Consensus) LogAddPeer(addr ma.Multiaddr) error {
var finalErr error
for i := 0; i < CommitRetries; i++ {
logger.Debugf("Try %d", i)
redirected, err := cc.redirectToLeader("ConsensusLogAddPeer", MultiaddrToSerial(addr))
redirected, err := cc.redirectToLeader(
"ConsensusLogAddPeer", api.MultiaddrToSerial(addr))
if err != nil {
finalErr = err
continue
@ -454,7 +358,11 @@ func (cc *Consensus) LogRmPeer(pid peer.ID) error {
// It seems WE are the leader.
// Create pin operation for the log
op := cc.op(pid, LogOpRmPeer)
addr, err := ma.NewMultiaddr("/ipfs/" + peer.IDB58Encode(pid))
if err != nil {
return err
}
op := cc.op(addr, LogOpRmPeer)
_, err = cc.consensus.CommitOp(op)
if err != nil {
// This means the op did not make it to the log

View File

@ -6,80 +6,14 @@ import (
"testing"
"time"
"github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/state/mapstate"
"github.com/ipfs/ipfs-cluster/test"
cid "github.com/ipfs/go-cid"
peer "github.com/libp2p/go-libp2p-peer"
)
func TestApplyToPin(t *testing.T) {
op := &clusterLogOp{
Arg: testCid,
Type: LogOpPin,
ctx: context.Background(),
rpcClient: mockRPCClient(t),
}
st := NewMapState()
op.ApplyTo(st)
pins := st.ListPins()
if len(pins) != 1 || pins[0].String() != testCid {
t.Error("the state was not modified correctly")
}
}
func TestApplyToUnpin(t *testing.T) {
op := &clusterLogOp{
Arg: testCid,
Type: LogOpUnpin,
ctx: context.Background(),
rpcClient: mockRPCClient(t),
}
st := NewMapState()
c, _ := cid.Decode(testCid)
st.AddPin(c)
op.ApplyTo(st)
pins := st.ListPins()
if len(pins) != 0 {
t.Error("the state was not modified correctly")
}
}
func TestApplyToBadState(t *testing.T) {
defer func() {
if r := recover(); r == nil {
t.Error("should have recovered an error")
}
}()
op := &clusterLogOp{
Arg: testCid,
Type: LogOpUnpin,
ctx: context.Background(),
rpcClient: mockRPCClient(t),
}
var st interface{}
op.ApplyTo(st)
}
func TestApplyToBadCid(t *testing.T) {
defer func() {
if r := recover(); r == nil {
t.Error("should have recovered an error")
}
}()
op := &clusterLogOp{
Arg: "agadfaegf",
Type: LogOpPin,
ctx: context.Background(),
rpcClient: mockRPCClient(t),
}
st := NewMapState()
op.ApplyTo(st)
}
func cleanRaft() {
os.RemoveAll(testingConfig().ConsensusDataFolder)
}
@ -92,12 +26,12 @@ func testingConsensus(t *testing.T) *Consensus {
if err != nil {
t.Fatal("cannot create host:", err)
}
st := NewMapState()
st := mapstate.NewMapState()
cc, err := NewConsensus([]peer.ID{cfg.ID}, h, cfg.ConsensusDataFolder, st)
if err != nil {
t.Fatal("cannot create Consensus:", err)
}
cc.SetClient(mockRPCClient(t))
cc.SetClient(test.NewMockRPCClient(t))
<-cc.Ready()
return cc
}
@ -124,8 +58,8 @@ func TestConsensusPin(t *testing.T) {
defer cleanRaft() // Remember defer runs in LIFO order
defer cc.Shutdown()
c, _ := cid.Decode(testCid)
err := cc.LogPin(c)
c, _ := cid.Decode(test.TestCid1)
err := cc.LogPin(api.CidArg{Cid: c, Everywhere: true})
if err != nil {
t.Error("the operation did not make it to the log:", err)
}
@ -136,8 +70,8 @@ func TestConsensusPin(t *testing.T) {
t.Fatal("error gettinng state:", err)
}
pins := st.ListPins()
if len(pins) != 1 || pins[0].String() != testCid {
pins := st.List()
if len(pins) != 1 || pins[0].Cid.String() != test.TestCid1 {
t.Error("the added pin should be in the state")
}
}
@ -147,8 +81,8 @@ func TestConsensusUnpin(t *testing.T) {
defer cleanRaft()
defer cc.Shutdown()
c, _ := cid.Decode(testCid2)
err := cc.LogUnpin(c)
c, _ := cid.Decode(test.TestCid2)
err := cc.LogUnpin(api.CidArgCid(c))
if err != nil {
t.Error("the operation did not make it to the log:", err)
}

28
coverage.sh Executable file
View File

@ -0,0 +1,28 @@
#!/bin/bash
if [ -z "$COVERALLS_TOKEN" ]
then
exit 1
fi
echo "mode: count" > fullcov.out
dirs=$(find ./* -maxdepth 10 -type d )
dirs=". $dirs"
for dir in $dirs;
do
if ls "$dir"/*.go &> /dev/null;
then
go test -v -coverprofile=profile.out -covermode=count -tags silent "$dir"
if [ $? -ne 0 ];
then
exit 1
fi
if [ -f profile.out ]
then
cat profile.out | grep -v "^mode: count" >> fullcov.out
fi
fi
done
$HOME/gopath/bin/goveralls -coverprofile=fullcov.out -service=travis-ci -repotoken $COVERALLS_TOKEN
rm -rf ./profile.out
rm -rf ./fullcov.out

77
informer/numpin/numpin.go Normal file
View File

@ -0,0 +1,77 @@
// Package numpin implements an ipfs-cluster informer which determines how many
// items this peer is pinning and returns it as api.Metric
package numpin
import (
"fmt"
rpc "github.com/hsanjuan/go-libp2p-gorpc"
"github.com/ipfs/ipfs-cluster/api"
)
// MetricTTL specifies how long our reported metric is valid in seconds.
var MetricTTL = 10
// MetricName specifies the name of our metric
var MetricName = "numpin"
// Informer is a simple object to implement the ipfscluster.Informer
// and Component interfaces
type Informer struct {
rpcClient *rpc.Client
}
func NewInformer() *Informer {
return &Informer{}
}
// SetClient provides us with an rpc.Client which allows
// contacting other components in the cluster.
func (npi *Informer) SetClient(c *rpc.Client) {
npi.rpcClient = c
}
// Shutdown is called on cluster shutdown. We just invalidate
// any metrics from this point.
func (npi *Informer) Shutdown() error {
npi.rpcClient = nil
return nil
}
// Name returns the name of this informer
func (npi *Informer) Name() string {
return MetricName
}
// GetMetric contacts the IPFSConnector component and
// requests the `pin ls` command. We return the number
// of pins in IPFS.
func (npi *Informer) GetMetric() api.Metric {
if npi.rpcClient == nil {
return api.Metric{
Valid: false,
}
}
pinMap := make(map[string]api.IPFSPinStatus)
// make use of the RPC API to obtain information
// about the number of pins in IPFS. See RPCAPI docs.
err := npi.rpcClient.Call("", // Local call
"Cluster", // Service name
"IPFSPinLs", // Method name
"recursive", // in arg
&pinMap) // out arg
valid := err == nil
m := api.Metric{
Name: MetricName,
Value: fmt.Sprintf("%d", len(pinMap)),
Valid: valid,
}
m.SetTTL(MetricTTL)
return m
}
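For orientation, a rough usage sketch (not part of this file), assuming a connected *rpc.Client named client provided by the cluster:

// Sketch only: wire the informer to the cluster RPC endpoint and read a metric.
inf := NewInformer()
inf.SetClient(client)
m := inf.GetMetric()
if m.Valid {
	fmt.Printf("%s = %s (valid for %ds)\n", m.Name, m.Value, MetricTTL)
}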

View File

@ -0,0 +1,45 @@
package numpin
import (
"testing"
"github.com/ipfs/ipfs-cluster/api"
rpc "github.com/hsanjuan/go-libp2p-gorpc"
)
type mockService struct{}
func mockRPCClient(t *testing.T) *rpc.Client {
s := rpc.NewServer(nil, "mock")
c := rpc.NewClientWithServer(nil, "mock", s)
err := s.RegisterName("Cluster", &mockService{})
if err != nil {
t.Fatal(err)
}
return c
}
func (mock *mockService) IPFSPinLs(in string, out *map[string]api.IPFSPinStatus) error {
*out = map[string]api.IPFSPinStatus{
"QmPGDFvBkgWhvzEK9qaTWrWurSwqXNmhnK3hgELPdZZNPa": api.IPFSPinStatusRecursive,
"QmUZ13osndQ5uL4tPWHXe3iBgBgq9gfewcBMSCAuMBsDJ6": api.IPFSPinStatusRecursive,
}
return nil
}
func Test(t *testing.T) {
inf := NewInformer()
m := inf.GetMetric()
if m.Valid {
t.Error("metric should be invalid")
}
inf.SetClient(mockRPCClient(t))
m = inf.GetMetric()
if !m.Valid {
t.Error("metric should be valid")
}
if m.Value != "2" {
t.Error("bad metric value")
}
}

View File

@ -0,0 +1,115 @@
package main
import (
"encoding/json"
"fmt"
"strings"
"github.com/ipfs/ipfs-cluster/api"
)
const (
formatNone = iota
formatID
formatGPInfo
formatString
formatVersion
formatCidArg
)
type format int
func textFormat(body []byte, format int) {
if len(body) < 2 {
fmt.Println("")
return
}
slice := body[0] == '['
if slice {
textFormatSlice(body, format)
} else {
textFormatObject(body, format)
}
}
func textFormatObject(body []byte, format int) {
switch format {
case formatID:
var obj api.IDSerial
textFormatDecodeOn(body, &obj)
textFormatPrintIDSerial(&obj)
case formatGPInfo:
var obj api.GlobalPinInfoSerial
textFormatDecodeOn(body, &obj)
textFormatPrintGPinfo(&obj)
case formatVersion:
var obj api.Version
textFormatDecodeOn(body, &obj)
textFormatPrintVersion(&obj)
case formatCidArg:
var obj api.CidArgSerial
textFormatDecodeOn(body, &obj)
textFormatPrintCidArg(&obj)
default:
var obj interface{}
textFormatDecodeOn(body, &obj)
fmt.Printf("%s\n", obj)
}
}
func textFormatSlice(body []byte, format int) {
var rawMsg []json.RawMessage
textFormatDecodeOn(body, &rawMsg)
for _, raw := range rawMsg {
textFormatObject(raw, format)
}
}
func textFormatDecodeOn(body []byte, obj interface{}) {
checkErr("decoding JSON", json.Unmarshal(body, obj))
}
func textFormatPrintIDSerial(obj *api.IDSerial) {
if obj.Error != "" {
fmt.Printf("%s | ERROR: %s\n", obj.ID, obj.Error)
return
}
fmt.Printf("%s | %d peers\n", obj.ID, len(obj.ClusterPeers))
fmt.Println(" > Addresses:")
for _, a := range obj.Addresses {
fmt.Printf(" - %s\n", a)
}
if obj.IPFS.Error != "" {
fmt.Printf(" > IPFS ERROR: %s\n", obj.IPFS.Error)
return
}
fmt.Printf(" > IPFS: %s\n", obj.IPFS.ID)
for _, a := range obj.IPFS.Addresses {
fmt.Printf(" - %s\n", a)
}
}
func textFormatPrintGPinfo(obj *api.GlobalPinInfoSerial) {
fmt.Printf("%s:\n", obj.Cid)
for k, v := range obj.PeerMap {
if v.Error != "" {
fmt.Printf(" - %s ERROR: %s\n", k, v.Error)
continue
}
fmt.Printf(" > Peer %s: %s | %s\n", k, strings.ToUpper(v.Status), v.TS)
}
}
func textFormatPrintVersion(obj *api.Version) {
fmt.Println(obj.Version)
}
func textFormatPrintCidArg(obj *api.CidArgSerial) {
fmt.Printf("%s | Allocations: ", obj.Cid)
if obj.Everywhere {
fmt.Printf("[everywhere]\n")
} else {
fmt.Printf("%s", obj.Allocations)
}
}
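As a hypothetical illustration of the text path above (the field names come from this commit, the values are invented):

// Sketch only: feeding a pin entry through textFormatPrintCidArg.
obj := api.CidArgSerial{
	Cid:         "QmExampleCid",                 // invented value
	Allocations: []string{"QmPeerA", "QmPeerB"}, // invented peer IDs; assumes a string slice in the serial form
	Everywhere:  false,
}
textFormatPrintCidArg(&obj)
// would print: QmExampleCid | Allocations: [QmPeerA QmPeerB]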

View File

@ -90,6 +90,11 @@ func main() {
Name: "https, s",
Usage: "use https to connect to the API",
},
cli.StringFlag{
Name: "encoding, enc",
Value: "text",
Usage: "output format encoding [text, json]",
},
cli.IntFlag{
Name: "timeout, t",
Value: defaultTimeout,
@ -120,9 +125,10 @@ func main() {
UsageText: `
This command will print out information about the cluster peer used
`,
Flags: []cli.Flag{parseFlag(formatID)},
Action: func(c *cli.Context) error {
resp := request("GET", "/id", nil)
formatResponse(resp)
formatResponse(c, resp)
return nil
},
},
@ -139,9 +145,10 @@ This command can be used to list and manage IPFS Cluster peers.
UsageText: `
This commands provides a list of the ID information of all the peers in the Cluster.
`,
Flags: []cli.Flag{parseFlag(formatID)},
Action: func(c *cli.Context) error {
resp := request("GET", "/peers", nil)
formatResponse(resp)
formatResponse(c, resp)
return nil
},
},
@ -154,6 +161,7 @@ succeed, the new peer needs to be reachable and any other member of the cluster
should be online. The operation returns the ID information for the new peer.
`,
ArgsUsage: "<multiaddress>",
Flags: []cli.Flag{parseFlag(formatID)},
Action: func(c *cli.Context) error {
addr := c.Args().First()
if addr == "" {
@ -166,7 +174,7 @@ should be online. The operation returns the ID information for the new peer.
enc := json.NewEncoder(&buf)
enc.Encode(addBody)
resp := request("POST", "/peers", &buf)
formatResponse(resp)
formatResponse(c, resp)
return nil
},
},
@ -180,12 +188,13 @@ operation to succeed, otherwise some nodes may be left with an outdated list of
cluster peers.
`,
ArgsUsage: "<peer ID>",
Flags: []cli.Flag{parseFlag(formatNone)},
Action: func(c *cli.Context) error {
pid := c.Args().First()
_, err := peer.IDB58Decode(pid)
checkErr("parsing peer ID", err)
resp := request("DELETE", "/peers/"+pid, nil)
formatResponse(resp)
formatResponse(c, resp)
return nil
},
},
@ -211,14 +220,16 @@ When the request has succeeded, the command returns the status of the CID
in the cluster and should be part of the list offered by "pin ls".
`,
ArgsUsage: "<cid>",
Flags: []cli.Flag{parseFlag(formatGPInfo)},
Action: func(c *cli.Context) error {
cidStr := c.Args().First()
_, err := cid.Decode(cidStr)
checkErr("parsing cid", err)
request("POST", "/pins/"+cidStr, nil)
resp := request("POST", "/pins/"+cidStr, nil)
formatResponse(c, resp)
time.Sleep(500 * time.Millisecond)
resp := request("GET", "/pins/"+cidStr, nil)
formatResponse(resp)
resp = request("GET", "/pins/"+cidStr, nil)
formatResponse(c, resp)
return nil
},
},
@ -234,6 +245,7 @@ in the cluster. The CID should disappear from the list offered by "pin ls",
although unpinning operations in the cluster may take longer or fail.
`,
ArgsUsage: "<cid>",
Flags: []cli.Flag{parseFlag(formatGPInfo)},
Action: func(c *cli.Context) error {
cidStr := c.Args().First()
_, err := cid.Decode(cidStr)
@ -241,7 +253,7 @@ although unpinning operations in the cluster may take longer or fail.
request("DELETE", "/pins/"+cidStr, nil)
time.Sleep(500 * time.Millisecond)
resp := request("GET", "/pins/"+cidStr, nil)
formatResponse(resp)
formatResponse(c, resp)
return nil
},
},
@ -249,14 +261,16 @@ although unpinning operations in the cluster may take longer or fail.
Name: "ls",
Usage: "List tracked CIDs",
UsageText: `
This command will list the CIDs which are tracked by IPFS Cluster. This
list does not include information about tracking status or location, it
This command will list the CIDs which are tracked by IPFS Cluster and to
which peers they are currently allocated. This list does not include
any monitoring information about these CIDs; it
merely represents the list of pins which are part of the global state of
the cluster. For specific information, use "status".
`,
Flags: []cli.Flag{parseFlag(formatCidArg)},
Action: func(c *cli.Context) error {
resp := request("GET", "/pinlist", nil)
formatResponse(resp)
formatResponse(c, resp)
return nil
},
},
@ -275,6 +289,7 @@ The status of a CID may not be accurate. A manual sync can be triggered
with "sync".
`,
ArgsUsage: "[cid]",
Flags: []cli.Flag{parseFlag(formatGPInfo)},
Action: func(c *cli.Context) error {
cidStr := c.Args().First()
if cidStr != "" {
@ -282,7 +297,7 @@ with "sync".
checkErr("parsing cid", err)
}
resp := request("GET", "/pins/"+cidStr, nil)
formatResponse(resp)
formatResponse(c, resp)
return nil
},
},
@ -302,6 +317,7 @@ therefore, the output should be empty if no operations were performed.
CIDs in error state may be manually recovered with "recover".
`,
ArgsUsage: "[cid]",
Flags: []cli.Flag{parseFlag(formatGPInfo)},
Action: func(c *cli.Context) error {
cidStr := c.Args().First()
var resp *http.Response
@ -312,7 +328,7 @@ CIDs in error state may be manually recovered with "recover".
} else {
resp = request("POST", "/pins/sync", nil)
}
formatResponse(resp)
formatResponse(c, resp)
return nil
},
},
@ -327,6 +343,7 @@ The command will wait for any operations to succeed and will return the status
of the item upon completion.
`,
ArgsUsage: "<cid>",
Flags: []cli.Flag{parseFlag(formatGPInfo)},
Action: func(c *cli.Context) error {
cidStr := c.Args().First()
var resp *http.Response
@ -334,7 +351,7 @@ of the item upon completion.
_, err := cid.Decode(cidStr)
checkErr("parsing cid", err)
resp = request("POST", "/pins/"+cidStr+"/recover", nil)
formatResponse(resp)
formatResponse(c, resp)
} else {
return cli.NewExitError("A CID is required", 1)
@ -349,9 +366,19 @@ of the item upon completion.
This command retrieves the IPFS Cluster version and can be used
to check that it matches the CLI version (shown by -v).
`,
Flags: []cli.Flag{parseFlag(formatVersion)},
Action: func(c *cli.Context) error {
resp := request("GET", "/version", nil)
formatResponse(resp)
formatResponse(c, resp)
return nil
},
},
{
Name: "commands",
Usage: "List all commands",
Hidden: true,
Action: func(c *cli.Context) error {
walkCommands(c.App.Commands)
return nil
},
},
@ -360,6 +387,21 @@ to check that it matches the CLI version (shown by -v).
app.Run(os.Args)
}
func parseFlag(t int) cli.IntFlag {
return cli.IntFlag{
Name: "parseAs",
Value: t,
Hidden: true,
}
}
func walkCommands(cmds []cli.Command) {
for _, c := range cmds {
fmt.Println(c.HelpName)
walkCommands(c.Subcommands)
}
}
func request(method, path string, body io.Reader, args ...string) *http.Response {
ctx, cancel := context.WithTimeout(context.Background(),
time.Duration(defaultTimeout)*time.Second)
@ -386,26 +428,34 @@ func request(method, path string, body io.Reader, args ...string) *http.Response
return resp
}
func formatResponse(r *http.Response) {
func formatResponse(c *cli.Context, r *http.Response) {
defer r.Body.Close()
body, err := ioutil.ReadAll(r.Body)
checkErr("reading body", err)
logger.Debugf("Body: %s", body)
if r.StatusCode > 399 {
switch {
case r.StatusCode > 399:
var e errorResp
err = json.Unmarshal(body, &e)
checkErr("decoding error response", err)
out("Error %d: %s", e.Code, e.Message)
} else if r.StatusCode == http.StatusAccepted {
out("%s", "request accepted")
} else if r.StatusCode == http.StatusNoContent {
case r.StatusCode == http.StatusAccepted:
out("%s", "Request accepted")
case r.StatusCode == http.StatusNoContent:
out("%s", "Request succeeded\n")
} else {
var resp interface{}
err = json.Unmarshal(body, &resp)
checkErr("decoding response", err)
prettyPrint(body)
default:
enc := c.GlobalString("encoding")
switch enc {
case "text":
textFormat(body, c.Int("parseAs"))
default:
var resp interface{}
err = json.Unmarshal(body, &resp)
checkErr("decoding response", err)
prettyPrint(body)
}
}
}

View File

@ -13,6 +13,9 @@ import (
"github.com/urfave/cli"
ipfscluster "github.com/ipfs/ipfs-cluster"
"github.com/ipfs/ipfs-cluster/allocator/numpinalloc"
"github.com/ipfs/ipfs-cluster/informer/numpin"
"github.com/ipfs/ipfs-cluster/state/mapstate"
)
// ProgramName of this application
@ -214,7 +217,7 @@ func run(c *cli.Context) error {
if a := c.String("bootstrap"); a != "" {
if len(cfg.ClusterPeers) > 0 && !c.Bool("force") {
return errors.New("The configuration provides ClusterPeers. Use -f to ignore and proceed bootstrapping")
return errors.New("the configuration provides ClusterPeers. Use -f to ignore and proceed bootstrapping")
}
joinAddr, err := ma.NewMultiaddr(a)
if err != nil {
@ -234,14 +237,21 @@ func run(c *cli.Context) error {
proxy, err := ipfscluster.NewIPFSHTTPConnector(cfg)
checkErr("creating IPFS Connector component", err)
state := ipfscluster.NewMapState()
state := mapstate.NewMapState()
tracker := ipfscluster.NewMapPinTracker(cfg)
mon := ipfscluster.NewStdPeerMonitor(5)
informer := numpin.NewInformer()
alloc := numpinalloc.NewAllocator()
cluster, err := ipfscluster.NewCluster(
cfg,
api,
proxy,
state,
tracker)
tracker,
mon,
alloc,
informer)
checkErr("starting cluster", err)
signalChan := make(chan os.Signal, 20)

View File

@ -14,6 +14,8 @@ import (
"sync"
"time"
"github.com/ipfs/ipfs-cluster/api"
rpc "github.com/hsanjuan/go-libp2p-gorpc"
cid "github.com/ipfs/go-cid"
peer "github.com/libp2p/go-libp2p-peer"
@ -239,7 +241,9 @@ func (ipfs *IPFSHTTPConnector) pinOpHandler(op string, w http.ResponseWriter, r
err = ipfs.rpcClient.Call("",
"Cluster",
op,
&CidArg{arg},
api.CidArgSerial{
Cid: arg,
},
&struct{}{})
if err != nil {
@ -268,7 +272,7 @@ func (ipfs *IPFSHTTPConnector) pinLsHandler(w http.ResponseWriter, r *http.Reque
pinLs := ipfsPinLsResp{}
pinLs.Keys = make(map[string]ipfsPinType)
var pins []string
var pins []api.CidArgSerial
err := ipfs.rpcClient.Call("",
"Cluster",
"PinList",
@ -281,7 +285,7 @@ func (ipfs *IPFSHTTPConnector) pinLsHandler(w http.ResponseWriter, r *http.Reque
}
for _, pin := range pins {
pinLs.Keys[pin] = ipfsPinType{
pinLs.Keys[pin.Cid] = ipfsPinType{
Type: "recursive",
}
}
@ -345,8 +349,8 @@ func (ipfs *IPFSHTTPConnector) Shutdown() error {
// If the request fails, or the parsing fails, it
// returns an error and an empty IPFSID which also
// contains the error message.
func (ipfs *IPFSHTTPConnector) ID() (IPFSID, error) {
id := IPFSID{}
func (ipfs *IPFSHTTPConnector) ID() (api.IPFSID, error) {
id := api.IPFSID{}
body, err := ipfs.get("id")
if err != nil {
id.Error = err.Error()
@ -420,23 +424,10 @@ func (ipfs *IPFSHTTPConnector) Unpin(hash *cid.Cid) error {
return nil
}
func parseIPFSPinType(t string) IPFSPinStatus {
switch {
case t == "indirect":
return IPFSPinStatusIndirect
case t == "direct":
return IPFSPinStatusDirect
case t == "recursive":
return IPFSPinStatusRecursive
default:
return IPFSPinStatusBug
}
}
// PinLs performs a "pin ls" request against the configured IPFS daemon and
// returns a map of cid strings and their status.
func (ipfs *IPFSHTTPConnector) PinLs() (map[string]IPFSPinStatus, error) {
body, err := ipfs.get("pin/ls")
// PinLs performs a "pin ls --type typeFilter" request against the configured
// IPFS daemon and returns a map of cid strings and their status.
func (ipfs *IPFSHTTPConnector) PinLs(typeFilter string) (map[string]api.IPFSPinStatus, error) {
body, err := ipfs.get("pin/ls?type=" + typeFilter)
// Some error talking to the daemon
if err != nil {
@ -451,27 +442,27 @@ func (ipfs *IPFSHTTPConnector) PinLs() (map[string]IPFSPinStatus, error) {
return nil, err
}
statusMap := make(map[string]IPFSPinStatus)
statusMap := make(map[string]api.IPFSPinStatus)
for k, v := range resp.Keys {
statusMap[k] = parseIPFSPinType(v.Type)
statusMap[k] = api.IPFSPinStatusFromString(v.Type)
}
return statusMap, nil
}
// PinLsCid performs a "pin ls <hash>" request and returns IPFSPinStatus for
// that hash.
func (ipfs *IPFSHTTPConnector) PinLsCid(hash *cid.Cid) (IPFSPinStatus, error) {
func (ipfs *IPFSHTTPConnector) PinLsCid(hash *cid.Cid) (api.IPFSPinStatus, error) {
lsPath := fmt.Sprintf("pin/ls?arg=%s", hash)
body, err := ipfs.get(lsPath)
// Network error, daemon down
if body == nil && err != nil {
return IPFSPinStatusError, err
return api.IPFSPinStatusError, err
}
// The pin was likely not found here
if err != nil { // Not pinned
return IPFSPinStatusUnpinned, nil
return api.IPFSPinStatusUnpinned, nil
}
var resp ipfsPinLsResp
@ -479,14 +470,14 @@ func (ipfs *IPFSHTTPConnector) PinLsCid(hash *cid.Cid) (IPFSPinStatus, error) {
if err != nil {
logger.Error("parsing pin/ls?arg=cid response:")
logger.Error(string(body))
return IPFSPinStatusError, err
return api.IPFSPinStatusError, err
}
pinObj, ok := resp.Keys[hash.String()]
if !ok {
return IPFSPinStatusError, errors.New("expected to find the pin in the response")
return api.IPFSPinStatusError, errors.New("expected to find the pin in the response")
}
return parseIPFSPinType(pinObj.Type), nil
return api.IPFSPinStatusFromString(pinObj.Type), nil
}
// get performs the heavy lifting of a get request against
@ -518,8 +509,8 @@ func (ipfs *IPFSHTTPConnector) get(path string) ([]byte, error) {
msg = fmt.Sprintf("IPFS unsuccessful: %d: %s",
resp.StatusCode, ipfsErr.Message)
} else {
msg = fmt.Sprintf("IPFS-get unsuccessful: %d: %s",
resp.StatusCode, body)
msg = fmt.Sprintf("IPFS-get '%s' unsuccessful: %d: %s",
path, resp.StatusCode, body)
}
logger.Warning(msg)
return body, errors.New(msg)

View File

@ -7,26 +7,29 @@ import (
"net/http"
"testing"
"github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/test"
cid "github.com/ipfs/go-cid"
ma "github.com/multiformats/go-multiaddr"
)
func testIPFSConnectorConfig(mock *ipfsMock) *Config {
func testIPFSConnectorConfig(mock *test.IpfsMock) *Config {
cfg := testingConfig()
addr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", mock.addr, mock.port))
addr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", mock.Addr, mock.Port))
cfg.IPFSNodeAddr = addr
return cfg
}
func testIPFSConnector(t *testing.T) (*IPFSHTTPConnector, *ipfsMock) {
mock := newIpfsMock()
func testIPFSConnector(t *testing.T) (*IPFSHTTPConnector, *test.IpfsMock) {
mock := test.NewIpfsMock()
cfg := testIPFSConnectorConfig(mock)
ipfs, err := NewIPFSHTTPConnector(cfg)
if err != nil {
t.Fatal("creating an IPFSConnector should work: ", err)
}
ipfs.SetClient(mockRPCClient(t))
ipfs.SetClient(test.NewMockRPCClient(t))
return ipfs, mock
}
@ -43,7 +46,7 @@ func TestIPFSID(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if id.ID != testPeerID {
if id.ID != test.TestPeerID1 {
t.Error("expected testPeerID")
}
if len(id.Addresses) != 1 {
@ -66,7 +69,7 @@ func TestIPFSPin(t *testing.T) {
ipfs, mock := testIPFSConnector(t)
defer mock.Close()
defer ipfs.Shutdown()
c, _ := cid.Decode(testCid)
c, _ := cid.Decode(test.TestCid1)
err := ipfs.Pin(c)
if err != nil {
t.Error("expected success pinning cid")
@ -79,7 +82,7 @@ func TestIPFSPin(t *testing.T) {
t.Error("cid should have been pinned")
}
c2, _ := cid.Decode(errorCid)
c2, _ := cid.Decode(test.ErrorCid)
err = ipfs.Pin(c2)
if err == nil {
t.Error("expected error pinning cid")
@ -90,7 +93,7 @@ func TestIPFSUnpin(t *testing.T) {
ipfs, mock := testIPFSConnector(t)
defer mock.Close()
defer ipfs.Shutdown()
c, _ := cid.Decode(testCid)
c, _ := cid.Decode(test.TestCid1)
err := ipfs.Unpin(c)
if err != nil {
t.Error("expected success unpinning non-pinned cid")
@ -106,8 +109,8 @@ func TestIPFSPinLsCid(t *testing.T) {
ipfs, mock := testIPFSConnector(t)
defer mock.Close()
defer ipfs.Shutdown()
c, _ := cid.Decode(testCid)
c2, _ := cid.Decode(testCid2)
c, _ := cid.Decode(test.TestCid1)
c2, _ := cid.Decode(test.TestCid2)
ipfs.Pin(c)
ips, err := ipfs.PinLsCid(c)
@ -116,7 +119,7 @@ func TestIPFSPinLsCid(t *testing.T) {
}
ips, err = ipfs.PinLsCid(c2)
if err != nil || ips != IPFSPinStatusUnpinned {
if err != nil || ips != api.IPFSPinStatusUnpinned {
t.Error("c2 should appear unpinned")
}
}
@ -125,12 +128,12 @@ func TestIPFSPinLs(t *testing.T) {
ipfs, mock := testIPFSConnector(t)
defer mock.Close()
defer ipfs.Shutdown()
c, _ := cid.Decode(testCid)
c2, _ := cid.Decode(testCid2)
c, _ := cid.Decode(test.TestCid1)
c2, _ := cid.Decode(test.TestCid2)
ipfs.Pin(c)
ipfs.Pin(c2)
ipsMap, err := ipfs.PinLs()
ipsMap, err := ipfs.PinLs("")
if err != nil {
t.Error("should not error")
}
@ -139,7 +142,7 @@ func TestIPFSPinLs(t *testing.T) {
t.Fatal("the map does not contain expected keys")
}
if !ipsMap[testCid].IsPinned() || !ipsMap[testCid2].IsPinned() {
if !ipsMap[test.TestCid1].IsPinned() || !ipsMap[test.TestCid2].IsPinned() {
t.Error("c1 and c2 should appear pinned")
}
}
@ -191,7 +194,7 @@ func TestIPFSProxyPin(t *testing.T) {
res, err := http.Get(fmt.Sprintf("http://%s:%s/api/v0/pin/add?arg=%s",
host,
port,
testCid))
test.TestCid1))
if err != nil {
t.Fatal("should have succeeded: ", err)
}
@ -206,7 +209,7 @@ func TestIPFSProxyPin(t *testing.T) {
t.Fatal(err)
}
if len(resp.Pins) != 1 || resp.Pins[0] != testCid {
if len(resp.Pins) != 1 || resp.Pins[0] != test.TestCid1 {
t.Error("wrong response")
}
res.Body.Close()
@ -215,7 +218,7 @@ func TestIPFSProxyPin(t *testing.T) {
res, err = http.Get(fmt.Sprintf("http://%s:%s/api/v0/pin/add?arg=%s",
host,
port,
errorCid))
test.ErrorCid))
if err != nil {
t.Fatal("request should work: ", err)
}
@ -230,7 +233,7 @@ func TestIPFSProxyPin(t *testing.T) {
t.Fatal(err)
}
if respErr.Message != errBadCid.Error() {
if respErr.Message != test.ErrBadCid.Error() {
t.Error("wrong response")
}
res.Body.Close()
@ -247,7 +250,7 @@ func TestIPFSProxyUnpin(t *testing.T) {
res, err := http.Get(fmt.Sprintf("http://%s:%s/api/v0/pin/rm?arg=%s",
host,
port,
testCid))
test.TestCid1))
if err != nil {
t.Fatal("should have succeeded: ", err)
}
@ -263,7 +266,7 @@ func TestIPFSProxyUnpin(t *testing.T) {
t.Fatal(err)
}
if len(resp.Pins) != 1 || resp.Pins[0] != testCid {
if len(resp.Pins) != 1 || resp.Pins[0] != test.TestCid1 {
t.Error("wrong response")
}
res.Body.Close()
@ -272,7 +275,7 @@ func TestIPFSProxyUnpin(t *testing.T) {
res, err = http.Get(fmt.Sprintf("http://%s:%s/api/v0/pin/rm?arg=%s",
host,
port,
errorCid))
test.ErrorCid))
if err != nil {
t.Fatal("request should work: ", err)
}
@ -287,7 +290,7 @@ func TestIPFSProxyUnpin(t *testing.T) {
t.Fatal(err)
}
if respErr.Message != errBadCid.Error() {
if respErr.Message != test.ErrBadCid.Error() {
t.Error("wrong response")
}
res.Body.Close()
@ -304,7 +307,7 @@ func TestIPFSProxyPinLs(t *testing.T) {
res, err := http.Get(fmt.Sprintf("http://%s:%s/api/v0/pin/ls?arg=%s",
host,
port,
testCid))
test.TestCid1))
if err != nil {
t.Fatal("should have succeeded: ", err)
}
@ -320,7 +323,7 @@ func TestIPFSProxyPinLs(t *testing.T) {
t.Fatal(err)
}
_, ok := resp.Keys[testCid]
_, ok := resp.Keys[test.TestCid1]
if len(resp.Keys) != 1 || !ok {
t.Error("wrong response")
}

View File

@ -9,106 +9,17 @@
package ipfscluster
import (
"time"
rpc "github.com/hsanjuan/go-libp2p-gorpc"
cid "github.com/ipfs/go-cid"
crypto "github.com/libp2p/go-libp2p-crypto"
peer "github.com/libp2p/go-libp2p-peer"
protocol "github.com/libp2p/go-libp2p-protocol"
ma "github.com/multiformats/go-multiaddr"
"github.com/ipfs/ipfs-cluster/api"
)
// RPCProtocol is used to send libp2p messages between cluster peers
var RPCProtocol = protocol.ID("/ipfscluster/" + Version + "/rpc")
// TrackerStatus values
const (
// IPFSStatus should never take this value
TrackerStatusBug = iota
// The cluster node is offline or not responding
TrackerStatusClusterError
// An error occurred pinning
TrackerStatusPinError
// An error occurred unpinning
TrackerStatusUnpinError
// The IPFS daemon has pinned the item
TrackerStatusPinned
// The IPFS daemon is currently pinning the item
TrackerStatusPinning
// The IPFS daemon is currently unpinning the item
TrackerStatusUnpinning
// The IPFS daemon is not pinning the item
TrackerStatusUnpinned
// The IPFS daemon is not pinning the item but it is being tracked
TrackerStatusRemotePin
)
// TrackerStatus represents the status of a tracked Cid in the PinTracker
type TrackerStatus int
// IPFSPinStatus values
const (
IPFSPinStatusBug = iota
IPFSPinStatusError
IPFSPinStatusDirect
IPFSPinStatusRecursive
IPFSPinStatusIndirect
IPFSPinStatusUnpinned
)
// IPFSPinStatus represents the status of a pin in IPFS (direct, recursive etc.)
type IPFSPinStatus int
// IsPinned returns true if the status is Direct or Recursive
func (ips IPFSPinStatus) IsPinned() bool {
return ips == IPFSPinStatusDirect || ips == IPFSPinStatusRecursive
}
// GlobalPinInfo contains cluster-wide status information about a tracked Cid,
// indexed by cluster peer.
type GlobalPinInfo struct {
Cid *cid.Cid
PeerMap map[peer.ID]PinInfo
}
// PinInfo holds information about local pins. PinInfo is
// serialized when requesting the Global status, therefore
// we cannot use *cid.Cid.
type PinInfo struct {
CidStr string
Peer peer.ID
Status TrackerStatus
TS time.Time
Error string
}
// String converts an IPFSStatus into a readable string.
func (st TrackerStatus) String() string {
switch st {
case TrackerStatusBug:
return "bug"
case TrackerStatusClusterError:
return "cluster_error"
case TrackerStatusPinError:
return "pin_error"
case TrackerStatusUnpinError:
return "unpin_error"
case TrackerStatusPinned:
return "pinned"
case TrackerStatusPinning:
return "pinning"
case TrackerStatusUnpinning:
return "unpinning"
case TrackerStatusUnpinned:
return "unpinned"
case TrackerStatusRemotePin:
return "remote"
default:
return ""
}
}
// Component represents a piece of ipfscluster. Cluster components
// usually run their own goroutines (a http server for example). They
// communicate with the main Cluster component and other components
@ -128,11 +39,11 @@ type API interface {
// an IPFS daemon. This is a base component.
type IPFSConnector interface {
Component
ID() (IPFSID, error)
ID() (api.IPFSID, error)
Pin(*cid.Cid) error
Unpin(*cid.Cid) error
PinLsCid(*cid.Cid) (IPFSPinStatus, error)
PinLs() (map[string]IPFSPinStatus, error)
PinLsCid(*cid.Cid) (api.IPFSPinStatus, error)
PinLs(typeFilter string) (map[string]api.IPFSPinStatus, error)
}
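A small, hypothetical example of the new PinLs signature with its type filter (assuming ipfs is an IPFSConnector built elsewhere):

// Sketch only: list recursive pins on the IPFS daemon and log the pinned CIDs.
statusMap, err := ipfs.PinLs("recursive")
if err != nil {
	return err
}
for cidStr, st := range statusMap {
	if st.IsPinned() {
		logger.Debugf("%s is pinned by the IPFS daemon", cidStr)
	}
}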
// Peered represents a component which needs to be aware of the peers
@ -147,15 +58,16 @@ type Peered interface {
// is used by the Consensus component to keep track of
// which objects are pinned. This component should be thread safe.
type State interface {
// AddPin adds a pin to the State
AddPin(*cid.Cid) error
// RmPin removes a pin from the State
RmPin(*cid.Cid) error
// ListPins lists all the pins in the state
ListPins() []*cid.Cid
// HasPin returns true if the state is holding a Cid
HasPin(*cid.Cid) bool
// AddPeer adds a peer to the shared state
// Add adds a pin to the State
Add(api.CidArg) error
// Rm removes a pin from the State
Rm(*cid.Cid) error
// List lists all the pins in the state
List() []api.CidArg
// Has returns true if the state is holding information for a Cid
Has(*cid.Cid) bool
// Get returns the information attached to this pin
Get(*cid.Cid) api.CidArg
}
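A minimal sketch of the new CidArg-based State interface (assuming st implements State and c is a decoded *cid.Cid):

// Sketch only: the state now stores api.CidArg entries, not bare CIDs.
if !st.Has(c) {
	if err := st.Add(api.CidArg{Cid: c, Everywhere: true}); err != nil {
		return err
	}
}
entry := st.Get(c)
logger.Debugf("%s is allocated to %d peers", entry.Cid, len(entry.Allocations))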
// PinTracker represents a component which tracks the status of
@ -165,159 +77,60 @@ type PinTracker interface {
Component
// Track tells the tracker that a Cid is now under its supervision
// The tracker may decide to perform an IPFS pin.
Track(*cid.Cid) error
Track(api.CidArg) error
// Untrack tells the tracker that a Cid is to be forgotten. The tracker
// may perform an IPFS unpin operation.
Untrack(*cid.Cid) error
// StatusAll returns the list of pins with their local status.
StatusAll() []PinInfo
StatusAll() []api.PinInfo
// Status returns the local status of a given Cid.
Status(*cid.Cid) PinInfo
Status(*cid.Cid) api.PinInfo
// SyncAll makes sure that all tracked Cids reflect the real IPFS status.
// It returns the list of pins which were updated by the call.
SyncAll() ([]PinInfo, error)
SyncAll() ([]api.PinInfo, error)
// Sync makes sure that the Cid status reflects the real IPFS status.
// It returns the local status of the Cid.
Sync(*cid.Cid) (PinInfo, error)
Sync(*cid.Cid) (api.PinInfo, error)
// Recover retriggers a Pin/Unpin operation in Cids with error status.
Recover(*cid.Cid) (PinInfo, error)
Recover(*cid.Cid) (api.PinInfo, error)
}
// IPFSID is used to store information about the underlying IPFS daemon
type IPFSID struct {
ID peer.ID
Addresses []ma.Multiaddr
Error string
// Informer returns Metric information about a peer. The metrics produced by
// informers are then passed to a PinAllocator which will use them to
// determine where to pin content.
type Informer interface {
Component
Name() string
GetMetric() api.Metric
}
// IPFSIDSerial is the serializable IPFSID for RPC requests
type IPFSIDSerial struct {
ID string
Addresses MultiaddrsSerial
Error string
// PinAllocator decides where to pin certain content. In order to make such
// decision, it receives the pin arguments, the peers which are currently
// allocated to the content and metrics available for all peers which could
// allocate the content.
type PinAllocator interface {
Component
// Allocate returns the list of peers that should be assigned to
// Pin content in order of preference (from the most preferred to the
// least). The current map contains the metrics for all peers
// which are currently pinning the content. The candidates map
// contains the metrics for all peers which are eligible for pinning
// the content.
Allocate(c *cid.Cid, current, candidates map[peer.ID]api.Metric) ([]peer.ID, error)
}
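A sketch of a possible call site, under the assumption that alloc satisfies PinAllocator, the metric maps were built from PeerMonitor data, and replFactor is the configured ReplicationFactor:

// Sketch only: ask the allocator for an ordered preference list and keep
// the first replFactor peers.
prio, err := alloc.Allocate(c, current, candidates)
if err != nil {
	return err
}
if len(prio) < replFactor {
	return errors.New("not enough allocations for the requested replication factor")
}
allocations := prio[:replFactor] // most preferred peers first
logger.Debugf("%s will be allocated to %s", c, allocations)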
// ToSerial converts IPFSID to a go serializable object
func (id *IPFSID) ToSerial() IPFSIDSerial {
return IPFSIDSerial{
ID: peer.IDB58Encode(id.ID),
Addresses: MultiaddrsToSerial(id.Addresses),
Error: id.Error,
}
}
// ToID converts an IPFSIDSerial to IPFSID
// It will ignore any errors when parsing the fields.
func (ids *IPFSIDSerial) ToID() IPFSID {
id := IPFSID{}
if pID, err := peer.IDB58Decode(ids.ID); err == nil {
id.ID = pID
}
id.Addresses = ids.Addresses.ToMultiaddrs()
id.Error = ids.Error
return id
}
// ID holds information about the Cluster peer
type ID struct {
ID peer.ID
PublicKey crypto.PubKey
Addresses []ma.Multiaddr
ClusterPeers []ma.Multiaddr
Version string
Commit string
RPCProtocolVersion protocol.ID
Error string
IPFS IPFSID
}
// IDSerial is the serializable ID counterpart for RPC requests
type IDSerial struct {
ID string
PublicKey []byte
Addresses MultiaddrsSerial
ClusterPeers MultiaddrsSerial
Version string
Commit string
RPCProtocolVersion string
Error string
IPFS IPFSIDSerial
}
// ToSerial converts an ID to its Go-serializable version
func (id ID) ToSerial() IDSerial {
var pkey []byte
if id.PublicKey != nil {
pkey, _ = id.PublicKey.Bytes()
}
return IDSerial{
ID: peer.IDB58Encode(id.ID),
PublicKey: pkey,
Addresses: MultiaddrsToSerial(id.Addresses),
ClusterPeers: MultiaddrsToSerial(id.ClusterPeers),
Version: id.Version,
Commit: id.Commit,
RPCProtocolVersion: string(id.RPCProtocolVersion),
Error: id.Error,
IPFS: id.IPFS.ToSerial(),
}
}
// ToID converts an IDSerial object to ID.
// It will ignore any errors when parsing the fields.
func (ids IDSerial) ToID() ID {
id := ID{}
if pID, err := peer.IDB58Decode(ids.ID); err == nil {
id.ID = pID
}
if pkey, err := crypto.UnmarshalPublicKey(ids.PublicKey); err == nil {
id.PublicKey = pkey
}
id.Addresses = ids.Addresses.ToMultiaddrs()
id.ClusterPeers = ids.ClusterPeers.ToMultiaddrs()
id.Version = ids.Version
id.Commit = ids.Commit
id.RPCProtocolVersion = protocol.ID(ids.RPCProtocolVersion)
id.Error = ids.Error
id.IPFS = ids.IPFS.ToID()
return id
}
// MultiaddrSerial is a Multiaddress in a serializable form
type MultiaddrSerial []byte
// MultiaddrsSerial is an array of Multiaddresses in serializable form
type MultiaddrsSerial []MultiaddrSerial
// MultiaddrToSerial converts a Multiaddress to its serializable form
func MultiaddrToSerial(addr ma.Multiaddr) MultiaddrSerial {
return addr.Bytes()
}
// ToMultiaddr converts a serializable Multiaddress to its original type.
// All errors are ignored.
func (addrS MultiaddrSerial) ToMultiaddr() ma.Multiaddr {
a, _ := ma.NewMultiaddrBytes(addrS)
return a
}
// MultiaddrsToSerial converts a slice of Multiaddresses to its
// serializable form.
func MultiaddrsToSerial(addrs []ma.Multiaddr) MultiaddrsSerial {
addrsS := make([]MultiaddrSerial, len(addrs), len(addrs))
for i, a := range addrs {
addrsS[i] = MultiaddrToSerial(a)
}
return addrsS
}
// ToMultiaddrs converts MultiaddrsSerial back to a slice of Multiaddresses
func (addrsS MultiaddrsSerial) ToMultiaddrs() []ma.Multiaddr {
addrs := make([]ma.Multiaddr, len(addrsS), len(addrsS))
for i, addrS := range addrsS {
addrs[i] = addrS.ToMultiaddr()
}
return addrs
// PeerMonitor is a component in charge of monitoring the peers in the cluster
// and providing candidates to the PinAllocator when a pin request arrives.
type PeerMonitor interface {
Component
// LogMetric stores a metric. Metrics are pushed regularly from each peer
// to the active PeerMonitor.
LogMetric(api.Metric)
// LastMetrics returns a map with the latest metrics of matching name
// for the current cluster peers.
LastMetrics(name string) []api.Metric
// Alerts delivers alerts generated when this peer monitor detects
// a problem (i.e. metrics not arriving as expected). Alerts are used to
// trigger rebalancing operations.
Alerts() <-chan api.Alert
}
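A rough sketch of how these pieces might interact (assuming mon is a PeerMonitor and inf an Informer running on this peer):

// Sketch only: push the local metric and read back the latest known values.
mon.LogMetric(inf.GetMetric())
for _, last := range mon.LastMetrics(inf.Name()) {
	if !last.Discard() {
		logger.Debugf("latest %s metric: %s", last.Name, last.Value)
	}
}
go func() {
	for alert := range mon.Alerts() {
		logger.Warningf("monitor alert received: %+v", alert)
	}
}()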

View File

@ -4,25 +4,23 @@ import (
"fmt"
"math/rand"
"os"
"strings"
"sync"
"testing"
"time"
"github.com/ipfs/ipfs-cluster/allocator/numpinalloc"
"github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/informer/numpin"
"github.com/ipfs/ipfs-cluster/state/mapstate"
"github.com/ipfs/ipfs-cluster/test"
cid "github.com/ipfs/go-cid"
crypto "github.com/libp2p/go-libp2p-crypto"
peer "github.com/libp2p/go-libp2p-peer"
ma "github.com/multiformats/go-multiaddr"
)
var (
testCid1 = "QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmq"
testCid = testCid1
testCid2 = "QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmma"
testCid3 = "QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmb"
errorCid = "QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmc"
testPeerID, _ = peer.IDB58Decode("QmXZrtE5jQwXNqCJMfHUTQkvhQ4ZAnqMnmzFMJfLewuabc")
)
//TestClusters*
var (
// number of clusters to create
@ -56,12 +54,12 @@ func randomBytes() []byte {
return bs
}
func createComponents(t *testing.T, i int) (*Config, *RESTAPI, *IPFSHTTPConnector, *MapState, *MapPinTracker, *ipfsMock) {
mock := newIpfsMock()
func createComponents(t *testing.T, i int) (*Config, API, IPFSConnector, State, PinTracker, PeerMonitor, PinAllocator, Informer, *test.IpfsMock) {
mock := test.NewIpfsMock()
clusterAddr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", clusterPort+i))
apiAddr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", apiPort+i))
proxyAddr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", ipfsProxyPort+i))
nodeAddr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", mock.addr, mock.port))
nodeAddr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", mock.Addr, mock.Port))
priv, pub, err := crypto.GenerateKeyPair(crypto.RSA, 2048)
checkErr(t, err)
pid, err := peer.IDFromPublicKey(pub)
@ -77,48 +75,59 @@ func createComponents(t *testing.T, i int) (*Config, *RESTAPI, *IPFSHTTPConnecto
cfg.IPFSNodeAddr = nodeAddr
cfg.ConsensusDataFolder = "./e2eTestRaft/" + pid.Pretty()
cfg.LeaveOnShutdown = false
cfg.ReplicationFactor = -1
api, err := NewRESTAPI(cfg)
checkErr(t, err)
ipfs, err := NewIPFSHTTPConnector(cfg)
checkErr(t, err)
state := NewMapState()
state := mapstate.NewMapState()
tracker := NewMapPinTracker(cfg)
mon := NewStdPeerMonitor(5)
alloc := numpinalloc.NewAllocator()
numpin.MetricTTL = 1 // second
inf := numpin.NewInformer()
return cfg, api, ipfs, state, tracker, mock
return cfg, api, ipfs, state, tracker, mon, alloc, inf, mock
}
func createCluster(t *testing.T, cfg *Config, api *RESTAPI, ipfs *IPFSHTTPConnector, state *MapState, tracker *MapPinTracker) *Cluster {
cl, err := NewCluster(cfg, api, ipfs, state, tracker)
func createCluster(t *testing.T, cfg *Config, api API, ipfs IPFSConnector, state State, tracker PinTracker, mon PeerMonitor, alloc PinAllocator, inf Informer) *Cluster {
cl, err := NewCluster(cfg, api, ipfs, state, tracker, mon, alloc, inf)
checkErr(t, err)
<-cl.Ready()
return cl
}
func createOnePeerCluster(t *testing.T, nth int) (*Cluster, *ipfsMock) {
cfg, api, ipfs, state, tracker, mock := createComponents(t, nth)
cl := createCluster(t, cfg, api, ipfs, state, tracker)
func createOnePeerCluster(t *testing.T, nth int) (*Cluster, *test.IpfsMock) {
cfg, api, ipfs, state, tracker, mon, alloc, inf, mock := createComponents(t, nth)
cl := createCluster(t, cfg, api, ipfs, state, tracker, mon, alloc, inf)
return cl, mock
}
func createClusters(t *testing.T) ([]*Cluster, []*ipfsMock) {
func createClusters(t *testing.T) ([]*Cluster, []*test.IpfsMock) {
os.RemoveAll("./e2eTestRaft")
cfgs := make([]*Config, nClusters, nClusters)
apis := make([]*RESTAPI, nClusters, nClusters)
ipfss := make([]*IPFSHTTPConnector, nClusters, nClusters)
states := make([]*MapState, nClusters, nClusters)
trackers := make([]*MapPinTracker, nClusters, nClusters)
ipfsMocks := make([]*ipfsMock, nClusters, nClusters)
apis := make([]API, nClusters, nClusters)
ipfss := make([]IPFSConnector, nClusters, nClusters)
states := make([]State, nClusters, nClusters)
trackers := make([]PinTracker, nClusters, nClusters)
mons := make([]PeerMonitor, nClusters, nClusters)
allocs := make([]PinAllocator, nClusters, nClusters)
infs := make([]Informer, nClusters, nClusters)
ipfsMocks := make([]*test.IpfsMock, nClusters, nClusters)
clusters := make([]*Cluster, nClusters, nClusters)
clusterPeers := make([]ma.Multiaddr, nClusters, nClusters)
for i := 0; i < nClusters; i++ {
cfg, api, ipfs, state, tracker, mock := createComponents(t, i)
cfg, api, ipfs, state, tracker, mon, alloc, inf, mock := createComponents(t, i)
cfgs[i] = cfg
apis[i] = api
ipfss[i] = ipfs
states[i] = state
trackers[i] = tracker
mons[i] = mon
allocs[i] = alloc
infs[i] = inf
ipfsMocks[i] = mock
addr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d/ipfs/%s",
clusterPort+i,
@ -148,7 +157,7 @@ func createClusters(t *testing.T) ([]*Cluster, []*ipfsMock) {
for i := 0; i < nClusters; i++ {
wg.Add(1)
go func(i int) {
clusters[i] = createCluster(t, cfgs[i], apis[i], ipfss[i], states[i], trackers[i])
clusters[i] = createCluster(t, cfgs[i], apis[i], ipfss[i], states[i], trackers[i], mons[i], allocs[i], infs[i])
wg.Done()
}(i)
}
@ -162,7 +171,7 @@ func createClusters(t *testing.T) ([]*Cluster, []*ipfsMock) {
return clusters, ipfsMocks
}
func shutdownClusters(t *testing.T, clusters []*Cluster, m []*ipfsMock) {
func shutdownClusters(t *testing.T, clusters []*Cluster, m []*test.IpfsMock) {
for i, c := range clusters {
m[i].Close()
err := c.Shutdown()
@ -222,8 +231,8 @@ func TestClustersPeers(t *testing.T) {
t.Fatal("expected as many peers as clusters")
}
clusterIDMap := make(map[peer.ID]ID)
peerIDMap := make(map[peer.ID]ID)
clusterIDMap := make(map[peer.ID]api.ID)
peerIDMap := make(map[peer.ID]api.ID)
for _, c := range clusters {
id := c.ID()
@ -239,9 +248,9 @@ func TestClustersPeers(t *testing.T) {
if !ok {
t.Fatal("expected id in both maps")
}
if !crypto.KeyEqual(id.PublicKey, id2.PublicKey) {
t.Error("expected same public key")
}
//if !crypto.KeyEqual(id.PublicKey, id2.PublicKey) {
// t.Error("expected same public key")
//}
if id.IPFS.ID != id2.IPFS.ID {
t.Error("expected same ipfs daemon ID")
}
@ -251,7 +260,7 @@ func TestClustersPeers(t *testing.T) {
func TestClustersPin(t *testing.T) {
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
exampleCid, _ := cid.Decode(testCid)
exampleCid, _ := cid.Decode(test.TestCid1)
prefix := exampleCid.Prefix()
for i := 0; i < nPins; i++ {
j := rand.Intn(nClusters) // choose a random cluster peer
@ -271,9 +280,9 @@ func TestClustersPin(t *testing.T) {
fpinned := func(t *testing.T, c *Cluster) {
status := c.tracker.StatusAll()
for _, v := range status {
if v.Status != TrackerStatusPinned {
if v.Status != api.TrackerStatusPinned {
t.Errorf("%s should have been pinned but it is %s",
v.CidStr,
v.Cid,
v.Status.String())
}
}
@ -288,12 +297,12 @@ func TestClustersPin(t *testing.T) {
for i := 0; i < nPins; i++ {
j := rand.Intn(nClusters) // choose a random cluster peer
err := clusters[j].Unpin(pinList[i])
err := clusters[j].Unpin(pinList[i].Cid)
if err != nil {
t.Errorf("error unpinning %s: %s", pinList[i], err)
}
// test re-unpin
err = clusters[j].Unpin(pinList[i])
err = clusters[j].Unpin(pinList[i].Cid)
if err != nil {
t.Errorf("error re-unpinning %s: %s", pinList[i], err)
}
@ -314,7 +323,7 @@ func TestClustersPin(t *testing.T) {
func TestClustersStatusAll(t *testing.T) {
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
h, _ := cid.Decode(testCid)
h, _ := cid.Decode(test.TestCid1)
clusters[0].Pin(h)
delay()
// Global status
@ -326,7 +335,7 @@ func TestClustersStatusAll(t *testing.T) {
if len(statuses) == 0 {
t.Fatal("bad status. Expected one item")
}
if statuses[0].Cid.String() != testCid {
if statuses[0].Cid.String() != test.TestCid1 {
t.Error("bad cid in status")
}
info := statuses[0].PeerMap
@ -334,7 +343,7 @@ func TestClustersStatusAll(t *testing.T) {
t.Error("bad info in status")
}
if info[c.host.ID()].Status != TrackerStatusPinned {
if info[c.host.ID()].Status != api.TrackerStatusPinned {
t.Error("the hash should have been pinned")
}
@ -348,7 +357,7 @@ func TestClustersStatusAll(t *testing.T) {
t.Fatal("Host not in status")
}
if pinfo.Status != TrackerStatusPinned {
if pinfo.Status != api.TrackerStatusPinned {
t.Error("the status should show the hash as pinned")
}
}
@ -358,8 +367,8 @@ func TestClustersStatusAll(t *testing.T) {
func TestClustersSyncAllLocal(t *testing.T) {
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
h, _ := cid.Decode(errorCid) // This cid always fails
h2, _ := cid.Decode(testCid2)
h, _ := cid.Decode(test.ErrorCid) // This cid always fails
h2, _ := cid.Decode(test.TestCid2)
clusters[0].Pin(h)
clusters[0].Pin(h2)
delay()
@ -375,7 +384,7 @@ func TestClustersSyncAllLocal(t *testing.T) {
t.Fatal("expected 1 elem slice")
}
// Last-known state may still be pinning
if infos[0].Status != TrackerStatusPinError && infos[0].Status != TrackerStatusPinning {
if infos[0].Status != api.TrackerStatusPinError && infos[0].Status != api.TrackerStatusPinning {
t.Error("element should be in Pinning or PinError state")
}
}
@ -386,8 +395,8 @@ func TestClustersSyncAllLocal(t *testing.T) {
func TestClustersSyncLocal(t *testing.T) {
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
h, _ := cid.Decode(errorCid) // This cid always fails
h2, _ := cid.Decode(testCid2)
h, _ := cid.Decode(test.ErrorCid) // This cid always fails
h2, _ := cid.Decode(test.TestCid2)
clusters[0].Pin(h)
clusters[0].Pin(h2)
delay()
@ -397,7 +406,7 @@ func TestClustersSyncLocal(t *testing.T) {
if err != nil {
t.Error(err)
}
if info.Status != TrackerStatusPinError && info.Status != TrackerStatusPinning {
if info.Status != api.TrackerStatusPinError && info.Status != api.TrackerStatusPinning {
t.Errorf("element is %s and not PinError", info.Status)
}
@ -406,7 +415,7 @@ func TestClustersSyncLocal(t *testing.T) {
if err != nil {
t.Error(err)
}
if info.Status != TrackerStatusPinned {
if info.Status != api.TrackerStatusPinned {
t.Error("element should be in Pinned state")
}
}
@ -417,8 +426,8 @@ func TestClustersSyncLocal(t *testing.T) {
func TestClustersSyncAll(t *testing.T) {
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
h, _ := cid.Decode(errorCid) // This cid always fails
h2, _ := cid.Decode(testCid2)
h, _ := cid.Decode(test.ErrorCid) // This cid always fails
h2, _ := cid.Decode(test.TestCid2)
clusters[0].Pin(h)
clusters[0].Pin(h2)
delay()
@ -431,15 +440,15 @@ func TestClustersSyncAll(t *testing.T) {
if len(ginfos) != 1 {
t.Fatal("expected globalsync to have 1 elements")
}
if ginfos[0].Cid.String() != errorCid {
t.Error("expected globalsync to have problems with errorCid")
if ginfos[0].Cid.String() != test.ErrorCid {
t.Error("expected globalsync to have problems with test.ErrorCid")
}
for _, c := range clusters {
inf, ok := ginfos[0].PeerMap[c.host.ID()]
if !ok {
t.Fatal("GlobalPinInfo should have this cluster")
}
if inf.Status != TrackerStatusPinError && inf.Status != TrackerStatusPinning {
if inf.Status != api.TrackerStatusPinError && inf.Status != api.TrackerStatusPinning {
t.Error("should be PinError in all peers")
}
}
@ -448,8 +457,8 @@ func TestClustersSyncAll(t *testing.T) {
func TestClustersSync(t *testing.T) {
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
h, _ := cid.Decode(errorCid) // This cid always fails
h2, _ := cid.Decode(testCid2)
h, _ := cid.Decode(test.ErrorCid) // This cid always fails
h2, _ := cid.Decode(test.TestCid2)
clusters[0].Pin(h)
clusters[0].Pin(h2)
delay()
@ -469,8 +478,8 @@ func TestClustersSync(t *testing.T) {
t.Error("pinInfo error should not be empty")
}
if ginfo.Cid.String() != errorCid {
t.Error("GlobalPinInfo should be for errorCid")
if ginfo.Cid.String() != test.ErrorCid {
t.Error("GlobalPinInfo should be for test.ErrorCid")
}
for _, c := range clusters {
@ -480,7 +489,7 @@ func TestClustersSync(t *testing.T) {
t.Fatal("GlobalPinInfo should not be empty for this host")
}
if inf.Status != TrackerStatusPinError && inf.Status != TrackerStatusPinning {
if inf.Status != api.TrackerStatusPinError && inf.Status != api.TrackerStatusPinning {
t.Error("should be PinError or Pinning in all peers")
}
}
@ -491,7 +500,7 @@ func TestClustersSync(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if ginfo.Cid.String() != testCid2 {
if ginfo.Cid.String() != test.TestCid2 {
t.Error("GlobalPinInfo should be for testrCid2")
}
@ -500,7 +509,7 @@ func TestClustersSync(t *testing.T) {
if !ok {
t.Fatal("GlobalPinInfo should have this cluster")
}
if inf.Status != TrackerStatusPinned {
if inf.Status != api.TrackerStatusPinned {
t.Error("the GlobalPinInfo should show Pinned in all peers")
}
}
@ -509,8 +518,8 @@ func TestClustersSync(t *testing.T) {
func TestClustersRecoverLocal(t *testing.T) {
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
h, _ := cid.Decode(errorCid) // This cid always fails
h2, _ := cid.Decode(testCid2)
h, _ := cid.Decode(test.ErrorCid) // This cid always fails
h2, _ := cid.Decode(test.TestCid2)
clusters[0].Pin(h)
clusters[0].Pin(h2)
@ -521,7 +530,7 @@ func TestClustersRecoverLocal(t *testing.T) {
if err == nil {
t.Error("expected an error recovering")
}
if info.Status != TrackerStatusPinError {
if info.Status != api.TrackerStatusPinError {
t.Errorf("element is %s and not PinError", info.Status)
}
@ -530,7 +539,7 @@ func TestClustersRecoverLocal(t *testing.T) {
if err != nil {
t.Error(err)
}
if info.Status != TrackerStatusPinned {
if info.Status != api.TrackerStatusPinned {
t.Error("element should be in Pinned state")
}
}
@ -541,8 +550,8 @@ func TestClustersRecoverLocal(t *testing.T) {
func TestClustersRecover(t *testing.T) {
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
h, _ := cid.Decode(errorCid) // This cid always fails
h2, _ := cid.Decode(testCid2)
h, _ := cid.Decode(test.ErrorCid) // This cid always fails
h2, _ := cid.Decode(test.TestCid2)
clusters[0].Pin(h)
clusters[0].Pin(h2)
@ -566,11 +575,11 @@ func TestClustersRecover(t *testing.T) {
for _, c := range clusters {
inf, ok := ginfo.PeerMap[c.host.ID()]
if !ok {
t.Logf("%+v", ginfo)
t.Fatal("GlobalPinInfo should not be empty for this host")
}
if inf.Status != TrackerStatusPinError {
if inf.Status != api.TrackerStatusPinError {
t.Logf("%+v", inf)
t.Error("should be PinError in all peers")
}
}
@ -581,7 +590,7 @@ func TestClustersRecover(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if ginfo.Cid.String() != testCid2 {
if ginfo.Cid.String() != test.TestCid2 {
t.Error("GlobalPinInfo should be for testrCid2")
}
@ -590,7 +599,7 @@ func TestClustersRecover(t *testing.T) {
if !ok {
t.Fatal("GlobalPinInfo should have this cluster")
}
if inf.Status != TrackerStatusPinned {
if inf.Status != api.TrackerStatusPinned {
t.Error("the GlobalPinInfo should show Pinned in all peers")
}
}
@ -611,3 +620,212 @@ func TestClustersShutdown(t *testing.T) {
runF(t, clusters, f)
runF(t, clusters, f)
}
func TestClustersReplication(t *testing.T) {
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
for _, c := range clusters {
c.config.ReplicationFactor = nClusters - 1
}
// Why is the replication factor nClusters - 1?
// Because that way we know that pinning nClusters
// items with a strategy like numpin (which tries
// to make everyone pin the same number of things)
// spreads nClusters*(nClusters-1) allocations over
// nClusters peers, so each peer ends up pinning
// nClusters-1 items locally and tracking the
// remaining one as remote.
// Let some metrics arrive
time.Sleep(time.Second)
tmpCid, _ := cid.Decode(test.TestCid1)
prefix := tmpCid.Prefix()
for i := 0; i < nClusters; i++ {
// Pick a random cluster and hash
j := rand.Intn(nClusters) // choose a random cluster peer
h, err := prefix.Sum(randomBytes()) // create random cid
checkErr(t, err)
err = clusters[j].Pin(h)
if err != nil {
t.Error(err)
}
time.Sleep(time.Second / 2)
// check that it is held by exactly nClusters -1 peers
gpi, err := clusters[j].Status(h)
if err != nil {
t.Fatal(err)
}
numLocal := 0
numRemote := 0
for _, v := range gpi.PeerMap {
if v.Status == api.TrackerStatusPinned {
numLocal++
} else if v.Status == api.TrackerStatusRemote {
numRemote++
}
}
if numLocal != nClusters-1 {
t.Errorf("We wanted replication %d but it's only %d",
nClusters-1, numLocal)
}
if numRemote != 1 {
t.Errorf("We wanted 1 peer track as remote but %d do", numRemote)
}
time.Sleep(time.Second / 2) // this is for metric to be up to date
}
f := func(t *testing.T, c *Cluster) {
pinfos := c.tracker.StatusAll()
if len(pinfos) != nClusters {
t.Error("Pinfos does not have the expected pins")
}
numRemote := 0
numLocal := 0
for _, pi := range pinfos {
switch pi.Status {
case api.TrackerStatusPinned:
numLocal++
case api.TrackerStatusRemote:
numRemote++
}
}
if numLocal != nClusters-1 {
t.Errorf("Expected %d local pins but got %d", nClusters-1, numLocal)
}
if numRemote != 1 {
t.Errorf("Expected 1 remote pin but got %d", numRemote)
}
pins := c.Pins()
for _, pin := range pins {
allocs := pin.Allocations
if len(allocs) != nClusters-1 {
t.Errorf("Allocations are [%s]", allocs)
}
for _, a := range allocs {
if a == c.id {
pinfo := c.tracker.Status(pin.Cid)
if pinfo.Status != api.TrackerStatusPinned {
t.Errorf("Peer %s was allocated but it is not pinning cid", c.id)
}
}
}
}
}
runF(t, clusters, f)
}
// In this test we check that repinning something
// when a node has gone down will re-assign the pin
func TestClustersReplicationRealloc(t *testing.T) {
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
for _, c := range clusters {
c.config.ReplicationFactor = nClusters - 1
}
// Let some metrics arrive
time.Sleep(time.Second)
j := rand.Intn(nClusters)
h, _ := cid.Decode(test.TestCid1)
err := clusters[j].Pin(h)
if err != nil {
t.Error(err)
}
// Let the pin arrive
time.Sleep(time.Second / 2)
// Re-pin should fail as it is allocated already
err = clusters[j].Pin(h)
if err == nil {
t.Fatal("expected an error")
}
t.Log(err)
var killedClusterIndex int
// find someone that pinned it and kill that cluster
for i, c := range clusters {
pinfo := c.tracker.Status(h)
if pinfo.Status == api.TrackerStatusPinned {
killedClusterIndex = i
c.Shutdown()
break
}
}
// let metrics expire
time.Sleep(2 * time.Second)
// now pin should succeed
err = clusters[j].Pin(h)
if err != nil {
t.Fatal(err)
}
numPinned := 0
for i, c := range clusters {
if i == killedClusterIndex {
continue
}
pinfo := c.tracker.Status(h)
if pinfo.Status == api.TrackerStatusPinned {
numPinned++
}
}
if numPinned != nClusters-1 {
t.Error("pin should have been correctly re-assigned")
}
}
// In this test we try to pin something when there are not
// as many available peers as we need. It's like before, except
// more peers are killed.
func TestClustersReplicationNotEnoughPeers(t *testing.T) {
if nClusters < 5 {
t.Skip("Need at least 5 peers")
}
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
for _, c := range clusters {
c.config.ReplicationFactor = nClusters - 1
}
// Let some metrics arrive
time.Sleep(time.Second)
j := rand.Intn(nClusters)
h, _ := cid.Decode(test.TestCid1)
err := clusters[j].Pin(h)
if err != nil {
t.Error(err)
}
// Let the pin arrive
time.Sleep(time.Second / 2)
clusters[1].Shutdown()
clusters[2].Shutdown()
// Time for consensus to catch up again in case we hit the leader.
delay()
err = clusters[j].Pin(h)
if err == nil {
t.Fatal("expected an error")
}
if !strings.Contains(err.Error(), "enough allocations") {
t.Error("different error than expected")
t.Error(err)
}
t.Log(err)
}

109
log_op.go Normal file
View File

@ -0,0 +1,109 @@
package ipfscluster
import (
"context"
"errors"
"github.com/ipfs/ipfs-cluster/api"
rpc "github.com/hsanjuan/go-libp2p-gorpc"
consensus "github.com/libp2p/go-libp2p-consensus"
peer "github.com/libp2p/go-libp2p-peer"
ma "github.com/multiformats/go-multiaddr"
)
// Type of consensus operation
const (
LogOpPin = iota + 1
LogOpUnpin
LogOpAddPeer
LogOpRmPeer
)
// LogOpType expresses the type of a consensus Operation
type LogOpType int
// LogOp represents an operation for the OpLogConsensus system.
// It implements the consensus.Op interface and it is used by the
// Consensus component.
type LogOp struct {
Cid api.CidArgSerial
Peer api.MultiaddrSerial
Type LogOpType
ctx context.Context
rpcClient *rpc.Client
}
// ApplyTo applies the operation to the State
func (op *LogOp) ApplyTo(cstate consensus.State) (consensus.State, error) {
state, ok := cstate.(State)
var err error
if !ok {
// Should never be here
panic("received unexpected state type")
}
switch op.Type {
case LogOpPin:
arg := op.Cid.ToCidArg()
err = state.Add(arg)
if err != nil {
goto ROLLBACK
}
// Async, we let the PinTracker take care of any problems
op.rpcClient.Go("",
"Cluster",
"Track",
arg.ToSerial(),
&struct{}{},
nil)
case LogOpUnpin:
arg := op.Cid.ToCidArg()
err = state.Rm(arg.Cid)
if err != nil {
goto ROLLBACK
}
// Async, we let the PinTracker take care of any problems
op.rpcClient.Go("",
"Cluster",
"Untrack",
arg.ToSerial(),
&struct{}{},
nil)
case LogOpAddPeer:
addr := op.Peer.ToMultiaddr()
op.rpcClient.Call("",
"Cluster",
"PeerManagerAddPeer",
api.MultiaddrToSerial(addr),
&struct{}{})
// TODO rebalance ops
case LogOpRmPeer:
addr := op.Peer.ToMultiaddr()
pidstr, err := addr.ValueForProtocol(ma.P_IPFS)
if err != nil {
panic("peer badly encoded")
}
pid, err := peer.IDB58Decode(pidstr)
if err != nil {
panic("could not decode a PID we ourselves encoded")
}
op.rpcClient.Call("",
"Cluster",
"PeerManagerRmPeer",
pid,
&struct{}{})
// TODO rebalance ops
default:
logger.Error("unknown LogOp type. Ignoring")
}
return state, nil
ROLLBACK:
// We failed to apply the operation to the state
// and therefore we need to request that the cluster
// roll back to the previous state. This operation can
// only be performed by the cluster leader.
logger.Error("Rollbacks are not implemented")
return nil, errors.New("a rollback may be necessary. Reason: " + err.Error())
}

82
log_op_test.go Normal file
View File

@ -0,0 +1,82 @@
package ipfscluster
import (
"context"
"testing"
cid "github.com/ipfs/go-cid"
"github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/state/mapstate"
"github.com/ipfs/ipfs-cluster/test"
)
func TestApplyToPin(t *testing.T) {
op := &LogOp{
Cid: api.CidArgSerial{Cid: test.TestCid1},
Type: LogOpPin,
ctx: context.Background(),
rpcClient: test.NewMockRPCClient(t),
}
st := mapstate.NewMapState()
op.ApplyTo(st)
pins := st.List()
if len(pins) != 1 || pins[0].Cid.String() != test.TestCid1 {
t.Error("the state was not modified correctly")
}
}
func TestApplyToUnpin(t *testing.T) {
op := &LogOp{
Cid: api.CidArgSerial{Cid: test.TestCid1},
Type: LogOpUnpin,
ctx: context.Background(),
rpcClient: test.NewMockRPCClient(t),
}
st := mapstate.NewMapState()
c, _ := cid.Decode(test.TestCid1)
st.Add(api.CidArg{Cid: c, Everywhere: true})
op.ApplyTo(st)
pins := st.List()
if len(pins) != 0 {
t.Error("the state was not modified correctly")
}
}
func TestApplyToBadState(t *testing.T) {
defer func() {
if r := recover(); r == nil {
t.Error("should have recovered an error")
}
}()
op := &LogOp{
Cid: api.CidArgSerial{Cid: test.TestCid1},
Type: LogOpUnpin,
ctx: context.Background(),
rpcClient: test.NewMockRPCClient(t),
}
var st interface{}
op.ApplyTo(st)
}
// func TestApplyToBadCid(t *testing.T) {
// defer func() {
// if r := recover(); r == nil {
// t.Error("should have recovered an error")
// }
// }()
// op := &LogOp{
// Cid: api.CidArgSerial{Cid: "agadfaegf"},
// Type: LogOpPin,
// ctx: context.Background(),
// rpcClient: test.NewMockRPCClient(t),
// }
// st := mapstate.NewMapState()
// op.ApplyTo(st)
// }

View File

@ -1,17 +1,14 @@
package ipfscluster
import (
"bufio"
"bytes"
"log"
"strings"
"time"
logging "github.com/ipfs/go-log"
)
var logger = logging.Logger("cluster")
var raftStdLogger = makeRaftLogger()
var raftStdLogger = log.New(&logForwarder{}, "", 0)
var raftLogger = logging.Logger("raft")
// SetFacilityLogLevel sets the log level for a given module
@ -27,33 +24,25 @@ func SetFacilityLogLevel(f, l string) {
logging.SetLogLevel(f, l)
}
// This redirects Raft output to our logger
func makeRaftLogger() *log.Logger {
var buf bytes.Buffer
rLogger := log.New(&buf, "", 0)
reader := bufio.NewReader(&buf)
go func() {
for {
t, err := reader.ReadString('\n')
if err != nil {
time.Sleep(time.Second)
continue
}
t = strings.TrimSuffix(t, "\n")
// implements the writer interface
type logForwarder struct{}
switch {
case strings.Contains(t, "[DEBUG]"):
raftLogger.Debug(strings.TrimPrefix(t, "[DEBUG] raft: "))
case strings.Contains(t, "[WARN]"):
raftLogger.Warning(strings.TrimPrefix(t, "[WARN] raft: "))
case strings.Contains(t, "[ERR]"):
raftLogger.Error(strings.TrimPrefix(t, "[ERR] raft: "))
case strings.Contains(t, "[INFO]"):
raftLogger.Info(strings.TrimPrefix(t, "[INFO] raft: "))
default:
raftLogger.Debug(t)
}
}
}()
return rLogger
// Write forwards to our go-log logger.
// According to https://golang.org/pkg/log/#Logger.Output
// it is called per line.
func (fw *logForwarder) Write(p []byte) (n int, err error) {
t := strings.TrimSuffix(string(p), "\n")
switch {
case strings.Contains(t, "[DEBUG]"):
raftLogger.Debug(strings.TrimPrefix(t, "[DEBUG] raft: "))
case strings.Contains(t, "[WARN]"):
raftLogger.Warning(strings.TrimPrefix(t, "[WARN] raft: "))
case strings.Contains(t, "[ERR]"):
raftLogger.Error(strings.TrimPrefix(t, "[ERR] raft: "))
case strings.Contains(t, "[INFO]"):
raftLogger.Info(strings.TrimPrefix(t, "[INFO] raft: "))
default:
raftLogger.Debug(t)
}
return len(p), nil
}
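
A minimal sketch of the forwarding path (the helper function and the log lines are invented for illustration; only the package-level raftStdLogger, raftLogger and logForwarder above are real):

// Hypothetical example: hashicorp/raft normally writes through raftStdLogger,
// so every line it prints reaches logForwarder.Write exactly once and is
// re-emitted at the matching go-log level.
func forwardingExample() {
    raftStdLogger.Println("[WARN] raft: heartbeat timeout reached, starting election")
    // forwarded as raftLogger.Warning("heartbeat timeout reached, starting election")
    raftStdLogger.Println("[INFO] raft: entering follower state")
    // forwarded as raftLogger.Info("entering follower state")
}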

View File

@ -6,6 +6,8 @@ import (
"sync"
"time"
"github.com/ipfs/ipfs-cluster/api"
rpc "github.com/hsanjuan/go-libp2p-gorpc"
cid "github.com/ipfs/go-cid"
peer "github.com/libp2p/go-libp2p-peer"
@ -19,6 +21,11 @@ var (
UnpinningTimeout = 10 * time.Second
)
// PinQueueSize specifies the maximum number of pin operations waiting
// to be performed. If the queue is full, pins/unpins will be set to
// pinError/unpinError.
var PinQueueSize = 1024
var (
errUnpinningTimeout = errors.New("unpinning operation is taking too long")
errPinningTimeout = errors.New("pinning operation is taking too long")
@ -30,47 +37,64 @@ var (
// to store the status of the tracked Cids. This component is thread-safe.
type MapPinTracker struct {
mux sync.RWMutex
status map[string]PinInfo
status map[string]api.PinInfo
ctx context.Context
cancel func()
ctx context.Context
rpcClient *rpc.Client
rpcReady chan struct{}
peerID peer.ID
peerID peer.ID
pinCh chan api.CidArg
unpinCh chan api.CidArg
shutdownLock sync.Mutex
shutdown bool
shutdownCh chan struct{}
wg sync.WaitGroup
}
// NewMapPinTracker returns a new object which has been correctly
// initialized with the given configuration.
func NewMapPinTracker(cfg *Config) *MapPinTracker {
ctx := context.Background()
ctx, cancel := context.WithCancel(context.Background())
mpt := &MapPinTracker{
ctx: ctx,
status: make(map[string]PinInfo),
rpcReady: make(chan struct{}, 1),
peerID: cfg.ID,
shutdownCh: make(chan struct{}, 1),
ctx: ctx,
cancel: cancel,
status: make(map[string]api.PinInfo),
rpcReady: make(chan struct{}, 1),
peerID: cfg.ID,
pinCh: make(chan api.CidArg, PinQueueSize),
unpinCh: make(chan api.CidArg, PinQueueSize),
}
mpt.run()
go mpt.pinWorker()
go mpt.unpinWorker()
return mpt
}
// run does nothing other than give MapPinTracker a cancellable context.
func (mpt *MapPinTracker) run() {
mpt.wg.Add(1)
go func() {
defer mpt.wg.Done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
mpt.ctx = ctx
<-mpt.rpcReady
logger.Info("PinTracker ready")
<-mpt.shutdownCh
}()
// pinWorker reads the pin queue and triggers pins on the IPFS daemon one by one
func (mpt *MapPinTracker) pinWorker() {
for {
select {
case p := <-mpt.pinCh:
mpt.pin(p)
case <-mpt.ctx.Done():
return
}
}
}
// unpinWorker reads the unpin queue and triggers unpin requests on the IPFS daemon
func (mpt *MapPinTracker) unpinWorker() {
for {
select {
case p := <-mpt.unpinCh:
mpt.unpin(p)
case <-mpt.ctx.Done():
return
}
}
}
// Shutdown finishes the services provided by the MapPinTracker and cancels
@ -85,28 +109,27 @@ func (mpt *MapPinTracker) Shutdown() error {
}
logger.Info("stopping MapPinTracker")
mpt.cancel()
close(mpt.rpcReady)
mpt.shutdownCh <- struct{}{}
mpt.wg.Wait()
mpt.shutdown = true
return nil
}
func (mpt *MapPinTracker) set(c *cid.Cid, s TrackerStatus) {
func (mpt *MapPinTracker) set(c *cid.Cid, s api.TrackerStatus) {
mpt.mux.Lock()
defer mpt.mux.Unlock()
mpt.unsafeSet(c, s)
}
func (mpt *MapPinTracker) unsafeSet(c *cid.Cid, s TrackerStatus) {
if s == TrackerStatusUnpinned {
func (mpt *MapPinTracker) unsafeSet(c *cid.Cid, s api.TrackerStatus) {
if s == api.TrackerStatusUnpinned {
delete(mpt.status, c.String())
return
}
mpt.status[c.String()] = PinInfo{
// cid: c,
CidStr: c.String(),
mpt.status[c.String()] = api.PinInfo{
Cid: c,
Peer: mpt.peerID,
Status: s,
TS: time.Now(),
@ -114,19 +137,19 @@ func (mpt *MapPinTracker) unsafeSet(c *cid.Cid, s TrackerStatus) {
}
}
func (mpt *MapPinTracker) get(c *cid.Cid) PinInfo {
func (mpt *MapPinTracker) get(c *cid.Cid) api.PinInfo {
mpt.mux.RLock()
defer mpt.mux.RUnlock()
return mpt.unsafeGet(c)
}
func (mpt *MapPinTracker) unsafeGet(c *cid.Cid) PinInfo {
func (mpt *MapPinTracker) unsafeGet(c *cid.Cid) api.PinInfo {
p, ok := mpt.status[c.String()]
if !ok {
return PinInfo{
CidStr: c.String(),
return api.PinInfo{
Cid: c,
Peer: mpt.peerID,
Status: TrackerStatusUnpinned,
Status: api.TrackerStatusUnpinned,
TS: time.Now(),
Error: "",
}
@ -144,80 +167,116 @@ func (mpt *MapPinTracker) setError(c *cid.Cid, err error) {
func (mpt *MapPinTracker) unsafeSetError(c *cid.Cid, err error) {
p := mpt.unsafeGet(c)
switch p.Status {
case TrackerStatusPinned, TrackerStatusPinning, TrackerStatusPinError:
mpt.status[c.String()] = PinInfo{
CidStr: c.String(),
case api.TrackerStatusPinned, api.TrackerStatusPinning, api.TrackerStatusPinError:
mpt.status[c.String()] = api.PinInfo{
Cid: c,
Peer: mpt.peerID,
Status: TrackerStatusPinError,
Status: api.TrackerStatusPinError,
TS: time.Now(),
Error: err.Error(),
}
case TrackerStatusUnpinned, TrackerStatusUnpinning, TrackerStatusUnpinError:
mpt.status[c.String()] = PinInfo{
CidStr: c.String(),
case api.TrackerStatusUnpinned, api.TrackerStatusUnpinning, api.TrackerStatusUnpinError:
mpt.status[c.String()] = api.PinInfo{
Cid: c,
Peer: mpt.peerID,
Status: TrackerStatusUnpinError,
Status: api.TrackerStatusUnpinError,
TS: time.Now(),
Error: err.Error(),
}
}
}
func (mpt *MapPinTracker) pin(c *cid.Cid) error {
mpt.set(c, TrackerStatusPinning)
func (mpt *MapPinTracker) isRemote(c api.CidArg) bool {
if c.Everywhere {
return false
}
for _, p := range c.Allocations {
if p == mpt.peerID {
return false
}
}
return true
}
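
A hypothetical illustration of this rule (the peer IDs are the test fixtures used elsewhere in this change; c and mpt stand for an existing Cid and tracker):

// Everywhere is false and this peer is not among the allocations, so the
// tracker will mark the pin as TrackerStatusRemote instead of pinning it.
arg := api.CidArg{
    Cid:         c,
    Everywhere:  false,
    Allocations: []peer.ID{test.TestPeerID2, test.TestPeerID3},
}
remote := mpt.isRemote(arg) // true unless mpt.peerID is one of the allocations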
func (mpt *MapPinTracker) pin(c api.CidArg) error {
mpt.set(c.Cid, api.TrackerStatusPinning)
err := mpt.rpcClient.Call("",
"Cluster",
"IPFSPin",
NewCidArg(c),
c.ToSerial(),
&struct{}{})
if err != nil {
mpt.setError(c, err)
mpt.setError(c.Cid, err)
return err
}
mpt.set(c, TrackerStatusPinned)
mpt.set(c.Cid, api.TrackerStatusPinned)
return nil
}
func (mpt *MapPinTracker) unpin(c *cid.Cid) error {
mpt.set(c, TrackerStatusUnpinning)
func (mpt *MapPinTracker) unpin(c api.CidArg) error {
err := mpt.rpcClient.Call("",
"Cluster",
"IPFSUnpin",
NewCidArg(c),
c.ToSerial(),
&struct{}{})
if err != nil {
mpt.setError(c, err)
mpt.setError(c.Cid, err)
return err
}
mpt.set(c, TrackerStatusUnpinned)
mpt.set(c.Cid, api.TrackerStatusUnpinned)
return nil
}
// Track tells the MapPinTracker to start managing a Cid,
// possibly triggering Pin operations on the IPFS daemon.
func (mpt *MapPinTracker) Track(c *cid.Cid) error {
return mpt.pin(c)
func (mpt *MapPinTracker) Track(c api.CidArg) error {
if mpt.isRemote(c) {
if mpt.get(c.Cid).Status == api.TrackerStatusPinned {
mpt.unpin(c)
}
mpt.set(c.Cid, api.TrackerStatusRemote)
return nil
}
mpt.set(c.Cid, api.TrackerStatusPinning)
select {
case mpt.pinCh <- c:
default:
mpt.setError(c.Cid, errors.New("pin queue is full"))
return logError("map_pin_tracker pin queue is full")
}
return nil
}
// Untrack tells the MapPinTracker to stop managing a Cid.
// If the Cid is pinned locally, it will be unpinned.
func (mpt *MapPinTracker) Untrack(c *cid.Cid) error {
return mpt.unpin(c)
mpt.set(c, api.TrackerStatusUnpinning)
select {
case mpt.unpinCh <- api.CidArgCid(c):
default:
mpt.setError(c, errors.New("unpin queue is full"))
return logError("map_pin_tracker unpin queue is full")
}
return nil
}
// Status returns information for a Cid tracked by this
// MapPinTracker.
func (mpt *MapPinTracker) Status(c *cid.Cid) PinInfo {
func (mpt *MapPinTracker) Status(c *cid.Cid) api.PinInfo {
return mpt.get(c)
}
// StatusAll returns information for all Cids tracked by this
// MapPinTracker.
func (mpt *MapPinTracker) StatusAll() []PinInfo {
func (mpt *MapPinTracker) StatusAll() []api.PinInfo {
mpt.mux.Lock()
defer mpt.mux.Unlock()
pins := make([]PinInfo, 0, len(mpt.status))
pins := make([]api.PinInfo, 0, len(mpt.status))
for _, v := range mpt.status {
pins = append(pins, v)
}
@ -232,12 +291,12 @@ func (mpt *MapPinTracker) StatusAll() []PinInfo {
// Pins in error states can be recovered with Recover().
// An error is returned if we are unable to contact
// the IPFS daemon.
func (mpt *MapPinTracker) Sync(c *cid.Cid) (PinInfo, error) {
var ips IPFSPinStatus
func (mpt *MapPinTracker) Sync(c *cid.Cid) (api.PinInfo, error) {
var ips api.IPFSPinStatus
err := mpt.rpcClient.Call("",
"Cluster",
"IPFSPinLsCid",
NewCidArg(c),
api.CidArgCid(c).ToSerial(),
&ips)
if err != nil {
mpt.setError(c, err)
@ -254,13 +313,13 @@ func (mpt *MapPinTracker) Sync(c *cid.Cid) (PinInfo, error) {
// were updated or have errors. Cids in error states can be recovered
// with Recover().
// An error is returned if we are unable to contact the IPFS daemon.
func (mpt *MapPinTracker) SyncAll() ([]PinInfo, error) {
var ipsMap map[string]IPFSPinStatus
var pInfos []PinInfo
func (mpt *MapPinTracker) SyncAll() ([]api.PinInfo, error) {
var ipsMap map[string]api.IPFSPinStatus
var pInfos []api.PinInfo
err := mpt.rpcClient.Call("",
"Cluster",
"IPFSPinLs",
struct{}{},
"recursive",
&ipsMap)
if err != nil {
mpt.mux.Lock()
@ -275,57 +334,54 @@ func (mpt *MapPinTracker) SyncAll() ([]PinInfo, error) {
status := mpt.StatusAll()
for _, pInfoOrig := range status {
c, err := cid.Decode(pInfoOrig.CidStr)
if err != nil { // this should not happen but let's play safe
return pInfos, err
}
var pInfoNew PinInfo
ips, ok := ipsMap[pInfoOrig.CidStr]
var pInfoNew api.PinInfo
c := pInfoOrig.Cid
ips, ok := ipsMap[c.String()]
if !ok {
pInfoNew = mpt.syncStatus(c, IPFSPinStatusUnpinned)
pInfoNew = mpt.syncStatus(c, api.IPFSPinStatusUnpinned)
} else {
pInfoNew = mpt.syncStatus(c, ips)
}
if pInfoOrig.Status != pInfoNew.Status ||
pInfoNew.Status == TrackerStatusUnpinError ||
pInfoNew.Status == TrackerStatusPinError {
pInfoNew.Status == api.TrackerStatusUnpinError ||
pInfoNew.Status == api.TrackerStatusPinError {
pInfos = append(pInfos, pInfoNew)
}
}
return pInfos, nil
}
func (mpt *MapPinTracker) syncStatus(c *cid.Cid, ips IPFSPinStatus) PinInfo {
func (mpt *MapPinTracker) syncStatus(c *cid.Cid, ips api.IPFSPinStatus) api.PinInfo {
p := mpt.get(c)
if ips.IsPinned() {
switch p.Status {
case TrackerStatusPinned: // nothing
case TrackerStatusPinning, TrackerStatusPinError:
mpt.set(c, TrackerStatusPinned)
case TrackerStatusUnpinning:
case api.TrackerStatusPinned: // nothing
case api.TrackerStatusPinning, api.TrackerStatusPinError:
mpt.set(c, api.TrackerStatusPinned)
case api.TrackerStatusUnpinning:
if time.Since(p.TS) > UnpinningTimeout {
mpt.setError(c, errUnpinningTimeout)
}
case TrackerStatusUnpinned:
case api.TrackerStatusUnpinned:
mpt.setError(c, errPinned)
case TrackerStatusUnpinError: // nothing, keep error as it was
default:
case api.TrackerStatusUnpinError: // nothing, keep error as it was
default: //remote
}
} else {
switch p.Status {
case TrackerStatusPinned:
case api.TrackerStatusPinned:
mpt.setError(c, errUnpinned)
case TrackerStatusPinError: // nothing, keep error as it was
case TrackerStatusPinning:
case api.TrackerStatusPinError: // nothing, keep error as it was
case api.TrackerStatusPinning:
if time.Since(p.TS) > PinningTimeout {
mpt.setError(c, errPinningTimeout)
}
case TrackerStatusUnpinning, TrackerStatusUnpinError:
mpt.set(c, TrackerStatusUnpinned)
case TrackerStatusUnpinned: // nothing
default:
case api.TrackerStatusUnpinning, api.TrackerStatusUnpinError:
mpt.set(c, api.TrackerStatusUnpinned)
case api.TrackerStatusUnpinned: // nothing
default: // remote
}
}
return mpt.get(c)
@ -333,20 +389,21 @@ func (mpt *MapPinTracker) syncStatus(c *cid.Cid, ips IPFSPinStatus) PinInfo {
// Recover will re-track or re-untrack a Cid in error state,
// possibly retriggering an IPFS pinning operation and returning
// only when it is done.
func (mpt *MapPinTracker) Recover(c *cid.Cid) (PinInfo, error) {
// only when it is done. The pinning/unpinning operation happens
// synchronously, jumping the queues.
func (mpt *MapPinTracker) Recover(c *cid.Cid) (api.PinInfo, error) {
p := mpt.get(c)
if p.Status != TrackerStatusPinError &&
p.Status != TrackerStatusUnpinError {
if p.Status != api.TrackerStatusPinError &&
p.Status != api.TrackerStatusUnpinError {
return p, nil
}
logger.Infof("Recovering %s", c)
var err error
switch p.Status {
case TrackerStatusPinError:
err = mpt.Track(c)
case TrackerStatusUnpinError:
err = mpt.Untrack(c)
case api.TrackerStatusPinError:
err = mpt.pin(api.CidArg{Cid: c})
case api.TrackerStatusUnpinError:
err = mpt.unpin(api.CidArg{Cid: c})
}
if err != nil {
logger.Errorf("error recovering %s: %s", c, err)

View File

@ -1,61 +0,0 @@
package ipfscluster
import (
"sync"
cid "github.com/ipfs/go-cid"
)
// MapState is a very simple database to store the state of the system
// using a Go map. It is thread safe. It implements the State interface.
type MapState struct {
pinMux sync.RWMutex
PinMap map[string]struct{}
peerMux sync.RWMutex
PeerMap map[string]string
}
// NewMapState initializes the internal map and returns a new MapState object.
func NewMapState() *MapState {
return &MapState{
PinMap: make(map[string]struct{}),
PeerMap: make(map[string]string),
}
}
// AddPin adds a Cid to the internal map.
func (st *MapState) AddPin(c *cid.Cid) error {
st.pinMux.Lock()
defer st.pinMux.Unlock()
var a struct{}
st.PinMap[c.String()] = a
return nil
}
// RmPin removes a Cid from the internal map.
func (st *MapState) RmPin(c *cid.Cid) error {
st.pinMux.Lock()
defer st.pinMux.Unlock()
delete(st.PinMap, c.String())
return nil
}
// HasPin returns true if the Cid belongs to the State.
func (st *MapState) HasPin(c *cid.Cid) bool {
st.pinMux.RLock()
defer st.pinMux.RUnlock()
_, ok := st.PinMap[c.String()]
return ok
}
// ListPins provides a list of Cids in the State.
func (st *MapState) ListPins() []*cid.Cid {
st.pinMux.RLock()
defer st.pinMux.RUnlock()
cids := make([]*cid.Cid, 0, len(st.PinMap))
for k := range st.PinMap {
c, _ := cid.Decode(k)
cids = append(cids, c)
}
return cids
}

View File

@ -6,13 +6,15 @@ import (
"testing"
"time"
"github.com/ipfs/ipfs-cluster/test"
cid "github.com/ipfs/go-cid"
ma "github.com/multiformats/go-multiaddr"
)
func peerManagerClusters(t *testing.T) ([]*Cluster, []*ipfsMock) {
func peerManagerClusters(t *testing.T) ([]*Cluster, []*test.IpfsMock) {
cls := make([]*Cluster, nClusters, nClusters)
mocks := make([]*ipfsMock, nClusters, nClusters)
mocks := make([]*test.IpfsMock, nClusters, nClusters)
var wg sync.WaitGroup
for i := 0; i < nClusters; i++ {
wg.Add(1)
@ -53,7 +55,7 @@ func TestClustersPeerAdd(t *testing.T) {
}
}
h, _ := cid.Decode(testCid)
h, _ := cid.Decode(test.TestCid1)
err := clusters[1].Pin(h)
if err != nil {
t.Fatal(err)
@ -160,6 +162,8 @@ func TestClustersPeerRemove(t *testing.T) {
t.Error(err)
}
delay()
f := func(t *testing.T, c *Cluster) {
if c.ID().ID == p { //This is the removed cluster
_, ok := <-c.Done()
@ -215,7 +219,7 @@ func TestClustersPeerJoin(t *testing.T) {
t.Fatal(err)
}
}
hash, _ := cid.Decode(testCid)
hash, _ := cid.Decode(test.TestCid1)
clusters[0].Pin(hash)
delay()
@ -225,7 +229,7 @@ func TestClustersPeerJoin(t *testing.T) {
t.Error("all peers should be connected")
}
pins := c.Pins()
if len(pins) != 1 || !pins[0].Equals(hash) {
if len(pins) != 1 || !pins[0].Cid.Equals(hash) {
t.Error("all peers should have pinned the cid")
}
}
@ -248,7 +252,7 @@ func TestClustersPeerJoinAllAtOnce(t *testing.T) {
}
runF(t, clusters[1:], f)
hash, _ := cid.Decode(testCid)
hash, _ := cid.Decode(test.TestCid1)
clusters[0].Pin(hash)
delay()
@ -258,7 +262,7 @@ func TestClustersPeerJoinAllAtOnce(t *testing.T) {
t.Error("all peers should be connected")
}
pins := c.Pins()
if len(pins) != 1 || !pins[0].Equals(hash) {
if len(pins) != 1 || !pins[0].Cid.Equals(hash) {
t.Error("all peers should have pinned the cid")
}
}
@ -290,7 +294,7 @@ func TestClustersPeerJoinAllAtOnceWithRandomBootstrap(t *testing.T) {
}
runF(t, clusters[2:], f)
hash, _ := cid.Decode(testCid)
hash, _ := cid.Decode(test.TestCid1)
clusters[0].Pin(hash)
delay()
@ -300,7 +304,7 @@ func TestClustersPeerJoinAllAtOnceWithRandomBootstrap(t *testing.T) {
t.Error("all peers should be connected")
}
pins := c.Pins()
if len(pins) != 1 || !pins[0].Equals(hash) {
if len(pins) != 1 || !pins[0].Cid.Equals(hash) {
t.Error("all peers should have pinned the cid")
}
}

220
peer_monitor.go Normal file
View File

@ -0,0 +1,220 @@
package ipfscluster
import (
"context"
"errors"
"sync"
rpc "github.com/hsanjuan/go-libp2p-gorpc"
peer "github.com/libp2p/go-libp2p-peer"
"github.com/ipfs/ipfs-cluster/api"
)
// AlertChannelCap specifies the capacity (buffer size) of the alerts channel.
var AlertChannelCap = 256
// peerMetrics is a circular queue of metrics for a single peer
type peerMetrics struct {
last int
window []api.Metric
// mux sync.RWMutex
}
func newPeerMetrics(windowCap int) *peerMetrics {
w := make([]api.Metric, 0, windowCap)
return &peerMetrics{0, w}
}
func (pmets *peerMetrics) add(m api.Metric) {
// pmets.mux.Lock()
// defer pmets.mux.Unlock()
if len(pmets.window) < cap(pmets.window) {
pmets.window = append(pmets.window, m)
pmets.last = len(pmets.window) - 1
return
}
// len == cap
pmets.last = (pmets.last + 1) % cap(pmets.window)
pmets.window[pmets.last] = m
return
}
func (pmets *peerMetrics) latest() (api.Metric, error) {
// pmets.mux.RLock()
// defer pmets.mux.RUnlock()
if len(pmets.window) == 0 {
return api.Metric{}, errors.New("no metrics")
}
return pmets.window[pmets.last], nil
}
// all returns the metrics ordered from newest to oldest
func (pmets *peerMetrics) all() []api.Metric {
// pmets.mux.RLock()
// pmets.mux.RUnlock()
wlen := len(pmets.window)
res := make([]api.Metric, 0, wlen)
if wlen == 0 {
return res
}
for i := pmets.last; i >= 0; i-- {
res = append(res, pmets.window[i])
}
for i := wlen - 1; i > pmets.last; i-- { // wrap around: the remaining, older entries
res = append(res, pmets.window[i])
}
return res
}
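
A small, hypothetical sketch of how the window wraps (metric values are made up; real metrics also get a TTL via SetTTL):

pm := newPeerMetrics(2) // window capacity of 2
pm.add(api.Metric{Name: "ping", Value: "1", Valid: true})
pm.add(api.Metric{Name: "ping", Value: "2", Valid: true})
pm.add(api.Metric{Name: "ping", Value: "3", Valid: true}) // wraps around, overwriting "1"
// pm.latest() now returns the metric with Value "3".
// pm.all() returns the metrics with Values "3", "2": newest to oldest.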
type metricsByPeer map[peer.ID]*peerMetrics
// StdPeerMonitor is a component in charge of monitoring peers, logging
// metrics and detecting failures
type StdPeerMonitor struct {
ctx context.Context
cancel func()
rpcClient *rpc.Client
rpcReady chan struct{}
metrics map[string]metricsByPeer
metricsMux sync.RWMutex
windowCap int
alerts chan api.Alert
shutdownLock sync.Mutex
shutdown bool
wg sync.WaitGroup
}
// NewStdPeerMonitor creates a new monitor.
func NewStdPeerMonitor(windowCap int) *StdPeerMonitor {
if windowCap <= 0 {
panic("windowCap too small")
}
ctx, cancel := context.WithCancel(context.Background())
mon := &StdPeerMonitor{
ctx: ctx,
cancel: cancel,
rpcReady: make(chan struct{}, 1),
metrics: make(map[string]metricsByPeer),
windowCap: windowCap,
alerts:    make(chan api.Alert, AlertChannelCap),
}
go mon.run()
return mon
}
func (mon *StdPeerMonitor) run() {
select {
case <-mon.rpcReady:
//go mon.Heartbeat()
case <-mon.ctx.Done():
}
}
// SetClient saves the given rpc.Client for later use
func (mon *StdPeerMonitor) SetClient(c *rpc.Client) {
mon.rpcClient = c
mon.rpcReady <- struct{}{}
}
// Shutdown stops the peer monitor. In particular, it will
// no longer deliver any alerts.
func (mon *StdPeerMonitor) Shutdown() error {
mon.shutdownLock.Lock()
defer mon.shutdownLock.Unlock()
if mon.shutdown {
logger.Warning("StdPeerMonitor already shut down")
return nil
}
logger.Info("stopping StdPeerMonitor")
close(mon.rpcReady)
mon.cancel()
mon.wg.Wait()
mon.shutdown = true
return nil
}
// LogMetric stores a metric so it can later be retrieved.
func (mon *StdPeerMonitor) LogMetric(m api.Metric) {
mon.metricsMux.Lock()
defer mon.metricsMux.Unlock()
name := m.Name
peer := m.Peer
mbyp, ok := mon.metrics[name]
if !ok {
mbyp = make(metricsByPeer)
mon.metrics[name] = mbyp
}
pmets, ok := mbyp[peer]
if !ok {
pmets = newPeerMetrics(mon.windowCap)
mbyp[peer] = pmets
}
logger.Debugf("logged '%s' metric from '%s'", name, peer)
pmets.add(m)
}
// func (mon *StdPeerMonitor) getLastMetric(name string, p peer.ID) api.Metric {
// mon.metricsMux.RLock()
// defer mon.metricsMux.RUnlock()
// emptyMetric := api.Metric{
// Name: name,
// Peer: p,
// Valid: false,
// }
// mbyp, ok := mon.metrics[name]
// if !ok {
// return emptyMetric
// }
// pmets, ok := mbyp[p]
// if !ok {
// return emptyMetric
// }
// metric, err := pmets.latest()
// if err != nil {
// return emptyMetric
// }
// return metric
// }
// LastMetrics returns the last known valid metrics of a given type
func (mon *StdPeerMonitor) LastMetrics(name string) []api.Metric {
mon.metricsMux.RLock()
defer mon.metricsMux.RUnlock()
mbyp, ok := mon.metrics[name]
if !ok {
return []api.Metric{}
}
metrics := make([]api.Metric, 0, len(mbyp))
for _, peerMetrics := range mbyp {
last, err := peerMetrics.latest()
if err != nil || last.Discard() {
continue
}
metrics = append(metrics, last)
}
return metrics
}
// Alerts returns a channel on which alerts are sent when the
// monitor detects a failure.
func (mon *StdPeerMonitor) Alerts() <-chan api.Alert {
return mon.alerts
}

100
peer_monitor_test.go Normal file
View File

@ -0,0 +1,100 @@
package ipfscluster
import (
"fmt"
"testing"
peer "github.com/libp2p/go-libp2p-peer"
"github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/test"
)
var metricCounter = 0
func testPeerMonitor(t *testing.T) *StdPeerMonitor {
mock := test.NewMockRPCClient(t)
mon := NewStdPeerMonitor(2)
mon.SetClient(mock)
return mon
}
func newMetric(n string, p peer.ID) api.Metric {
m := api.Metric{
Name: n,
Peer: p,
Value: fmt.Sprintf("%d", metricCounter),
Valid: true,
}
m.SetTTL(5)
metricCounter++
return m
}
func TestPeerMonitorShutdown(t *testing.T) {
pm := testPeerMonitor(t)
err := pm.Shutdown()
if err != nil {
t.Error(err)
}
err = pm.Shutdown()
if err != nil {
t.Error(err)
}
}
func TestPeerMonitorLogMetric(t *testing.T) {
pm := testPeerMonitor(t)
defer pm.Shutdown()
metricCounter = 0
// don't fill the window
pm.LogMetric(newMetric("test", test.TestPeerID1))
pm.LogMetric(newMetric("test", test.TestPeerID2))
pm.LogMetric(newMetric("test", test.TestPeerID3))
// fill window
pm.LogMetric(newMetric("test2", test.TestPeerID3))
pm.LogMetric(newMetric("test2", test.TestPeerID3))
pm.LogMetric(newMetric("test2", test.TestPeerID3))
pm.LogMetric(newMetric("test2", test.TestPeerID3))
lastMetrics := pm.LastMetrics("testbad")
if len(lastMetrics) != 0 {
t.Logf("%+v", lastMetrics)
t.Error("metrics should be empty")
}
lastMetrics = pm.LastMetrics("test")
if len(lastMetrics) != 3 {
t.Error("metrics should correspond to 3 hosts")
}
for _, v := range lastMetrics {
switch v.Peer {
case test.TestPeerID1:
if v.Value != "0" {
t.Error("bad metric value")
}
case test.TestPeerID2:
if v.Value != "1" {
t.Error("bad metric value")
}
case test.TestPeerID3:
if v.Value != "2" {
t.Error("bad metric value")
}
default:
t.Error("bad peer")
}
}
lastMetrics = pm.LastMetrics("test2")
if len(lastMetrics) != 1 {
t.Fatal("should only be one metric")
}
if lastMetrics[0].Value != fmt.Sprintf("%d", metricCounter-1) {
t.Error("metric is not last")
}
}

View File

@ -2,7 +2,6 @@ package ipfscluster
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"net"
@ -12,6 +11,8 @@ import (
"sync"
"time"
"github.com/ipfs/ipfs-cluster/api"
mux "github.com/gorilla/mux"
rpc "github.com/hsanjuan/go-libp2p-gorpc"
cid "github.com/ipfs/go-cid"
@ -69,90 +70,6 @@ func (e errorResp) Error() string {
return e.Message
}
type versionResp struct {
Version string `json:"version"`
}
type pinResp struct {
Pinned string `json:"pinned"`
}
type unpinResp struct {
Unpinned string `json:"unpinned"`
}
type statusInfo struct {
Status string `json:"status"`
Error string `json:"error,omitempty"`
}
type statusCidResp struct {
Cid string `json:"cid"`
PeerMap map[string]statusInfo `json:"peer_map"`
}
type restIPFSIDResp struct {
ID string `json:"id"`
Addresses []string `json:"addresses"`
Error string `json:"error,omitempty"`
}
func newRestIPFSIDResp(id IPFSID) *restIPFSIDResp {
addrs := make([]string, len(id.Addresses), len(id.Addresses))
for i, a := range id.Addresses {
addrs[i] = a.String()
}
return &restIPFSIDResp{
ID: id.ID.Pretty(),
Addresses: addrs,
Error: id.Error,
}
}
type restIDResp struct {
ID string `json:"id"`
PublicKey string `json:"public_key"`
Addresses []string `json:"addresses"`
ClusterPeers []string `json:"cluster_peers"`
Version string `json:"version"`
Commit string `json:"commit"`
RPCProtocolVersion string `json:"rpc_protocol_version"`
Error string `json:"error,omitempty"`
IPFS *restIPFSIDResp `json:"ipfs"`
}
func newRestIDResp(id ID) *restIDResp {
pubKey := ""
if id.PublicKey != nil {
keyBytes, err := id.PublicKey.Bytes()
if err == nil {
pubKey = base64.StdEncoding.EncodeToString(keyBytes)
}
}
addrs := make([]string, len(id.Addresses), len(id.Addresses))
for i, a := range id.Addresses {
addrs[i] = a.String()
}
peers := make([]string, len(id.ClusterPeers), len(id.ClusterPeers))
for i, a := range id.ClusterPeers {
peers[i] = a.String()
}
return &restIDResp{
ID: id.ID.Pretty(),
PublicKey: pubKey,
Addresses: addrs,
ClusterPeers: peers,
Version: id.Version,
Commit: id.Commit,
RPCProtocolVersion: string(id.RPCProtocolVersion),
Error: id.Error,
IPFS: newRestIPFSIDResp(id.IPFS),
}
}
type statusResp []statusCidResp
// NewRESTAPI creates a new object which is ready to be
// started.
func NewRESTAPI(cfg *Config) (*RESTAPI, error) {
@ -209,105 +126,105 @@ func NewRESTAPI(cfg *Config) (*RESTAPI, error) {
return api, nil
}
func (api *RESTAPI) routes() []route {
func (rest *RESTAPI) routes() []route {
return []route{
{
"ID",
"GET",
"/id",
api.idHandler,
rest.idHandler,
},
{
"Version",
"GET",
"/version",
api.versionHandler,
rest.versionHandler,
},
{
"Peers",
"GET",
"/peers",
api.peerListHandler,
rest.peerListHandler,
},
{
"PeerAdd",
"POST",
"/peers",
api.peerAddHandler,
rest.peerAddHandler,
},
{
"PeerRemove",
"DELETE",
"/peers/{peer}",
api.peerRemoveHandler,
rest.peerRemoveHandler,
},
{
"Pins",
"GET",
"/pinlist",
api.pinListHandler,
rest.pinListHandler,
},
{
"StatusAll",
"GET",
"/pins",
api.statusAllHandler,
rest.statusAllHandler,
},
{
"SyncAll",
"POST",
"/pins/sync",
api.syncAllHandler,
rest.syncAllHandler,
},
{
"Status",
"GET",
"/pins/{hash}",
api.statusHandler,
rest.statusHandler,
},
{
"Pin",
"POST",
"/pins/{hash}",
api.pinHandler,
rest.pinHandler,
},
{
"Unpin",
"DELETE",
"/pins/{hash}",
api.unpinHandler,
rest.unpinHandler,
},
{
"Sync",
"POST",
"/pins/{hash}/sync",
api.syncHandler,
rest.syncHandler,
},
{
"Recover",
"POST",
"/pins/{hash}/recover",
api.recoverHandler,
rest.recoverHandler,
},
}
}
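
For reference, the resulting HTTP surface can be exercised with plain net/http; this is a hypothetical client snippet, and both the address and the hash are placeholders rather than values from this change:

// 127.0.0.1:9094 stands for whatever apiAddr the node is configured with;
// hash must be a valid CID string.
resp, err := http.Post("http://127.0.0.1:9094/pins/"+hash, "application/json", nil)
if err == nil {
    resp.Body.Close() // the Pin route replies 202 Accepted; failures come back as JSON errors
}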
func (api *RESTAPI) run() {
api.wg.Add(1)
func (rest *RESTAPI) run() {
rest.wg.Add(1)
go func() {
defer api.wg.Done()
defer rest.wg.Done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
api.ctx = ctx
rest.ctx = ctx
<-api.rpcReady
<-rest.rpcReady
logger.Infof("REST API: %s", api.apiAddr)
err := api.server.Serve(api.listener)
logger.Infof("REST API: %s", rest.apiAddr)
err := rest.server.Serve(rest.listener)
if err != nil && !strings.Contains(err.Error(), "closed network connection") {
logger.Error(err)
}
@ -315,79 +232,68 @@ func (api *RESTAPI) run() {
}
// Shutdown stops any API listeners.
func (api *RESTAPI) Shutdown() error {
api.shutdownLock.Lock()
defer api.shutdownLock.Unlock()
func (rest *RESTAPI) Shutdown() error {
rest.shutdownLock.Lock()
defer rest.shutdownLock.Unlock()
if api.shutdown {
if rest.shutdown {
logger.Debug("already shutdown")
return nil
}
logger.Info("stopping Cluster API")
close(api.rpcReady)
close(rest.rpcReady)
// Cancel any outstanding ops
api.server.SetKeepAlivesEnabled(false)
api.listener.Close()
rest.server.SetKeepAlivesEnabled(false)
rest.listener.Close()
api.wg.Wait()
api.shutdown = true
rest.wg.Wait()
rest.shutdown = true
return nil
}
// SetClient makes the component ready to perform RPC
// requests.
func (api *RESTAPI) SetClient(c *rpc.Client) {
api.rpcClient = c
api.rpcReady <- struct{}{}
func (rest *RESTAPI) SetClient(c *rpc.Client) {
rest.rpcClient = c
rest.rpcReady <- struct{}{}
}
func (api *RESTAPI) idHandler(w http.ResponseWriter, r *http.Request) {
idSerial := IDSerial{}
err := api.rpcClient.Call("",
func (rest *RESTAPI) idHandler(w http.ResponseWriter, r *http.Request) {
idSerial := api.IDSerial{}
err := rest.rpcClient.Call("",
"Cluster",
"ID",
struct{}{},
&idSerial)
if checkRPCErr(w, err) {
resp := newRestIDResp(idSerial.ToID())
sendJSONResponse(w, 200, resp)
}
sendResponse(w, err, idSerial)
}
func (api *RESTAPI) versionHandler(w http.ResponseWriter, r *http.Request) {
var v string
err := api.rpcClient.Call("",
func (rest *RESTAPI) versionHandler(w http.ResponseWriter, r *http.Request) {
var v api.Version
err := rest.rpcClient.Call("",
"Cluster",
"Version",
struct{}{},
&v)
if checkRPCErr(w, err) {
sendJSONResponse(w, 200, versionResp{v})
}
sendResponse(w, err, v)
}
func (api *RESTAPI) peerListHandler(w http.ResponseWriter, r *http.Request) {
var peersSerial []IDSerial
err := api.rpcClient.Call("",
func (rest *RESTAPI) peerListHandler(w http.ResponseWriter, r *http.Request) {
var peersSerial []api.IDSerial
err := rest.rpcClient.Call("",
"Cluster",
"Peers",
struct{}{},
&peersSerial)
if checkRPCErr(w, err) {
var resp []*restIDResp
for _, pS := range peersSerial {
p := pS.ToID()
resp = append(resp, newRestIDResp(p))
}
sendJSONResponse(w, 200, resp)
}
sendResponse(w, err, peersSerial)
}
func (api *RESTAPI) peerAddHandler(w http.ResponseWriter, r *http.Request) {
func (rest *RESTAPI) peerAddHandler(w http.ResponseWriter, r *http.Request) {
dec := json.NewDecoder(r.Body)
defer r.Body.Close()
@ -404,145 +310,123 @@ func (api *RESTAPI) peerAddHandler(w http.ResponseWriter, r *http.Request) {
return
}
var ids IDSerial
err = api.rpcClient.Call("",
var ids api.IDSerial
err = rest.rpcClient.Call("",
"Cluster",
"PeerAdd",
MultiaddrToSerial(mAddr),
api.MultiaddrToSerial(mAddr),
&ids)
if checkRPCErr(w, err) {
resp := newRestIDResp(ids.ToID())
sendJSONResponse(w, 200, resp)
}
sendResponse(w, err, ids)
}
func (api *RESTAPI) peerRemoveHandler(w http.ResponseWriter, r *http.Request) {
func (rest *RESTAPI) peerRemoveHandler(w http.ResponseWriter, r *http.Request) {
if p := parsePidOrError(w, r); p != "" {
err := api.rpcClient.Call("",
err := rest.rpcClient.Call("",
"Cluster",
"PeerRemove",
p,
&struct{}{})
if checkRPCErr(w, err) {
sendEmptyResponse(w)
}
sendEmptyResponse(w, err)
}
}
func (api *RESTAPI) pinHandler(w http.ResponseWriter, r *http.Request) {
if c := parseCidOrError(w, r); c != nil {
err := api.rpcClient.Call("",
func (rest *RESTAPI) pinHandler(w http.ResponseWriter, r *http.Request) {
if c := parseCidOrError(w, r); c.Cid != "" {
err := rest.rpcClient.Call("",
"Cluster",
"Pin",
c,
&struct{}{})
if checkRPCErr(w, err) {
sendAcceptedResponse(w)
}
sendAcceptedResponse(w, err)
}
}
func (api *RESTAPI) unpinHandler(w http.ResponseWriter, r *http.Request) {
if c := parseCidOrError(w, r); c != nil {
err := api.rpcClient.Call("",
func (rest *RESTAPI) unpinHandler(w http.ResponseWriter, r *http.Request) {
if c := parseCidOrError(w, r); c.Cid != "" {
err := rest.rpcClient.Call("",
"Cluster",
"Unpin",
c,
&struct{}{})
if checkRPCErr(w, err) {
sendAcceptedResponse(w)
}
sendAcceptedResponse(w, err)
}
}
func (api *RESTAPI) pinListHandler(w http.ResponseWriter, r *http.Request) {
var pins []string
err := api.rpcClient.Call("",
func (rest *RESTAPI) pinListHandler(w http.ResponseWriter, r *http.Request) {
var pins []api.CidArgSerial
err := rest.rpcClient.Call("",
"Cluster",
"PinList",
struct{}{},
&pins)
if checkRPCErr(w, err) {
sendJSONResponse(w, 200, pins)
}
sendResponse(w, err, pins)
}
func (api *RESTAPI) statusAllHandler(w http.ResponseWriter, r *http.Request) {
var pinInfos []GlobalPinInfo
err := api.rpcClient.Call("",
func (rest *RESTAPI) statusAllHandler(w http.ResponseWriter, r *http.Request) {
var pinInfos []api.GlobalPinInfoSerial
err := rest.rpcClient.Call("",
"Cluster",
"StatusAll",
struct{}{},
&pinInfos)
if checkRPCErr(w, err) {
sendStatusResponse(w, http.StatusOK, pinInfos)
}
sendResponse(w, err, pinInfos)
}
func (api *RESTAPI) statusHandler(w http.ResponseWriter, r *http.Request) {
if c := parseCidOrError(w, r); c != nil {
var pinInfo GlobalPinInfo
err := api.rpcClient.Call("",
func (rest *RESTAPI) statusHandler(w http.ResponseWriter, r *http.Request) {
if c := parseCidOrError(w, r); c.Cid != "" {
var pinInfo api.GlobalPinInfoSerial
err := rest.rpcClient.Call("",
"Cluster",
"Status",
c,
&pinInfo)
if checkRPCErr(w, err) {
sendStatusCidResponse(w, http.StatusOK, pinInfo)
}
sendResponse(w, err, pinInfo)
}
}
func (api *RESTAPI) syncAllHandler(w http.ResponseWriter, r *http.Request) {
var pinInfos []GlobalPinInfo
err := api.rpcClient.Call("",
func (rest *RESTAPI) syncAllHandler(w http.ResponseWriter, r *http.Request) {
var pinInfos []api.GlobalPinInfoSerial
err := rest.rpcClient.Call("",
"Cluster",
"SyncAll",
struct{}{},
&pinInfos)
if checkRPCErr(w, err) {
sendStatusResponse(w, http.StatusAccepted, pinInfos)
}
sendResponse(w, err, pinInfos)
}
func (api *RESTAPI) syncHandler(w http.ResponseWriter, r *http.Request) {
if c := parseCidOrError(w, r); c != nil {
var pinInfo GlobalPinInfo
err := api.rpcClient.Call("",
func (rest *RESTAPI) syncHandler(w http.ResponseWriter, r *http.Request) {
if c := parseCidOrError(w, r); c.Cid != "" {
var pinInfo api.GlobalPinInfoSerial
err := rest.rpcClient.Call("",
"Cluster",
"Sync",
c,
&pinInfo)
if checkRPCErr(w, err) {
sendStatusCidResponse(w, http.StatusOK, pinInfo)
}
sendResponse(w, err, pinInfo)
}
}
func (api *RESTAPI) recoverHandler(w http.ResponseWriter, r *http.Request) {
if c := parseCidOrError(w, r); c != nil {
var pinInfo GlobalPinInfo
err := api.rpcClient.Call("",
func (rest *RESTAPI) recoverHandler(w http.ResponseWriter, r *http.Request) {
if c := parseCidOrError(w, r); c.Cid != "" {
var pinInfo api.GlobalPinInfoSerial
err := rest.rpcClient.Call("",
"Cluster",
"Recover",
c,
&pinInfo)
if checkRPCErr(w, err) {
sendStatusCidResponse(w, http.StatusOK, pinInfo)
}
sendResponse(w, err, pinInfo)
}
}
func parseCidOrError(w http.ResponseWriter, r *http.Request) *CidArg {
func parseCidOrError(w http.ResponseWriter, r *http.Request) api.CidArgSerial {
vars := mux.Vars(r)
hash := vars["hash"]
_, err := cid.Decode(hash)
if err != nil {
sendErrorResponse(w, 400, "error decoding Cid: "+err.Error())
return nil
return api.CidArgSerial{Cid: ""}
}
return &CidArg{hash}
return api.CidArgSerial{Cid: hash}
}
func parsePidOrError(w http.ResponseWriter, r *http.Request) peer.ID {
@ -556,6 +440,12 @@ func parsePidOrError(w http.ResponseWriter, r *http.Request) peer.ID {
return pid
}
func sendResponse(w http.ResponseWriter, rpcErr error, resp interface{}) {
if checkRPCErr(w, rpcErr) {
sendJSONResponse(w, 200, resp)
}
}
// checkRPCErr takes care of returning standard error responses if we
// pass an error to it. It returns true when everything is OK (no error
// was handled), or false otherwise.
@ -567,12 +457,16 @@ func checkRPCErr(w http.ResponseWriter, err error) bool {
return true
}
func sendEmptyResponse(w http.ResponseWriter) {
w.WriteHeader(http.StatusNoContent)
func sendEmptyResponse(w http.ResponseWriter, rpcErr error) {
if checkRPCErr(w, rpcErr) {
w.WriteHeader(http.StatusNoContent)
}
}
func sendAcceptedResponse(w http.ResponseWriter) {
w.WriteHeader(http.StatusAccepted)
func sendAcceptedResponse(w http.ResponseWriter, rpcErr error) {
if checkRPCErr(w, rpcErr) {
w.WriteHeader(http.StatusAccepted)
}
}
func sendJSONResponse(w http.ResponseWriter, code int, resp interface{}) {
@ -587,30 +481,3 @@ func sendErrorResponse(w http.ResponseWriter, code int, msg string) {
logger.Errorf("sending error response: %d: %s", code, msg)
sendJSONResponse(w, code, errorResp)
}
func transformPinToStatusCid(p GlobalPinInfo) statusCidResp {
s := statusCidResp{}
s.Cid = p.Cid.String()
s.PeerMap = make(map[string]statusInfo)
for k, v := range p.PeerMap {
s.PeerMap[k.Pretty()] = statusInfo{
Status: v.Status.String(),
Error: v.Error,
}
}
return s
}
func sendStatusResponse(w http.ResponseWriter, code int, data []GlobalPinInfo) {
pins := make(statusResp, 0, len(data))
for _, d := range data {
pins = append(pins, transformPinToStatusCid(d))
}
sendJSONResponse(w, code, pins)
}
func sendStatusCidResponse(w http.ResponseWriter, code int, data GlobalPinInfo) {
st := transformPinToStatusCid(data)
sendJSONResponse(w, code, st)
}

View File

@ -7,6 +7,9 @@ import (
"io/ioutil"
"net/http"
"testing"
"github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/test"
)
var (
@ -16,16 +19,16 @@ var (
func testRESTAPI(t *testing.T) *RESTAPI {
//logging.SetDebugLogging()
cfg := testingConfig()
api, err := NewRESTAPI(cfg)
rest, err := NewRESTAPI(cfg)
if err != nil {
t.Fatal("should be able to create a new Api: ", err)
}
// No keep alive! Otherwise tests hang with
// connections re-used from previous tests
api.server.SetKeepAlivesEnabled(false)
api.SetClient(mockRPCClient(t))
return api
rest.server.SetKeepAlivesEnabled(false)
rest.SetClient(test.NewMockRPCClient(t))
return rest
}
func processResp(t *testing.T, httpResp *http.Response, err error, resp interface{}) {
@ -65,29 +68,29 @@ func makeDelete(t *testing.T, path string, resp interface{}) {
}
func TestRESTAPIShutdown(t *testing.T) {
api := testRESTAPI(t)
err := api.Shutdown()
rest := testRESTAPI(t)
err := rest.Shutdown()
if err != nil {
t.Error("should shutdown cleanly: ", err)
}
// test shutting down twice
api.Shutdown()
rest.Shutdown()
}
func TestRestAPIIDEndpoint(t *testing.T) {
api := testRESTAPI(t)
defer api.Shutdown()
id := restIDResp{}
rest := testRESTAPI(t)
defer rest.Shutdown()
id := api.IDSerial{}
makeGet(t, "/id", &id)
if id.ID != testPeerID.Pretty() {
if id.ID != test.TestPeerID1.Pretty() {
t.Error("expected correct id")
}
}
func TestRESTAPIVersionEndpoint(t *testing.T) {
api := testRESTAPI(t)
defer api.Shutdown()
ver := versionResp{}
rest := testRESTAPI(t)
defer rest.Shutdown()
ver := api.Version{}
makeGet(t, "/version", &ver)
if ver.Version != "0.0.mock" {
t.Error("expected correct version")
@ -95,30 +98,30 @@ func TestRESTAPIVersionEndpoint(t *testing.T) {
}
func TestRESTAPIPeerstEndpoint(t *testing.T) {
api := testRESTAPI(t)
defer api.Shutdown()
rest := testRESTAPI(t)
defer rest.Shutdown()
var list []restIDResp
var list []api.IDSerial
makeGet(t, "/peers", &list)
if len(list) != 1 {
t.Fatal("expected 1 element")
}
if list[0].ID != testPeerID.Pretty() {
if list[0].ID != test.TestPeerID1.Pretty() {
t.Error("expected a different peer id list: ", list)
}
}
func TestRESTAPIPeerAddEndpoint(t *testing.T) {
api := testRESTAPI(t)
defer api.Shutdown()
rest := testRESTAPI(t)
defer rest.Shutdown()
id := restIDResp{}
id := api.IDSerial{}
// post with valid body
body := fmt.Sprintf("{\"peer_multiaddress\":\"/ip4/1.2.3.4/tcp/1234/ipfs/%s\"}", testPeerID.Pretty())
body := fmt.Sprintf("{\"peer_multiaddress\":\"/ip4/1.2.3.4/tcp/1234/ipfs/%s\"}", test.TestPeerID1.Pretty())
t.Log(body)
makePost(t, "/peers", []byte(body), &id)
if id.ID != testPeerID.Pretty() {
if id.ID != test.TestPeerID1.Pretty() {
t.Error("expected correct ID")
}
if id.Error != "" {
@ -139,22 +142,22 @@ func TestRESTAPIPeerAddEndpoint(t *testing.T) {
}
func TestRESTAPIPeerRemoveEndpoint(t *testing.T) {
api := testRESTAPI(t)
defer api.Shutdown()
rest := testRESTAPI(t)
defer rest.Shutdown()
makeDelete(t, "/peers/"+testPeerID.Pretty(), &struct{}{})
makeDelete(t, "/peers/"+test.TestPeerID1.Pretty(), &struct{}{})
}
func TestRESTAPIPinEndpoint(t *testing.T) {
api := testRESTAPI(t)
defer api.Shutdown()
rest := testRESTAPI(t)
defer rest.Shutdown()
// test regular post
makePost(t, "/pins/"+testCid, []byte{}, &struct{}{})
makePost(t, "/pins/"+test.TestCid1, []byte{}, &struct{}{})
errResp := errorResp{}
makePost(t, "/pins/"+errorCid, []byte{}, &errResp)
if errResp.Message != errBadCid.Error() {
makePost(t, "/pins/"+test.ErrorCid, []byte{}, &errResp)
if errResp.Message != test.ErrBadCid.Error() {
t.Error("expected different error: ", errResp.Message)
}
@ -165,15 +168,15 @@ func TestRESTAPIPinEndpoint(t *testing.T) {
}
func TestRESTAPIUnpinEndpoint(t *testing.T) {
api := testRESTAPI(t)
defer api.Shutdown()
rest := testRESTAPI(t)
defer rest.Shutdown()
// test regular delete
makeDelete(t, "/pins/"+testCid, &struct{}{})
makeDelete(t, "/pins/"+test.TestCid1, &struct{}{})
errResp := errorResp{}
makeDelete(t, "/pins/"+errorCid, &errResp)
if errResp.Message != errBadCid.Error() {
makeDelete(t, "/pins/"+test.ErrorCid, &errResp)
if errResp.Message != test.ErrBadCid.Error() {
t.Error("expected different error: ", errResp.Message)
}
@ -184,44 +187,44 @@ func TestRESTAPIUnpinEndpoint(t *testing.T) {
}
func TestRESTAPIPinListEndpoint(t *testing.T) {
api := testRESTAPI(t)
defer api.Shutdown()
rest := testRESTAPI(t)
defer rest.Shutdown()
var resp []string
var resp []api.CidArgSerial
makeGet(t, "/pinlist", &resp)
if len(resp) != 3 ||
resp[0] != testCid1 || resp[1] != testCid2 ||
resp[2] != testCid3 {
resp[0].Cid != test.TestCid1 || resp[1].Cid != test.TestCid2 ||
resp[2].Cid != test.TestCid3 {
t.Error("unexpected pin list: ", resp)
}
}
func TestRESTAPIStatusAllEndpoint(t *testing.T) {
api := testRESTAPI(t)
defer api.Shutdown()
rest := testRESTAPI(t)
defer rest.Shutdown()
var resp statusResp
var resp []api.GlobalPinInfoSerial
makeGet(t, "/pins", &resp)
if len(resp) != 3 ||
resp[0].Cid != testCid1 ||
resp[1].PeerMap[testPeerID.Pretty()].Status != "pinning" {
resp[0].Cid != test.TestCid1 ||
resp[1].PeerMap[test.TestPeerID1.Pretty()].Status != "pinning" {
t.Errorf("unexpected statusResp:\n %+v", resp)
}
}
func TestRESTAPIStatusEndpoint(t *testing.T) {
api := testRESTAPI(t)
defer api.Shutdown()
rest := testRESTAPI(t)
defer rest.Shutdown()
var resp statusCidResp
makeGet(t, "/pins/"+testCid, &resp)
var resp api.GlobalPinInfoSerial
makeGet(t, "/pins/"+test.TestCid1, &resp)
if resp.Cid != testCid {
if resp.Cid != test.TestCid1 {
t.Error("expected the same cid")
}
info, ok := resp.PeerMap[testPeerID.Pretty()]
info, ok := resp.PeerMap[test.TestPeerID1.Pretty()]
if !ok {
t.Fatal("expected info for testPeerID")
t.Fatal("expected info for test.TestPeerID1")
}
if info.Status != "pinned" {
t.Error("expected different status")
@ -229,32 +232,32 @@ func TestRESTAPIStatusEndpoint(t *testing.T) {
}
func TestRESTAPISyncAllEndpoint(t *testing.T) {
api := testRESTAPI(t)
defer api.Shutdown()
rest := testRESTAPI(t)
defer rest.Shutdown()
var resp statusResp
var resp []api.GlobalPinInfoSerial
makePost(t, "/pins/sync", []byte{}, &resp)
if len(resp) != 3 ||
resp[0].Cid != testCid1 ||
resp[1].PeerMap[testPeerID.Pretty()].Status != "pinning" {
resp[0].Cid != test.TestCid1 ||
resp[1].PeerMap[test.TestPeerID1.Pretty()].Status != "pinning" {
t.Errorf("unexpected statusResp:\n %+v", resp)
}
}
func TestRESTAPISyncEndpoint(t *testing.T) {
api := testRESTAPI(t)
defer api.Shutdown()
rest := testRESTAPI(t)
defer rest.Shutdown()
var resp statusCidResp
makePost(t, "/pins/"+testCid+"/sync", []byte{}, &resp)
var resp api.GlobalPinInfoSerial
makePost(t, "/pins/"+test.TestCid1+"/sync", []byte{}, &resp)
if resp.Cid != testCid {
if resp.Cid != test.TestCid1 {
t.Error("expected the same cid")
}
info, ok := resp.PeerMap[testPeerID.Pretty()]
info, ok := resp.PeerMap[test.TestPeerID1.Pretty()]
if !ok {
t.Fatal("expected info for testPeerID")
t.Fatal("expected info for test.TestPeerID1")
}
if info.Status != "pinned" {
t.Error("expected different status")
@ -262,18 +265,18 @@ func TestRESTAPISyncEndpoint(t *testing.T) {
}
func TestRESTAPIRecoverEndpoint(t *testing.T) {
api := testRESTAPI(t)
defer api.Shutdown()
rest := testRESTAPI(t)
defer rest.Shutdown()
var resp statusCidResp
makePost(t, "/pins/"+testCid+"/recover", []byte{}, &resp)
var resp api.GlobalPinInfoSerial
makePost(t, "/pins/"+test.TestCid1+"/recover", []byte{}, &resp)
if resp.Cid != testCid {
if resp.Cid != test.TestCid1 {
t.Error("expected the same cid")
}
info, ok := resp.PeerMap[testPeerID.Pretty()]
info, ok := resp.PeerMap[test.TestPeerID1.Pretty()]
if !ok {
t.Fatal("expected info for testPeerID")
t.Fatal("expected info for test.TestPeerID1")
}
if info.Status != "pinned" {
t.Error("expected different status")

View File

@ -3,8 +3,9 @@ package ipfscluster
import (
"errors"
cid "github.com/ipfs/go-cid"
peer "github.com/libp2p/go-libp2p-peer"
"github.com/ipfs/ipfs-cluster/api"
)
// RPCAPI is a go-libp2p-gorpc service which provides the internal ipfs-cluster
@ -15,31 +16,7 @@ import (
// the different components of ipfs-cluster, with very little added logic.
// Refer to documentation on those methods for details on their behaviour.
type RPCAPI struct {
cluster *Cluster
}
// CidArg is an arguments that carry a Cid. It may carry more things in the
// future.
type CidArg struct {
Cid string
}
// NewCidArg returns a CidArg which carries the given Cid. It panics if it is
// nil.
func NewCidArg(c *cid.Cid) *CidArg {
if c == nil {
panic("Cid cannot be nil")
}
return &CidArg{c.String()}
}
// CID decodes and returns a Cid from a CidArg.
func (arg *CidArg) CID() (*cid.Cid, error) {
c, err := cid.Decode(arg.Cid)
if err != nil {
return nil, err
}
return c, nil
c *Cluster
}
/*
@ -47,51 +24,45 @@ func (arg *CidArg) CID() (*cid.Cid, error) {
*/
// ID runs Cluster.ID()
func (api *RPCAPI) ID(in struct{}, out *IDSerial) error {
id := api.cluster.ID().ToSerial()
func (rpcapi *RPCAPI) ID(in struct{}, out *api.IDSerial) error {
id := rpcapi.c.ID().ToSerial()
*out = id
return nil
}
// Pin runs Cluster.Pin().
func (api *RPCAPI) Pin(in *CidArg, out *struct{}) error {
c, err := in.CID()
if err != nil {
return err
}
return api.cluster.Pin(c)
func (rpcapi *RPCAPI) Pin(in api.CidArgSerial, out *struct{}) error {
c := in.ToCidArg().Cid
return rpcapi.c.Pin(c)
}
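
For orientation, a hypothetical caller-side sketch, mirroring how the REST API and pin tracker invoke these methods through the internal rpc.Client (rpcClient stands for any component's client):

// "Cluster" is the registered service name; "Pin" resolves to RPCAPI.Pin above.
// The CID is the test fixture used elsewhere in this change.
err := rpcClient.Call("",
    "Cluster",
    "Pin",
    api.CidArgSerial{Cid: test.TestCid1},
    &struct{}{})
if err != nil {
    logger.Error(err)
}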
// Unpin runs Cluster.Unpin().
func (api *RPCAPI) Unpin(in *CidArg, out *struct{}) error {
c, err := in.CID()
if err != nil {
return err
}
return api.cluster.Unpin(c)
func (rpcapi *RPCAPI) Unpin(in api.CidArgSerial, out *struct{}) error {
c := in.ToCidArg().Cid
return rpcapi.c.Unpin(c)
}
// PinList runs Cluster.Pins().
func (api *RPCAPI) PinList(in struct{}, out *[]string) error {
cidList := api.cluster.Pins()
cidStrList := make([]string, 0, len(cidList))
func (rpcapi *RPCAPI) PinList(in struct{}, out *[]api.CidArgSerial) error {
cidList := rpcapi.c.Pins()
cidSerialList := make([]api.CidArgSerial, 0, len(cidList))
for _, c := range cidList {
cidStrList = append(cidStrList, c.String())
cidSerialList = append(cidSerialList, c.ToSerial())
}
*out = cidStrList
*out = cidSerialList
return nil
}
// Version runs Cluster.Version().
func (api *RPCAPI) Version(in struct{}, out *string) error {
*out = api.cluster.Version()
func (rpcapi *RPCAPI) Version(in struct{}, out *api.Version) error {
*out = api.Version{rpcapi.c.Version()}
return nil
}
// Peers runs Cluster.Peers().
func (api *RPCAPI) Peers(in struct{}, out *[]IDSerial) error {
peers := api.cluster.Peers()
var sPeers []IDSerial
func (rpcapi *RPCAPI) Peers(in struct{}, out *[]api.IDSerial) error {
peers := rpcapi.c.Peers()
var sPeers []api.IDSerial
for _, p := range peers {
sPeers = append(sPeers, p.ToSerial())
}
@ -100,94 +71,82 @@ func (api *RPCAPI) Peers(in struct{}, out *[]IDSerial) error {
}
// PeerAdd runs Cluster.PeerAdd().
func (api *RPCAPI) PeerAdd(in MultiaddrSerial, out *IDSerial) error {
func (rpcapi *RPCAPI) PeerAdd(in api.MultiaddrSerial, out *api.IDSerial) error {
addr := in.ToMultiaddr()
id, err := api.cluster.PeerAdd(addr)
id, err := rpcapi.c.PeerAdd(addr)
*out = id.ToSerial()
return err
}
// PeerRemove runs Cluster.PeerRm().
func (api *RPCAPI) PeerRemove(in peer.ID, out *struct{}) error {
return api.cluster.PeerRemove(in)
func (rpcapi *RPCAPI) PeerRemove(in peer.ID, out *struct{}) error {
return rpcapi.c.PeerRemove(in)
}
// Join runs Cluster.Join().
func (api *RPCAPI) Join(in MultiaddrSerial, out *struct{}) error {
func (rpcapi *RPCAPI) Join(in api.MultiaddrSerial, out *struct{}) error {
addr := in.ToMultiaddr()
err := api.cluster.Join(addr)
err := rpcapi.c.Join(addr)
return err
}
// StatusAll runs Cluster.StatusAll().
func (api *RPCAPI) StatusAll(in struct{}, out *[]GlobalPinInfo) error {
pinfo, err := api.cluster.StatusAll()
*out = pinfo
func (rpcapi *RPCAPI) StatusAll(in struct{}, out *[]api.GlobalPinInfoSerial) error {
pinfos, err := rpcapi.c.StatusAll()
*out = globalPinInfoSliceToSerial(pinfos)
return err
}
// Status runs Cluster.Status().
func (api *RPCAPI) Status(in *CidArg, out *GlobalPinInfo) error {
c, err := in.CID()
if err != nil {
return err
}
pinfo, err := api.cluster.Status(c)
*out = pinfo
func (rpcapi *RPCAPI) Status(in api.CidArgSerial, out *api.GlobalPinInfoSerial) error {
c := in.ToCidArg().Cid
pinfo, err := rpcapi.c.Status(c)
*out = pinfo.ToSerial()
return err
}
// SyncAllLocal runs Cluster.SyncAllLocal().
func (api *RPCAPI) SyncAllLocal(in struct{}, out *[]PinInfo) error {
pinfo, err := api.cluster.SyncAllLocal()
*out = pinfo
func (rpcapi *RPCAPI) SyncAllLocal(in struct{}, out *[]api.PinInfoSerial) error {
pinfos, err := rpcapi.c.SyncAllLocal()
*out = pinInfoSliceToSerial(pinfos)
return err
}
// SyncLocal runs Cluster.SyncLocal().
func (api *RPCAPI) SyncLocal(in *CidArg, out *PinInfo) error {
c, err := in.CID()
if err != nil {
return err
}
pinfo, err := api.cluster.SyncLocal(c)
*out = pinfo
func (rpcapi *RPCAPI) SyncLocal(in api.CidArgSerial, out *api.PinInfoSerial) error {
c := in.ToCidArg().Cid
pinfo, err := rpcapi.c.SyncLocal(c)
*out = pinfo.ToSerial()
return err
}
// SyncAll runs Cluster.SyncAll().
func (api *RPCAPI) SyncAll(in struct{}, out *[]GlobalPinInfo) error {
pinfo, err := api.cluster.SyncAll()
*out = pinfo
func (rpcapi *RPCAPI) SyncAll(in struct{}, out *[]api.GlobalPinInfoSerial) error {
pinfos, err := rpcapi.c.SyncAll()
*out = globalPinInfoSliceToSerial(pinfos)
return err
}
// Sync runs Cluster.Sync().
func (api *RPCAPI) Sync(in *CidArg, out *GlobalPinInfo) error {
c, err := in.CID()
if err != nil {
return err
}
pinfo, err := api.cluster.Sync(c)
*out = pinfo
func (rpcapi *RPCAPI) Sync(in api.CidArgSerial, out *api.GlobalPinInfoSerial) error {
c := in.ToCidArg().Cid
pinfo, err := rpcapi.c.Sync(c)
*out = pinfo.ToSerial()
return err
}
// StateSync runs Cluster.StateSync().
func (api *RPCAPI) StateSync(in struct{}, out *[]PinInfo) error {
pinfo, err := api.cluster.StateSync()
*out = pinfo
func (rpcapi *RPCAPI) StateSync(in struct{}, out *[]api.PinInfoSerial) error {
pinfos, err := rpcapi.c.StateSync()
*out = pinInfoSliceToSerial(pinfos)
return err
}
// Recover runs Cluster.Recover().
func (api *RPCAPI) Recover(in *CidArg, out *GlobalPinInfo) error {
c, err := in.CID()
if err != nil {
return err
}
pinfo, err := api.cluster.Recover(c)
*out = pinfo
func (rpcapi *RPCAPI) Recover(in api.CidArgSerial, out *api.GlobalPinInfoSerial) error {
c := in.ToCidArg().Cid
pinfo, err := rpcapi.c.Recover(c)
*out = pinfo.ToSerial()
return err
}
@ -196,48 +155,35 @@ func (api *RPCAPI) Recover(in *CidArg, out *GlobalPinInfo) error {
*/
// Track runs PinTracker.Track().
func (api *RPCAPI) Track(in *CidArg, out *struct{}) error {
c, err := in.CID()
if err != nil {
return err
}
return api.cluster.tracker.Track(c)
func (rpcapi *RPCAPI) Track(in api.CidArgSerial, out *struct{}) error {
return rpcapi.c.tracker.Track(in.ToCidArg())
}
// Untrack runs PinTracker.Untrack().
func (api *RPCAPI) Untrack(in *CidArg, out *struct{}) error {
c, err := in.CID()
if err != nil {
return err
}
return api.cluster.tracker.Untrack(c)
func (rpcapi *RPCAPI) Untrack(in api.CidArgSerial, out *struct{}) error {
c := in.ToCidArg().Cid
return rpcapi.c.tracker.Untrack(c)
}
// TrackerStatusAll runs PinTracker.StatusAll().
func (api *RPCAPI) TrackerStatusAll(in struct{}, out *[]PinInfo) error {
*out = api.cluster.tracker.StatusAll()
func (rpcapi *RPCAPI) TrackerStatusAll(in struct{}, out *[]api.PinInfoSerial) error {
*out = pinInfoSliceToSerial(rpcapi.c.tracker.StatusAll())
return nil
}
// TrackerStatus runs PinTracker.Status().
func (api *RPCAPI) TrackerStatus(in *CidArg, out *PinInfo) error {
c, err := in.CID()
if err != nil {
return err
}
pinfo := api.cluster.tracker.Status(c)
*out = pinfo
func (rpcapi *RPCAPI) TrackerStatus(in api.CidArgSerial, out *api.PinInfoSerial) error {
c := in.ToCidArg().Cid
pinfo := rpcapi.c.tracker.Status(c)
*out = pinfo.ToSerial()
return nil
}
// TrackerRecover runs PinTracker.Recover().
func (api *RPCAPI) TrackerRecover(in *CidArg, out *PinInfo) error {
c, err := in.CID()
if err != nil {
return err
}
pinfo, err := api.cluster.tracker.Recover(c)
*out = pinfo
func (rpcapi *RPCAPI) TrackerRecover(in api.CidArgSerial, out *api.PinInfoSerial) error {
c := in.ToCidArg().Cid
pinfo, err := rpcapi.c.tracker.Recover(c)
*out = pinfo.ToSerial()
return err
}
@ -246,37 +192,28 @@ func (api *RPCAPI) TrackerRecover(in *CidArg, out *PinInfo) error {
*/
// IPFSPin runs IPFSConnector.Pin().
func (api *RPCAPI) IPFSPin(in *CidArg, out *struct{}) error {
c, err := in.CID()
if err != nil {
return err
}
return api.cluster.ipfs.Pin(c)
func (rpcapi *RPCAPI) IPFSPin(in api.CidArgSerial, out *struct{}) error {
c := in.ToCidArg().Cid
return rpcapi.c.ipfs.Pin(c)
}
// IPFSUnpin runs IPFSConnector.Unpin().
func (api *RPCAPI) IPFSUnpin(in *CidArg, out *struct{}) error {
c, err := in.CID()
if err != nil {
return err
}
return api.cluster.ipfs.Unpin(c)
func (rpcapi *RPCAPI) IPFSUnpin(in api.CidArgSerial, out *struct{}) error {
c := in.ToCidArg().Cid
return rpcapi.c.ipfs.Unpin(c)
}
// IPFSPinLsCid runs IPFSConnector.PinLsCid().
func (api *RPCAPI) IPFSPinLsCid(in *CidArg, out *IPFSPinStatus) error {
c, err := in.CID()
if err != nil {
return err
}
b, err := api.cluster.ipfs.PinLsCid(c)
func (rpcapi *RPCAPI) IPFSPinLsCid(in api.CidArgSerial, out *api.IPFSPinStatus) error {
c := in.ToCidArg().Cid
b, err := rpcapi.c.ipfs.PinLsCid(c)
*out = b
return err
}
// IPFSPinLs runs IPFSConnector.PinLs().
func (api *RPCAPI) IPFSPinLs(in struct{}, out *map[string]IPFSPinStatus) error {
m, err := api.cluster.ipfs.PinLs()
func (rpcapi *RPCAPI) IPFSPinLs(in string, out *map[string]api.IPFSPinStatus) error {
m, err := rpcapi.c.ipfs.PinLs(in)
*out = m
return err
}
@ -286,32 +223,26 @@ func (api *RPCAPI) IPFSPinLs(in struct{}, out *map[string]IPFSPinStatus) error {
*/
// ConsensusLogPin runs Consensus.LogPin().
func (api *RPCAPI) ConsensusLogPin(in *CidArg, out *struct{}) error {
c, err := in.CID()
if err != nil {
return err
}
return api.cluster.consensus.LogPin(c)
func (rpcapi *RPCAPI) ConsensusLogPin(in api.CidArgSerial, out *struct{}) error {
c := in.ToCidArg()
return rpcapi.c.consensus.LogPin(c)
}
// ConsensusLogUnpin runs Consensus.LogUnpin().
func (api *RPCAPI) ConsensusLogUnpin(in *CidArg, out *struct{}) error {
c, err := in.CID()
if err != nil {
return err
}
return api.cluster.consensus.LogUnpin(c)
func (rpcapi *RPCAPI) ConsensusLogUnpin(in api.CidArgSerial, out *struct{}) error {
c := in.ToCidArg()
return rpcapi.c.consensus.LogUnpin(c)
}
// ConsensusLogAddPeer runs Consensus.LogAddPeer().
func (api *RPCAPI) ConsensusLogAddPeer(in MultiaddrSerial, out *struct{}) error {
func (rpcapi *RPCAPI) ConsensusLogAddPeer(in api.MultiaddrSerial, out *struct{}) error {
addr := in.ToMultiaddr()
return api.cluster.consensus.LogAddPeer(addr)
return rpcapi.c.consensus.LogAddPeer(addr)
}
// ConsensusLogRmPeer runs Consensus.LogRmPeer().
func (api *RPCAPI) ConsensusLogRmPeer(in peer.ID, out *struct{}) error {
return api.cluster.consensus.LogRmPeer(in)
func (rpcapi *RPCAPI) ConsensusLogRmPeer(in peer.ID, out *struct{}) error {
return rpcapi.c.consensus.LogRmPeer(in)
}
/*
@ -319,27 +250,49 @@ func (api *RPCAPI) ConsensusLogRmPeer(in peer.ID, out *struct{}) error {
*/
// PeerManagerAddPeer runs peerManager.addPeer().
func (api *RPCAPI) PeerManagerAddPeer(in MultiaddrSerial, out *struct{}) error {
func (rpcapi *RPCAPI) PeerManagerAddPeer(in api.MultiaddrSerial, out *struct{}) error {
addr := in.ToMultiaddr()
err := api.cluster.peerManager.addPeer(addr)
err := rpcapi.c.peerManager.addPeer(addr)
return err
}
// PeerManagerAddFromMultiaddrs runs peerManager.addFromMultiaddrs().
func (api *RPCAPI) PeerManagerAddFromMultiaddrs(in MultiaddrsSerial, out *struct{}) error {
func (rpcapi *RPCAPI) PeerManagerAddFromMultiaddrs(in api.MultiaddrsSerial, out *struct{}) error {
addrs := in.ToMultiaddrs()
err := api.cluster.peerManager.addFromMultiaddrs(addrs)
err := rpcapi.c.peerManager.addFromMultiaddrs(addrs)
return err
}
// PeerManagerRmPeerShutdown runs peerManager.rmPeer().
func (api *RPCAPI) PeerManagerRmPeerShutdown(in peer.ID, out *struct{}) error {
return api.cluster.peerManager.rmPeer(in, true)
func (rpcapi *RPCAPI) PeerManagerRmPeerShutdown(in peer.ID, out *struct{}) error {
return rpcapi.c.peerManager.rmPeer(in, true)
}
// PeerManagerRmPeer runs peerManager.rmPeer().
func (api *RPCAPI) PeerManagerRmPeer(in peer.ID, out *struct{}) error {
return api.cluster.peerManager.rmPeer(in, false)
func (rpcapi *RPCAPI) PeerManagerRmPeer(in peer.ID, out *struct{}) error {
return rpcapi.c.peerManager.rmPeer(in, false)
}
// PeerManagerPeers runs peerManager.peers().
func (rpcapi *RPCAPI) PeerManagerPeers(in struct{}, out *[]peer.ID) error {
*out = rpcapi.c.peerManager.peers()
return nil
}
/*
PeerMonitor
*/
// PeerMonitorLogMetric runs PeerMonitor.LogMetric().
func (rpcapi *RPCAPI) PeerMonitorLogMetric(in api.Metric, out *struct{}) error {
rpcapi.c.monitor.LogMetric(in)
return nil
}
// PeerMonitorLastMetrics runs PeerMonitor.LastMetrics().
func (rpcapi *RPCAPI) PeerMonitorLastMetrics(in string, out *[]api.Metric) error {
*out = rpcapi.c.monitor.LastMetrics(in)
return nil
}
/*
@ -350,11 +303,11 @@ func (api *RPCAPI) PeerManagerRmPeer(in peer.ID, out *struct{}) error {
// This is necessary for a peer to figure out which of its multiaddresses
// other peers are seeing (also when crossing NATs). It should be called
// from the peer indicated by the IN parameter.
func (api *RPCAPI) RemoteMultiaddrForPeer(in peer.ID, out *MultiaddrSerial) error {
conns := api.cluster.host.Network().ConnsToPeer(in)
func (rpcapi *RPCAPI) RemoteMultiaddrForPeer(in peer.ID, out *api.MultiaddrSerial) error {
conns := rpcapi.c.host.Network().ConnsToPeer(in)
if len(conns) == 0 {
return errors.New("no connections to: " + in.Pretty())
}
*out = MultiaddrToSerial(multiaddrJoin(conns[0].RemoteMultiaddr(), in))
*out = api.MultiaddrToSerial(multiaddrJoin(conns[0].RemoteMultiaddr(), in))
return nil
}
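For orientation, here is a minimal caller-side sketch (not part of this commit) of how a component would reach one of these endpoints through go-libp2p-gorpc using the serializable api types. The pinOverRPC helper and its package are invented for illustration, and the empty destination peer.ID is assumed to address the local RPC server, as done elsewhere in the codebase.

package example

import (
    "github.com/ipfs/ipfs-cluster/api"

    rpc "github.com/hsanjuan/go-libp2p-gorpc"
    cid "github.com/ipfs/go-cid"
)

// pinOverRPC is an illustrative helper: it wraps the cid in its serializable
// form and invokes the "Pin" method registered under the "Cluster" service.
func pinOverRPC(client *rpc.Client, c *cid.Cid) error {
    carg := api.CidArgCid(c).ToSerial()
    // An empty destination peer.ID is assumed to target the local RPC server.
    return client.Call("", "Cluster", "Pin", carg, &struct{}{})
}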

View File

@ -1,170 +0,0 @@
package ipfscluster
import (
"errors"
"testing"
"time"
rpc "github.com/hsanjuan/go-libp2p-gorpc"
cid "github.com/ipfs/go-cid"
crypto "github.com/libp2p/go-libp2p-crypto"
peer "github.com/libp2p/go-libp2p-peer"
)
var errBadCid = errors.New("this is an expected error when using errorCid")
type mockService struct{}
func mockRPCClient(t *testing.T) *rpc.Client {
s := rpc.NewServer(nil, "mock")
c := rpc.NewClientWithServer(nil, "mock", s)
err := s.RegisterName("Cluster", &mockService{})
if err != nil {
t.Fatal(err)
}
return c
}
func (mock *mockService) Pin(in *CidArg, out *struct{}) error {
if in.Cid == errorCid {
return errBadCid
}
return nil
}
func (mock *mockService) Unpin(in *CidArg, out *struct{}) error {
if in.Cid == errorCid {
return errBadCid
}
return nil
}
func (mock *mockService) PinList(in struct{}, out *[]string) error {
*out = []string{testCid, testCid2, testCid3}
return nil
}
func (mock *mockService) ID(in struct{}, out *IDSerial) error {
_, pubkey, _ := crypto.GenerateKeyPair(
DefaultConfigCrypto,
DefaultConfigKeyLength)
*out = ID{
ID: testPeerID,
PublicKey: pubkey,
Version: "0.0.mock",
IPFS: IPFSID{
ID: testPeerID,
},
}.ToSerial()
return nil
}
func (mock *mockService) Version(in struct{}, out *string) error {
*out = "0.0.mock"
return nil
}
func (mock *mockService) Peers(in struct{}, out *[]IDSerial) error {
id := IDSerial{}
mock.ID(in, &id)
*out = []IDSerial{id}
return nil
}
func (mock *mockService) PeerAdd(in MultiaddrSerial, out *IDSerial) error {
id := IDSerial{}
mock.ID(struct{}{}, &id)
*out = id
return nil
}
func (mock *mockService) PeerRemove(in peer.ID, out *struct{}) error {
return nil
}
func (mock *mockService) StatusAll(in struct{}, out *[]GlobalPinInfo) error {
c1, _ := cid.Decode(testCid1)
c2, _ := cid.Decode(testCid2)
c3, _ := cid.Decode(testCid3)
*out = []GlobalPinInfo{
{
Cid: c1,
PeerMap: map[peer.ID]PinInfo{
testPeerID: {
CidStr: testCid1,
Peer: testPeerID,
Status: TrackerStatusPinned,
TS: time.Now(),
},
},
},
{
Cid: c2,
PeerMap: map[peer.ID]PinInfo{
testPeerID: {
CidStr: testCid2,
Peer: testPeerID,
Status: TrackerStatusPinning,
TS: time.Now(),
},
},
},
{
Cid: c3,
PeerMap: map[peer.ID]PinInfo{
testPeerID: {
CidStr: testCid3,
Peer: testPeerID,
Status: TrackerStatusPinError,
TS: time.Now(),
},
},
},
}
return nil
}
func (mock *mockService) Status(in *CidArg, out *GlobalPinInfo) error {
if in.Cid == errorCid {
return errBadCid
}
c1, _ := cid.Decode(testCid1)
*out = GlobalPinInfo{
Cid: c1,
PeerMap: map[peer.ID]PinInfo{
testPeerID: {
CidStr: testCid1,
Peer: testPeerID,
Status: TrackerStatusPinned,
TS: time.Now(),
},
},
}
return nil
}
func (mock *mockService) SyncAll(in struct{}, out *[]GlobalPinInfo) error {
return mock.StatusAll(in, out)
}
func (mock *mockService) Sync(in *CidArg, out *GlobalPinInfo) error {
return mock.Status(in, out)
}
func (mock *mockService) StateSync(in struct{}, out *[]PinInfo) error {
*out = []PinInfo{}
return nil
}
func (mock *mockService) Recover(in *CidArg, out *GlobalPinInfo) error {
return mock.Status(in, out)
}
func (mock *mockService) Track(in *CidArg, out *struct{}) error {
return nil
}
func (mock *mockService) Untrack(in *CidArg, out *struct{}) error {
return nil
}

View File

@ -0,0 +1,71 @@
package mapstate
import (
"sync"
"github.com/ipfs/ipfs-cluster/api"
cid "github.com/ipfs/go-cid"
)
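// Version is the current version number of this state format.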
const Version = 1
// MapState is a very simple database to store the state of the system
// using a Go map. It is thread safe. It implements the State interface.
type MapState struct {
pinMux sync.RWMutex
PinMap map[string]api.CidArgSerial
Version int
}
// NewMapState initializes the internal map and returns a new MapState object.
func NewMapState() *MapState {
return &MapState{
PinMap: make(map[string]api.CidArgSerial),
}
}
// Add adds a CidArg to the internal map.
func (st *MapState) Add(c api.CidArg) error {
st.pinMux.Lock()
defer st.pinMux.Unlock()
st.PinMap[c.Cid.String()] = c.ToSerial()
return nil
}
// Rm removes a Cid from the internal map.
func (st *MapState) Rm(c *cid.Cid) error {
st.pinMux.Lock()
defer st.pinMux.Unlock()
delete(st.PinMap, c.String())
return nil
}
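// Get returns the CidArg tracked under the given Cid. It returns an
// empty CidArg when the Cid is not part of the state.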
func (st *MapState) Get(c *cid.Cid) api.CidArg {
st.pinMux.RLock()
defer st.pinMux.RUnlock()
cargs, ok := st.PinMap[c.String()]
if !ok { // make sure no panics
return api.CidArg{}
}
return cargs.ToCidArg()
}
// Has returns true if the Cid belongs to the State.
func (st *MapState) Has(c *cid.Cid) bool {
st.pinMux.RLock()
defer st.pinMux.RUnlock()
_, ok := st.PinMap[c.String()]
return ok
}
// List provides the list of tracked CidArgs.
func (st *MapState) List() []api.CidArg {
st.pinMux.RLock()
defer st.pinMux.RUnlock()
cids := make([]api.CidArg, 0, len(st.PinMap))
for _, v := range st.PinMap {
cids = append(cids, v.ToCidArg())
}
return cids
}
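To make the intended use concrete, here is a small hypothetical example (not part of the changeset) exercising the new state API; the cid string is the same test cid used in the tests that follow, and demoMapState is an invented name.

package example

import (
    "fmt"

    "github.com/ipfs/ipfs-cluster/api"
    "github.com/ipfs/ipfs-cluster/state/mapstate"

    cid "github.com/ipfs/go-cid"
)

func demoMapState() error {
    st := mapstate.NewMapState()
    c, err := cid.Decode("QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmq")
    if err != nil {
        return err
    }
    // Track the cid cluster-wide (no explicit allocations).
    if err := st.Add(api.CidArg{Cid: c, Everywhere: true}); err != nil {
        return err
    }
    fmt.Println(st.Has(c), len(st.List())) // true 1
    return st.Rm(c)
}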

View File

@ -0,0 +1,68 @@
package mapstate
import (
"testing"
cid "github.com/ipfs/go-cid"
peer "github.com/libp2p/go-libp2p-peer"
"github.com/ipfs/ipfs-cluster/api"
)
var testCid1, _ = cid.Decode("QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmq")
var testPeerID1, _ = peer.IDB58Decode("QmXZrtE5jQwXNqCJMfHUTQkvhQ4ZAnqMnmzFMJfLewuabc")
var c = api.CidArg{
Cid: testCid1,
Allocations: []peer.ID{testPeerID1},
Everywhere: false,
}
func TestAdd(t *testing.T) {
ms := NewMapState()
ms.Add(c)
if !ms.Has(c.Cid) {
t.Error("should have added it")
}
}
func TestRm(t *testing.T) {
ms := NewMapState()
ms.Add(c)
ms.Rm(c.Cid)
if ms.Has(c.Cid) {
t.Error("should have removed it")
}
}
func TestGet(t *testing.T) {
defer func() {
if r := recover(); r != nil {
t.Fatal("paniced")
}
}()
ms := NewMapState()
ms.Add(c)
get := ms.Get(c.Cid)
if get.Cid.String() != c.Cid.String() ||
get.Allocations[0] != c.Allocations[0] ||
get.Everywhere != c.Everywhere {
t.Error("returned something different")
}
}
func TestList(t *testing.T) {
defer func() {
if r := recover(); r != nil {
t.Fatal("paniced")
}
}()
ms := NewMapState()
ms.Add(c)
list := ms.List()
if list[0].Cid.String() != c.Cid.String() ||
list[0].Allocations[0] != c.Allocations[0] ||
list[0].Everywhere != c.Everywhere {
t.Error("returned something different")
}
}

13
test/cids.go Normal file
View File

@ -0,0 +1,13 @@
package test
import peer "github.com/libp2p/go-libp2p-peer"
var (
TestCid1 = "QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmq"
TestCid2 = "QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmma"
TestCid3 = "QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmb"
ErrorCid = "QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmc"
TestPeerID1, _ = peer.IDB58Decode("QmXZrtE5jQwXNqCJMfHUTQkvhQ4ZAnqMnmzFMJfLewuabc")
TestPeerID2, _ = peer.IDB58Decode("QmUZ13osndQ5uL4tPWHXe3iBgBgq9gfewcBMSCAuMBsDJ6")
TestPeerID3, _ = peer.IDB58Decode("QmPGDFvBkgWhvzEK9qaTWrWurSwqXNmhnK3hgELPdZZNPa")
)

View File

@ -1,4 +1,4 @@
package ipfscluster
package test
import (
"encoding/json"
@ -9,17 +9,18 @@ import (
"strconv"
"strings"
"github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/state/mapstate"
cid "github.com/ipfs/go-cid"
)
// This is an ipfs daemon mock which should sustain the functionality used by
// ipfscluster.
type ipfsMock struct {
// IpfsMock is an ipfs daemon mock which should sustain the functionality used by ipfscluster.
type IpfsMock struct {
server *httptest.Server
addr string
port int
pinMap *MapState
Addr string
Port int
pinMap *mapstate.MapState
}
type mockPinResp struct {
@ -39,9 +40,15 @@ type ipfsErr struct {
Message string
}
func newIpfsMock() *ipfsMock {
st := NewMapState()
m := &ipfsMock{
type idResp struct {
ID string
Addresses []string
}
// NewIpfsMock returns a new mock.
func NewIpfsMock() *IpfsMock {
st := mapstate.NewMapState()
m := &IpfsMock{
pinMap: st,
}
ts := httptest.NewServer(http.HandlerFunc(m.handler))
@ -51,21 +58,21 @@ func newIpfsMock() *ipfsMock {
h := strings.Split(url.Host, ":")
i, _ := strconv.Atoi(h[1])
m.port = i
m.addr = h[0]
m.Port = i
m.Addr = h[0]
return m
}
// FIXME: what if IPFS API changes?
func (m *ipfsMock) handler(w http.ResponseWriter, r *http.Request) {
func (m *IpfsMock) handler(w http.ResponseWriter, r *http.Request) {
p := r.URL.Path
endp := strings.TrimPrefix(p, "/api/v0/")
var cidStr string
switch endp {
case "id":
resp := ipfsIDResp{
ID: testPeerID.Pretty(),
resp := idResp{
ID: TestPeerID1.Pretty(),
Addresses: []string{
"/ip4/0.0.0.0/tcp/1234",
},
@ -79,14 +86,14 @@ func (m *ipfsMock) handler(w http.ResponseWriter, r *http.Request) {
goto ERROR
}
cidStr = arg[0]
if cidStr == errorCid {
if cidStr == ErrorCid {
goto ERROR
}
c, err := cid.Decode(cidStr)
if err != nil {
goto ERROR
}
m.pinMap.AddPin(c)
m.pinMap.Add(api.CidArgCid(c))
resp := mockPinResp{
Pins: []string{cidStr},
}
@ -103,7 +110,7 @@ func (m *ipfsMock) handler(w http.ResponseWriter, r *http.Request) {
if err != nil {
goto ERROR
}
m.pinMap.RmPin(c)
m.pinMap.Rm(c)
resp := mockPinResp{
Pins: []string{cidStr},
}
@ -114,9 +121,9 @@ func (m *ipfsMock) handler(w http.ResponseWriter, r *http.Request) {
arg, ok := query["arg"]
if !ok {
rMap := make(map[string]mockPinType)
pins := m.pinMap.ListPins()
pins := m.pinMap.List()
for _, p := range pins {
rMap[p.String()] = mockPinType{"recursive"}
rMap[p.Cid.String()] = mockPinType{"recursive"}
}
j, _ := json.Marshal(mockPinLsResp{rMap})
w.Write(j)
@ -131,7 +138,7 @@ func (m *ipfsMock) handler(w http.ResponseWriter, r *http.Request) {
if err != nil {
goto ERROR
}
ok = m.pinMap.HasPin(c)
ok = m.pinMap.Has(c)
if ok {
rMap := make(map[string]mockPinType)
rMap[cidStr] = mockPinType{"recursive"}
@ -153,6 +160,6 @@ ERROR:
w.WriteHeader(http.StatusInternalServerError)
}
func (m *ipfsMock) Close() {
func (m *IpfsMock) Close() {
m.server.Close()
}
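As a hedged illustration of how a test elsewhere might talk to this mock over plain HTTP: queryMockID and its package are invented, and only Addr, Port, Close and the /api/v0/id handler come from the code above.

package example

import (
    "fmt"
    "io/ioutil"
    "net/http"

    "github.com/ipfs/ipfs-cluster/test"
)

// queryMockID starts the mock daemon, hits its /api/v0/id endpoint and
// returns the raw JSON body.
func queryMockID() (string, error) {
    mock := test.NewIpfsMock()
    defer mock.Close()

    url := fmt.Sprintf("http://%s:%d/api/v0/id", mock.Addr, mock.Port)
    resp, err := http.Get(url)
    if err != nil {
        return "", err
    }
    defer resp.Body.Close()
    body, err := ioutil.ReadAll(resp.Body)
    return string(body), err
}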

197
test/rpc_api_mock.go Normal file
View File

@ -0,0 +1,197 @@
package test
import (
"errors"
"testing"
"time"
"github.com/ipfs/ipfs-cluster/api"
rpc "github.com/hsanjuan/go-libp2p-gorpc"
cid "github.com/ipfs/go-cid"
peer "github.com/libp2p/go-libp2p-peer"
)
var ErrBadCid = errors.New("this is an expected error when using ErrorCid")
type mockService struct{}
// NewMockRPCClient creates a mock ipfs-cluster RPC server and returns
// a client to it.
func NewMockRPCClient(t *testing.T) *rpc.Client {
s := rpc.NewServer(nil, "mock")
c := rpc.NewClientWithServer(nil, "mock", s)
err := s.RegisterName("Cluster", &mockService{})
if err != nil {
t.Fatal(err)
}
return c
}
func (mock *mockService) Pin(in api.CidArgSerial, out *struct{}) error {
if in.Cid == ErrorCid {
return ErrBadCid
}
return nil
}
func (mock *mockService) Unpin(in api.CidArgSerial, out *struct{}) error {
if in.Cid == ErrorCid {
return ErrBadCid
}
return nil
}
func (mock *mockService) PinList(in struct{}, out *[]api.CidArgSerial) error {
*out = []api.CidArgSerial{
{
Cid: TestCid1,
},
{
Cid: TestCid2,
},
{
Cid: TestCid3,
},
}
return nil
}
func (mock *mockService) ID(in struct{}, out *api.IDSerial) error {
//_, pubkey, _ := crypto.GenerateKeyPair(
// DefaultConfigCrypto,
// DefaultConfigKeyLength)
*out = api.ID{
ID: TestPeerID1,
//PublicKey: pubkey,
Version: "0.0.mock",
IPFS: api.IPFSID{
ID: TestPeerID1,
},
}.ToSerial()
return nil
}
func (mock *mockService) Version(in struct{}, out *api.Version) error {
*out = api.Version{"0.0.mock"}
return nil
}
func (mock *mockService) Peers(in struct{}, out *[]api.IDSerial) error {
id := api.IDSerial{}
mock.ID(in, &id)
*out = []api.IDSerial{id}
return nil
}
func (mock *mockService) PeerAdd(in api.MultiaddrSerial, out *api.IDSerial) error {
id := api.IDSerial{}
mock.ID(struct{}{}, &id)
*out = id
return nil
}
func (mock *mockService) PeerRemove(in peer.ID, out *struct{}) error {
return nil
}
// FIXME: dup from util.go
func globalPinInfoSliceToSerial(gpi []api.GlobalPinInfo) []api.GlobalPinInfoSerial {
gpis := make([]api.GlobalPinInfoSerial, len(gpi), len(gpi))
for i, v := range gpi {
gpis[i] = v.ToSerial()
}
return gpis
}
func (mock *mockService) StatusAll(in struct{}, out *[]api.GlobalPinInfoSerial) error {
c1, _ := cid.Decode(TestCid1)
c2, _ := cid.Decode(TestCid2)
c3, _ := cid.Decode(TestCid3)
*out = globalPinInfoSliceToSerial([]api.GlobalPinInfo{
{
Cid: c1,
PeerMap: map[peer.ID]api.PinInfo{
TestPeerID1: {
Cid: c1,
Peer: TestPeerID1,
Status: api.TrackerStatusPinned,
TS: time.Now(),
},
},
},
{
Cid: c2,
PeerMap: map[peer.ID]api.PinInfo{
TestPeerID1: {
Cid: c2,
Peer: TestPeerID1,
Status: api.TrackerStatusPinning,
TS: time.Now(),
},
},
},
{
Cid: c3,
PeerMap: map[peer.ID]api.PinInfo{
TestPeerID1: {
Cid: c3,
Peer: TestPeerID1,
Status: api.TrackerStatusPinError,
TS: time.Now(),
},
},
},
})
return nil
}
func (mock *mockService) Status(in api.CidArgSerial, out *api.GlobalPinInfoSerial) error {
if in.Cid == ErrorCid {
return ErrBadCid
}
c1, _ := cid.Decode(TestCid1)
*out = api.GlobalPinInfo{
Cid: c1,
PeerMap: map[peer.ID]api.PinInfo{
TestPeerID1: {
Cid: c1,
Peer: TestPeerID1,
Status: api.TrackerStatusPinned,
TS: time.Now(),
},
},
}.ToSerial()
return nil
}
func (mock *mockService) SyncAll(in struct{}, out *[]api.GlobalPinInfoSerial) error {
return mock.StatusAll(in, out)
}
func (mock *mockService) Sync(in api.CidArgSerial, out *api.GlobalPinInfoSerial) error {
return mock.Status(in, out)
}
func (mock *mockService) StateSync(in struct{}, out *[]api.PinInfoSerial) error {
*out = make([]api.PinInfoSerial, 0, 0)
return nil
}
func (mock *mockService) Recover(in api.CidArgSerial, out *api.GlobalPinInfoSerial) error {
return mock.Status(in, out)
}
func (mock *mockService) Track(in api.CidArgSerial, out *struct{}) error {
return nil
}
func (mock *mockService) Untrack(in api.CidArgSerial, out *struct{}) error {
return nil
}
func (mock *mockService) PeerManagerPeers(in struct{}, out *[]peer.ID) error {
*out = []peer.ID{TestPeerID1, TestPeerID2, TestPeerID3}
return nil
}
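Below is a sketch of a hypothetical test (not part of the commit) wiring a component to this mock client; it assumes an empty destination peer.ID performs a local call, which is how the rest of the codebase uses the RPC client.

package example

import (
    "testing"

    "github.com/ipfs/ipfs-cluster/api"
    "github.com/ipfs/ipfs-cluster/test"
)

func TestStatusViaMock(t *testing.T) {
    client := test.NewMockRPCClient(t)

    var pinfo api.GlobalPinInfoSerial
    err := client.Call("", "Cluster", "Status",
        api.CidArgSerial{Cid: test.TestCid1}, &pinfo)
    if err != nil {
        t.Fatal(err)
    }

    // ErrorCid is reserved to exercise failure paths in the mock.
    err = client.Call("", "Cluster", "Status",
        api.CidArgSerial{Cid: test.ErrorCid}, &pinfo)
    if err == nil {
        t.Error("expected an error for the ErrorCid input")
    }
}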

3
test/test.go Normal file
View File

@ -0,0 +1,3 @@
// Package test offers testing utilities for ipfs-cluster, such as mocks.
package test

62
test/test_test.go Normal file
View File

@ -0,0 +1,62 @@
package test
import (
"reflect"
"testing"
ipfscluster "github.com/ipfs/ipfs-cluster"
)
func TestIpfsMock(t *testing.T) {
ipfsmock := NewIpfsMock()
defer ipfsmock.Close()
}
// Test that our RPC mock resembles the original
func TestRPCMockValid(t *testing.T) {
mock := &mockService{}
real := &ipfscluster.RPCAPI{}
mockT := reflect.TypeOf(mock)
realT := reflect.TypeOf(real)
// Make sure all the methods we have match the original
for i := 0; i < mockT.NumMethod(); i++ {
method := mockT.Method(i)
name := method.Name
origMethod, ok := realT.MethodByName(name)
if !ok {
t.Fatalf("%s method not found in real RPC", name)
}
mType := method.Type
oType := origMethod.Type
if nout := mType.NumOut(); nout != 1 || nout != oType.NumOut() {
t.Errorf("%s: more than 1 out parameter", name)
}
if mType.Out(0).Name() != "error" {
t.Errorf("%s out param should be an error", name)
}
if nin := mType.NumIn(); nin != oType.NumIn() || nin != 3 {
t.Errorf("%s: num in parameter mismatch: %d vs. %d", name, nin, oType.NumIn())
}
for j := 1; j < 3; j++ {
mn := mType.In(j).String()
on := oType.In(j).String()
if mn != on {
t.Errorf("%s: name mismatch: %s vs %s", name, mn, on)
}
}
}
for i := 0; i < realT.NumMethod(); i++ {
name := realT.Method(i).Name
_, ok := mockT.MethodByName(name)
if !ok {
t.Logf("Warning: %s: unimplemented in mock rpc", name)
}
}
}

32
util.go
View File

@ -1,10 +1,12 @@
package ipfscluster
import (
"errors"
"fmt"
host "github.com/libp2p/go-libp2p-host"
"github.com/ipfs/ipfs-cluster/api"
host "github.com/libp2p/go-libp2p-host"
peer "github.com/libp2p/go-libp2p-peer"
ma "github.com/multiformats/go-multiaddr"
)
@ -18,7 +20,7 @@ import (
// return ifaces
// }
func copyIDSerialsToIfaces(in []IDSerial) []interface{} {
func copyIDSerialsToIfaces(in []api.IDSerial) []interface{} {
ifaces := make([]interface{}, len(in), len(in))
for i := range in {
ifaces[i] = &in[i]
@ -26,7 +28,7 @@ func copyIDSerialsToIfaces(in []IDSerial) []interface{} {
return ifaces
}
func copyPinInfoToIfaces(in []PinInfo) []interface{} {
func copyPinInfoSerialToIfaces(in []api.PinInfoSerial) []interface{} {
ifaces := make([]interface{}, len(in), len(in))
for i := range in {
ifaces[i] = &in[i]
@ -34,7 +36,7 @@ func copyPinInfoToIfaces(in []PinInfo) []interface{} {
return ifaces
}
func copyPinInfoSliceToIfaces(in [][]PinInfo) []interface{} {
func copyPinInfoSerialSliceToIfaces(in [][]api.PinInfoSerial) []interface{} {
ifaces := make([]interface{}, len(in), len(in))
for i := range in {
ifaces[i] = &in[i]
@ -120,3 +122,25 @@ func getRemoteMultiaddr(h host.Host, pid peer.ID, addr ma.Multiaddr) ma.Multiadd
}
return multiaddrJoin(addr, pid)
}
func pinInfoSliceToSerial(pi []api.PinInfo) []api.PinInfoSerial {
pis := make([]api.PinInfoSerial, len(pi), len(pi))
for i, v := range pi {
pis[i] = v.ToSerial()
}
return pis
}
func globalPinInfoSliceToSerial(gpi []api.GlobalPinInfo) []api.GlobalPinInfoSerial {
gpis := make([]api.GlobalPinInfoSerial, len(gpi), len(gpi))
for i, v := range gpi {
gpis[i] = v.ToSerial()
}
return gpis
}
func logError(fmtstr string, args ...interface{}) error {
msg := fmt.Sprintf(fmtstr, args...)
logger.Error(msg)
return errors.New(msg)
}
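Finally, a brief in-package usage sketch for logError: checkPeerKnown is invented for illustration and assumes it lives in package ipfscluster, where logError and the peer import above are available.

// checkPeerKnown is illustrative only: logError logs the formatted message
// and returns it as an error, keeping failure paths to a single line.
func checkPeerKnown(known map[peer.ID]struct{}, p peer.ID) error {
    if _, ok := known[p]; !ok {
        return logError("%s is not a known cluster peer", p.Pretty())
    }
    return nil
}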