Move all API-related types to the /api subpackage.

At the beginning we opted for native types which were
serializable (PinInfo had a CidStr field instead of Cid).

Now we provide types in two versions: native and serializable.

Go methods use native. The rest of APIs (REST/RPC) use always
serializable versions. Methods are provided to convert between the
two.

The reason for moving these out of the way is to be able to re-use
type definitions when parsing API responses in `ipfs-cluster-ctl` or
any other clients that come up. API responses are just the serializable
version of types in JSON encoding. This also avoids having
duplicate type definitions and parsing methods everywhere.

License: MIT
Signed-off-by: Hector Sanjuan <hector@protocol.ai>
This commit is contained in:
Hector Sanjuan 2017-02-08 18:04:08 +01:00
parent 08a0261aae
commit 1b3d04e18b
16 changed files with 889 additions and 929 deletions

359
api/types.go Normal file
View File

@ -0,0 +1,359 @@
// Package api holds declarations for types used in ipfs-cluster APIs to make
// them re-usable across different tools. This includes RPC API "Serial[izable]"
// versions of types. The Go API uses native types, while the RPC API,
// REST APIs etc. use serializable types (i.e. json format). Conversion methods
// exist between types.
//
// Note that all conversion methods ignore any parsing errors. All values must
// be validated first before initializing any of the types defined here.
package api
import (
"time"
cid "github.com/ipfs/go-cid"
peer "github.com/libp2p/go-libp2p-peer"
protocol "github.com/libp2p/go-libp2p-protocol"
ma "github.com/multiformats/go-multiaddr"
)
// TrackerStatus values
const (
	// TrackerStatusBug is a sentinel value; a working tracker should
	// never report it. It doubles as the fallback for unknown strings.
	TrackerStatusBug TrackerStatus = iota
	// TrackerStatusClusterError: the cluster node is offline or not responding
	TrackerStatusClusterError
	// TrackerStatusPinError: an error occurred pinning
	TrackerStatusPinError
	// TrackerStatusUnpinError: an error occurred unpinning
	TrackerStatusUnpinError
	// TrackerStatusPinned: the IPFS daemon has pinned the item
	TrackerStatusPinned
	// TrackerStatusPinning: the IPFS daemon is currently pinning the item
	TrackerStatusPinning
	// TrackerStatusUnpinning: the IPFS daemon is currently unpinning the item
	TrackerStatusUnpinning
	// TrackerStatusUnpinned: the IPFS daemon is not pinning the item
	TrackerStatusUnpinned
	// TrackerStatusRemotePin: the IPFS daemon is not pinning the item
	// but it is being tracked (pinned by a remote peer)
	TrackerStatusRemotePin
)

// TrackerStatus represents the status of a tracked Cid in the PinTracker.
type TrackerStatus int

// trackerStatusString provides the canonical string form for each
// TrackerStatus, used for (de)serialization.
var trackerStatusString = map[TrackerStatus]string{
	TrackerStatusBug:          "bug",
	TrackerStatusClusterError: "cluster_error",
	TrackerStatusPinError:     "pin_error",
	TrackerStatusUnpinError:   "unpin_error",
	TrackerStatusPinned:       "pinned",
	TrackerStatusPinning:      "pinning",
	TrackerStatusUnpinning:    "unpinning",
	TrackerStatusUnpinned:     "unpinned",
	TrackerStatusRemotePin:    "remote",
}

// String converts a TrackerStatus into a readable string.
// Unknown values produce an empty string.
func (st TrackerStatus) String() string {
	return trackerStatusString[st]
}

// TrackerStatusFromString parses a string and returns the matching
// TrackerStatus value. Unrecognized strings return TrackerStatusBug.
func TrackerStatusFromString(str string) TrackerStatus {
	for k, v := range trackerStatusString {
		if v == str {
			return k
		}
	}
	return TrackerStatusBug
}
// IPFSPinStatus values
const (
	// IPFSPinStatusBug is a sentinel value and should never be reported;
	// it is the fallback for unknown strings.
	IPFSPinStatusBug IPFSPinStatus = iota
	IPFSPinStatusError
	IPFSPinStatusDirect
	IPFSPinStatusRecursive
	IPFSPinStatusIndirect
	IPFSPinStatusUnpinned
)

// IPFSPinStatus represents the status of a pin in IPFS (direct, recursive etc.)
type IPFSPinStatus int

// IPFSPinStatusFromString parses a string and returns the matching
// IPFSPinStatus. Unrecognized strings return IPFSPinStatusBug.
func IPFSPinStatusFromString(t string) IPFSPinStatus {
	// TODO: This is only used in the http_connector to parse
	// ipfs-daemon-returned values. Maybe it should be extended.
	switch t {
	case "indirect":
		return IPFSPinStatusIndirect
	case "direct":
		return IPFSPinStatusDirect
	case "recursive":
		return IPFSPinStatusRecursive
	default:
		return IPFSPinStatusBug
	}
}

// IsPinned returns true if the status is Direct or Recursive
func (ips IPFSPinStatus) IsPinned() bool {
	return ips == IPFSPinStatusDirect || ips == IPFSPinStatusRecursive
}
// GlobalPinInfo contains cluster-wide status information about a tracked Cid,
// indexed by cluster peer.
type GlobalPinInfo struct {
	Cid     *cid.Cid
	PeerMap map[peer.ID]PinInfo
}

// GlobalPinInfoSerial is the serializable version of GlobalPinInfo.
// The Cid is string-encoded and PeerMap is keyed by the B58-encoded
// peer ID (see ToSerial/ToGlobalPinInfo).
type GlobalPinInfoSerial struct {
	Cid     string                   `json:"cid"`
	PeerMap map[string]PinInfoSerial `json:"peer_map"`
}
// ToSerial converts a GlobalPinInfo to its serializable version.
func (gpi GlobalPinInfo) ToSerial() GlobalPinInfoSerial {
	// Peer IDs become their B58 string form; each PinInfo is
	// converted through its own ToSerial().
	peerMap := make(map[string]PinInfoSerial)
	for pid, info := range gpi.PeerMap {
		peerMap[peer.IDB58Encode(pid)] = info.ToSerial()
	}
	return GlobalPinInfoSerial{
		Cid:     gpi.Cid.String(),
		PeerMap: peerMap,
	}
}
// ToGlobalPinInfo converts a GlobalPinInfoSerial to its native version.
// Parsing errors are ignored: an undecodable Cid yields nil and an
// undecodable peer ID yields the zero peer.ID as map key.
func (gpis GlobalPinInfoSerial) ToGlobalPinInfo() GlobalPinInfo {
	decoded, _ := cid.Decode(gpis.Cid)
	peerMap := make(map[peer.ID]PinInfo)
	for pidStr, infoSerial := range gpis.PeerMap {
		pid, _ := peer.IDB58Decode(pidStr)
		peerMap[pid] = infoSerial.ToPinInfo()
	}
	return GlobalPinInfo{
		Cid:     decoded,
		PeerMap: peerMap,
	}
}
// PinInfo holds information about local pins. This is the native
// version; when the information travels over RPC/REST it is converted
// to PinInfoSerial (see ToSerial/ToPinInfo).
type PinInfo struct {
	Cid    *cid.Cid
	Peer   peer.ID
	Status TrackerStatus
	TS     time.Time
	Error  string
}

// PinInfoSerial is a serializable version of PinInfo.
// Cid and Peer are string-encoded, Status is the string form of
// TrackerStatus, and TS is parsed back using time.RFC1123
// (see ToPinInfo).
type PinInfoSerial struct {
	Cid    string `json:"cid"`
	Peer   string `json:"peer"`
	Status string `json:"status"`
	TS     string `json:"timestamp"`
	Error  string `json:"error"`
}
// ToSerial converts a PinInfo to its serializable version.
// The timestamp is encoded with the time.RFC1123 layout, which is the
// layout PinInfoSerial.ToPinInfo parses it back with.
func (pi PinInfo) ToSerial() PinInfoSerial {
	return PinInfoSerial{
		Cid:    pi.Cid.String(),
		Peer:   peer.IDB58Encode(pi.Peer),
		Status: pi.Status.String(),
		// Bug fix: time.Time.String() does not produce RFC1123, so the
		// timestamp could never be parsed back by ToPinInfo. Format with
		// the same layout the parser expects.
		TS:    pi.TS.Format(time.RFC1123),
		Error: pi.Error,
	}
}
// ToPinInfo converts a PinInfoSerial to its native version.
// Parsing errors in any field are ignored, leaving the corresponding
// field as its zero value. The timestamp must be in time.RFC1123 form.
func (pis PinInfoSerial) ToPinInfo() PinInfo {
	var pi PinInfo
	pi.Cid, _ = cid.Decode(pis.Cid)
	pi.Peer, _ = peer.IDB58Decode(pis.Peer)
	pi.Status = TrackerStatusFromString(pis.Status)
	pi.TS, _ = time.Parse(time.RFC1123, pis.TS)
	pi.Error = pis.Error
	return pi
}
// Version holds version information.
type Version struct {
	Version string `json:"Version"`
}

// IPFSID is used to store information about the underlying IPFS daemon.
type IPFSID struct {
	ID        peer.ID
	Addresses []ma.Multiaddr
	Error     string
}

// IPFSIDSerial is the serializable IPFSID for RPC requests.
// ID is the B58-encoded peer ID and Addresses are string-encoded
// multiaddresses (see ToSerial/ToIPFSID).
type IPFSIDSerial struct {
	ID        string           `json:"id"`
	Addresses MultiaddrsSerial `json:"addresses"`
	Error     string           `json:"error"`
}
// ToSerial converts an IPFSID to its Go-serializable counterpart.
func (id *IPFSID) ToSerial() IPFSIDSerial {
	var ser IPFSIDSerial
	ser.ID = peer.IDB58Encode(id.ID)
	ser.Addresses = MultiaddrsToSerial(id.Addresses)
	ser.Error = id.Error
	return ser
}
// ToIPFSID converts an IPFSIDSerial back to an IPFSID.
// An undecodable ID string is ignored, leaving the zero peer.ID.
func (ids *IPFSIDSerial) ToIPFSID() IPFSID {
	var id IPFSID
	pID, err := peer.IDB58Decode(ids.ID)
	if err == nil {
		id.ID = pID
	}
	id.Addresses = ids.Addresses.ToMultiaddrs()
	id.Error = ids.Error
	return id
}
// ID holds information about the Cluster peer.
type ID struct {
	ID                 peer.ID
	Addresses          []ma.Multiaddr
	ClusterPeers       []ma.Multiaddr
	Version            string
	Commit             string
	RPCProtocolVersion protocol.ID
	Error              string
	IPFS               IPFSID
	//PublicKey          crypto.PubKey
}

// IDSerial is the serializable ID counterpart for RPC requests.
// ID is the B58-encoded peer ID; address fields are string-encoded
// multiaddresses (see ToSerial/ToID).
type IDSerial struct {
	ID                 string           `json:"id"`
	Addresses          MultiaddrsSerial `json:"addresses"`
	ClusterPeers       MultiaddrsSerial `json:"cluster_peers"`
	Version            string           `json:"version"`
	Commit             string           `json:"commit"`
	RPCProtocolVersion string           `json:"rpc_protocol_version"`
	Error              string           `json:"error"`
	IPFS               IPFSIDSerial     `json:"ipfs"`
	//PublicKey          []byte
}
// ToSerial converts an ID to its Go-serializable version.
func (id ID) ToSerial() IDSerial {
	//var pkey []byte
	//if id.PublicKey != nil {
	//	pkey, _ = id.PublicKey.Bytes()
	//}
	var ser IDSerial
	ser.ID = peer.IDB58Encode(id.ID)
	//ser.PublicKey = pkey
	ser.Addresses = MultiaddrsToSerial(id.Addresses)
	ser.ClusterPeers = MultiaddrsToSerial(id.ClusterPeers)
	ser.Version = id.Version
	ser.Commit = id.Commit
	ser.RPCProtocolVersion = string(id.RPCProtocolVersion)
	ser.Error = id.Error
	ser.IPFS = id.IPFS.ToSerial()
	return ser
}
// ToID converts an IDSerial object to ID.
// It will ignore any errors when parsing the fields.
func (ids IDSerial) ToID() ID {
	pid, _ := peer.IDB58Decode(ids.ID)
	//if pkey, err := crypto.UnmarshalPublicKey(ids.PublicKey); err == nil {
	//	id.PublicKey = pkey
	//}
	return ID{
		ID:                 pid,
		Addresses:          ids.Addresses.ToMultiaddrs(),
		ClusterPeers:       ids.ClusterPeers.ToMultiaddrs(),
		Version:            ids.Version,
		Commit:             ids.Commit,
		RPCProtocolVersion: protocol.ID(ids.RPCProtocolVersion),
		Error:              ids.Error,
		IPFS:               ids.IPFS.ToIPFSID(),
	}
}
// MultiaddrSerial is a Multiaddress in a serializable form
// (its string representation).
type MultiaddrSerial string

// MultiaddrsSerial is an array of Multiaddresses in serializable form.
type MultiaddrsSerial []MultiaddrSerial
// MultiaddrToSerial converts a Multiaddress to its serializable form.
// NOTE(review): a nil addr would panic here; per the package doc,
// values are expected to be validated before conversion.
func MultiaddrToSerial(addr ma.Multiaddr) MultiaddrSerial {
	return MultiaddrSerial(addr.String())
}

// ToMultiaddr converts a serializable Multiaddress to its original type.
// All errors are ignored: an unparseable address yields nil.
func (addrS MultiaddrSerial) ToMultiaddr() ma.Multiaddr {
	a, _ := ma.NewMultiaddr(string(addrS))
	return a
}
// MultiaddrsToSerial converts a slice of Multiaddresses to its
// serializable form.
func MultiaddrsToSerial(addrs []ma.Multiaddr) MultiaddrsSerial {
	serial := make(MultiaddrsSerial, len(addrs))
	for i, addr := range addrs {
		serial[i] = MultiaddrToSerial(addr)
	}
	return serial
}
// ToMultiaddrs converts MultiaddrsSerial back to a slice of
// Multiaddresses. Unparseable entries become nil elements.
func (addrsS MultiaddrsSerial) ToMultiaddrs() []ma.Multiaddr {
	out := make([]ma.Multiaddr, len(addrsS))
	for i, serial := range addrsS {
		out[i] = serial.ToMultiaddr()
	}
	return out
}
// CidArg is an argument that carries a Cid. It may carry more things
// in the future.
type CidArg struct {
	Cid *cid.Cid
}

// CidArgSerial is a serializable version of CidArg, with the Cid
// string-encoded.
type CidArgSerial struct {
	Cid string `json:"cid"`
}
// ToSerial converts a CidArg to CidArgSerial.
func (carg CidArg) ToSerial() CidArgSerial {
	return CidArgSerial{Cid: carg.Cid.String()}
}
// ToCidArg converts a CidArgSerial to its native form.
// An undecodable Cid string results in a nil Cid.
func (cargs CidArgSerial) ToCidArg() CidArg {
	decoded, _ := cid.Decode(cargs.Cid)
	return CidArg{Cid: decoded}
}

View File

@ -6,6 +6,8 @@ import (
"sync"
"time"
"github.com/ipfs/ipfs-cluster/api"
rpc "github.com/hsanjuan/go-libp2p-gorpc"
cid "github.com/ipfs/go-cid"
host "github.com/libp2p/go-libp2p-host"
@ -106,7 +108,7 @@ func (c *Cluster) setupPeerManager() {
func (c *Cluster) setupRPC() error {
rpcServer := rpc.NewServer(c.host, RPCProtocol)
err := rpcServer.RegisterName("Cluster", &RPCAPI{cluster: c})
err := rpcServer.RegisterName("Cluster", &RPCAPI{c})
if err != nil {
return err
}
@ -293,7 +295,7 @@ func (c *Cluster) Done() <-chan struct{} {
}
// ID returns information about the Cluster peer
func (c *Cluster) ID() ID {
func (c *Cluster) ID() api.ID {
// ignore error since it is included in response object
ipfsID, _ := c.ipfs.ID()
var addrs []ma.Multiaddr
@ -301,9 +303,9 @@ func (c *Cluster) ID() ID {
addrs = append(addrs, multiaddrJoin(addr, c.host.ID()))
}
return ID{
return api.ID{
ID: c.host.ID(),
PublicKey: c.host.Peerstore().PubKey(c.host.ID()),
//PublicKey: c.host.Peerstore().PubKey(c.host.ID()),
Addresses: addrs,
ClusterPeers: c.peerManager.peersAddrs(),
Version: Version,
@ -319,7 +321,7 @@ func (c *Cluster) ID() ID {
// consensus and will receive the shared state (including the
// list of peers). The new peer should be a single-peer cluster,
// preferable without any relevant state.
func (c *Cluster) PeerAdd(addr ma.Multiaddr) (ID, error) {
func (c *Cluster) PeerAdd(addr ma.Multiaddr) (api.ID, error) {
// starting 10 nodes on the same box for testing
// causes deadlock and a global lock here
// seems to help.
@ -328,7 +330,7 @@ func (c *Cluster) PeerAdd(addr ma.Multiaddr) (ID, error) {
logger.Debugf("peerAdd called with %s", addr)
pid, decapAddr, err := multiaddrSplit(addr)
if err != nil {
id := ID{
id := api.ID{
Error: err.Error(),
}
return id, err
@ -340,18 +342,18 @@ func (c *Cluster) PeerAdd(addr ma.Multiaddr) (ID, error) {
err = c.peerManager.addPeer(remoteAddr)
if err != nil {
logger.Error(err)
id := ID{ID: pid, Error: err.Error()}
id := api.ID{ID: pid, Error: err.Error()}
return id, err
}
// Figure out our address to that peer. This also
// ensures that it is reachable
var addrSerial MultiaddrSerial
var addrSerial api.MultiaddrSerial
err = c.rpcClient.Call(pid, "Cluster",
"RemoteMultiaddrForPeer", c.host.ID(), &addrSerial)
if err != nil {
logger.Error(err)
id := ID{ID: pid, Error: err.Error()}
id := api.ID{ID: pid, Error: err.Error()}
c.peerManager.rmPeer(pid, false)
return id, err
}
@ -360,7 +362,7 @@ func (c *Cluster) PeerAdd(addr ma.Multiaddr) (ID, error) {
err = c.consensus.LogAddPeer(remoteAddr)
if err != nil {
logger.Error(err)
id := ID{ID: pid, Error: err.Error()}
id := api.ID{ID: pid, Error: err.Error()}
c.peerManager.rmPeer(pid, false)
return id, err
}
@ -371,7 +373,7 @@ func (c *Cluster) PeerAdd(addr ma.Multiaddr) (ID, error) {
err = c.rpcClient.Call(pid,
"Cluster",
"PeerManagerAddFromMultiaddrs",
MultiaddrsToSerial(clusterPeers),
api.MultiaddrsToSerial(clusterPeers),
&struct{}{})
if err != nil {
logger.Error(err)
@ -438,11 +440,11 @@ func (c *Cluster) Join(addr ma.Multiaddr) error {
// Note that PeerAdd() on the remote peer will
// figure out what our real address is (obviously not
// ClusterAddr).
var myID IDSerial
var myID api.IDSerial
err = c.rpcClient.Call(pid,
"Cluster",
"PeerAdd",
MultiaddrToSerial(multiaddrJoin(c.config.ClusterAddr, c.host.ID())),
api.MultiaddrToSerial(multiaddrJoin(c.config.ClusterAddr, c.host.ID())),
&myID)
if err != nil {
logger.Error(err)
@ -465,7 +467,7 @@ func (c *Cluster) Join(addr ma.Multiaddr) error {
// StateSync syncs the consensus state to the Pin Tracker, ensuring
// that every Cid that should be tracked is tracked. It returns
// PinInfo for Cids which were added or deleted.
func (c *Cluster) StateSync() ([]PinInfo, error) {
func (c *Cluster) StateSync() ([]api.PinInfo, error) {
cState, err := c.consensus.State()
if err != nil {
return nil, err
@ -482,7 +484,7 @@ func (c *Cluster) StateSync() ([]PinInfo, error) {
// Track items which are not tracked
for _, h := range clusterPins {
if c.tracker.Status(h).Status == TrackerStatusUnpinned {
if c.tracker.Status(h).Status == api.TrackerStatusUnpinned {
changed = append(changed, h)
go c.tracker.Track(h)
}
@ -490,14 +492,13 @@ func (c *Cluster) StateSync() ([]PinInfo, error) {
// Untrack items which should not be tracked
for _, p := range c.tracker.StatusAll() {
h, _ := cid.Decode(p.CidStr)
if !cState.HasPin(h) {
changed = append(changed, h)
go c.tracker.Untrack(h)
if !cState.HasPin(p.Cid) {
changed = append(changed, p.Cid)
go c.tracker.Untrack(p.Cid)
}
}
var infos []PinInfo
var infos []api.PinInfo
for _, h := range changed {
infos = append(infos, c.tracker.Status(h))
}
@ -506,13 +507,13 @@ func (c *Cluster) StateSync() ([]PinInfo, error) {
// StatusAll returns the GlobalPinInfo for all tracked Cids. If an error
// happens, the slice will contain as much information as could be fetched.
func (c *Cluster) StatusAll() ([]GlobalPinInfo, error) {
func (c *Cluster) StatusAll() ([]api.GlobalPinInfo, error) {
return c.globalPinInfoSlice("TrackerStatusAll")
}
// Status returns the GlobalPinInfo for a given Cid. If an error happens,
// the GlobalPinInfo should contain as much information as could be fetched.
func (c *Cluster) Status(h *cid.Cid) (GlobalPinInfo, error) {
func (c *Cluster) Status(h *cid.Cid) (api.GlobalPinInfo, error) {
return c.globalPinInfoCid("TrackerStatus", h)
}
@ -521,14 +522,13 @@ func (c *Cluster) Status(h *cid.Cid) (GlobalPinInfo, error) {
//
// SyncAllLocal returns the list of PinInfo that where updated because of
// the operation, along with those in error states.
func (c *Cluster) SyncAllLocal() ([]PinInfo, error) {
func (c *Cluster) SyncAllLocal() ([]api.PinInfo, error) {
syncedItems, err := c.tracker.SyncAll()
// Despite errors, tracker provides synced items that we can provide.
// They encapsulate the error.
if err != nil {
logger.Error("tracker.Sync() returned with error: ", err)
logger.Error("Is the ipfs daemon running?")
logger.Error("LocalSync returning without attempting recovers")
}
return syncedItems, err
}
@ -536,7 +536,7 @@ func (c *Cluster) SyncAllLocal() ([]PinInfo, error) {
// SyncLocal performs a local sync operation for the given Cid. This will
// tell the tracker to verify the status of the Cid against the IPFS daemon.
// It returns the updated PinInfo for the Cid.
func (c *Cluster) SyncLocal(h *cid.Cid) (PinInfo, error) {
func (c *Cluster) SyncLocal(h *cid.Cid) (api.PinInfo, error) {
var err error
pInfo, err := c.tracker.Sync(h)
// Despite errors, trackers provides an updated PinInfo so
@ -549,24 +549,24 @@ func (c *Cluster) SyncLocal(h *cid.Cid) (PinInfo, error) {
}
// SyncAll triggers LocalSync() operations in all cluster peers.
func (c *Cluster) SyncAll() ([]GlobalPinInfo, error) {
func (c *Cluster) SyncAll() ([]api.GlobalPinInfo, error) {
return c.globalPinInfoSlice("SyncAllLocal")
}
// Sync triggers a LocalSyncCid() operation for a given Cid
// in all cluster peers.
func (c *Cluster) Sync(h *cid.Cid) (GlobalPinInfo, error) {
func (c *Cluster) Sync(h *cid.Cid) (api.GlobalPinInfo, error) {
return c.globalPinInfoCid("SyncLocal", h)
}
// RecoverLocal triggers a recover operation for a given Cid
func (c *Cluster) RecoverLocal(h *cid.Cid) (PinInfo, error) {
func (c *Cluster) RecoverLocal(h *cid.Cid) (api.PinInfo, error) {
return c.tracker.Recover(h)
}
// Recover triggers a recover operation for a given Cid in all
// cluster peers.
func (c *Cluster) Recover(h *cid.Cid) (GlobalPinInfo, error) {
func (c *Cluster) Recover(h *cid.Cid) (api.GlobalPinInfo, error) {
return c.globalPinInfoCid("TrackerRecover", h)
}
@ -620,10 +620,10 @@ func (c *Cluster) Version() string {
}
// Peers returns the IDs of the members of this Cluster
func (c *Cluster) Peers() []ID {
func (c *Cluster) Peers() []api.ID {
members := c.peerManager.peers()
peersSerial := make([]IDSerial, len(members), len(members))
peers := make([]ID, len(members), len(members))
peersSerial := make([]api.IDSerial, len(members), len(members))
peers := make([]api.ID, len(members), len(members))
errs := c.multiRPC(members, "Cluster", "ID", struct{}{},
copyIDSerialsToIfaces(peersSerial))
@ -697,25 +697,32 @@ func (c *Cluster) multiRPC(dests []peer.ID, svcName, svcMethod string, args inte
}
func (c *Cluster) globalPinInfoCid(method string, h *cid.Cid) (GlobalPinInfo, error) {
pin := GlobalPinInfo{
func (c *Cluster) globalPinInfoCid(method string, h *cid.Cid) (api.GlobalPinInfo, error) {
pin := api.GlobalPinInfo{
Cid: h,
PeerMap: make(map[peer.ID]PinInfo),
PeerMap: make(map[peer.ID]api.PinInfo),
}
members := c.peerManager.peers()
replies := make([]PinInfo, len(members), len(members))
args := NewCidArg(h)
errs := c.multiRPC(members, "Cluster", method, args, copyPinInfoToIfaces(replies))
replies := make([]api.PinInfoSerial, len(members), len(members))
arg := api.CidArg{
Cid: h,
}
errs := c.multiRPC(members,
"Cluster",
method, arg.ToSerial(),
copyPinInfoSerialToIfaces(replies))
for i, r := range replies {
if e := errs[i]; e != nil { // This error must come from not being able to contact that cluster member
for i, rserial := range replies {
r := rserial.ToPinInfo()
if e := errs[i]; e != nil {
if r.Status == api.TrackerStatusBug {
// This error must come from not being able to contact that cluster member
logger.Errorf("%s: error in broadcast response from %s: %s ", c.host.ID(), members[i], e)
if r.Status == TrackerStatusBug {
r = PinInfo{
CidStr: h.String(),
r = api.PinInfo{
Cid: r.Cid,
Peer: members[i],
Status: TrackerStatusClusterError,
Status: api.TrackerStatusClusterError,
TS: time.Now(),
Error: e.Error(),
}
@ -729,22 +736,25 @@ func (c *Cluster) globalPinInfoCid(method string, h *cid.Cid) (GlobalPinInfo, er
return pin, nil
}
func (c *Cluster) globalPinInfoSlice(method string) ([]GlobalPinInfo, error) {
var infos []GlobalPinInfo
fullMap := make(map[string]GlobalPinInfo)
func (c *Cluster) globalPinInfoSlice(method string) ([]api.GlobalPinInfo, error) {
var infos []api.GlobalPinInfo
fullMap := make(map[string]api.GlobalPinInfo)
members := c.peerManager.peers()
replies := make([][]PinInfo, len(members), len(members))
errs := c.multiRPC(members, "Cluster", method, struct{}{}, copyPinInfoSliceToIfaces(replies))
replies := make([][]api.PinInfoSerial, len(members), len(members))
errs := c.multiRPC(members,
"Cluster",
method, struct{}{},
copyPinInfoSerialSliceToIfaces(replies))
mergePins := func(pins []PinInfo) {
for _, p := range pins {
item, ok := fullMap[p.CidStr]
c, _ := cid.Decode(p.CidStr)
mergePins := func(pins []api.PinInfoSerial) {
for _, pserial := range pins {
p := pserial.ToPinInfo()
item, ok := fullMap[pserial.Cid]
if !ok {
fullMap[p.CidStr] = GlobalPinInfo{
Cid: c,
PeerMap: map[peer.ID]PinInfo{
fullMap[pserial.Cid] = api.GlobalPinInfo{
Cid: p.Cid,
PeerMap: map[peer.ID]api.PinInfo{
p.Peer: p,
},
}
@ -766,11 +776,12 @@ func (c *Cluster) globalPinInfoSlice(method string) ([]GlobalPinInfo, error) {
// Merge any errors
for p, msg := range erroredPeers {
for c := range fullMap {
fullMap[c].PeerMap[p] = PinInfo{
CidStr: c,
for cidStr := range fullMap {
c, _ := cid.Decode(cidStr)
fullMap[cidStr].PeerMap[p] = api.PinInfo{
Cid: c,
Peer: p,
Status: TrackerStatusClusterError,
Status: api.TrackerStatusClusterError,
TS: time.Now(),
Error: msg,
}
@ -784,8 +795,8 @@ func (c *Cluster) globalPinInfoSlice(method string) ([]GlobalPinInfo, error) {
return infos, nil
}
func (c *Cluster) getIDForPeer(pid peer.ID) (ID, error) {
idSerial := ID{ID: pid}.ToSerial()
func (c *Cluster) getIDForPeer(pid peer.ID) (api.ID, error) {
idSerial := api.ID{ID: pid}.ToSerial()
err := c.rpcClient.Call(
pid, "Cluster", "ID", struct{}{}, &idSerial)
id := idSerial.ToID()

View File

@ -4,6 +4,8 @@ import (
"errors"
"testing"
"github.com/ipfs/ipfs-cluster/api"
rpc "github.com/hsanjuan/go-libp2p-gorpc"
cid "github.com/ipfs/go-cid"
)
@ -30,11 +32,11 @@ type mockConnector struct {
mockComponent
}
func (ipfs *mockConnector) ID() (IPFSID, error) {
func (ipfs *mockConnector) ID() (api.IPFSID, error) {
if ipfs.returnError {
return IPFSID{}, errors.New("")
return api.IPFSID{}, errors.New("")
}
return IPFSID{
return api.IPFSID{
ID: testPeerID,
}, nil
}
@ -53,18 +55,18 @@ func (ipfs *mockConnector) Unpin(c *cid.Cid) error {
return nil
}
func (ipfs *mockConnector) PinLsCid(c *cid.Cid) (IPFSPinStatus, error) {
func (ipfs *mockConnector) PinLsCid(c *cid.Cid) (api.IPFSPinStatus, error) {
if ipfs.returnError {
return IPFSPinStatusError, errors.New("")
return api.IPFSPinStatusError, errors.New("")
}
return IPFSPinStatusRecursive, nil
return api.IPFSPinStatusRecursive, nil
}
func (ipfs *mockConnector) PinLs() (map[string]IPFSPinStatus, error) {
func (ipfs *mockConnector) PinLs() (map[string]api.IPFSPinStatus, error) {
if ipfs.returnError {
return nil, errors.New("")
}
m := make(map[string]IPFSPinStatus)
m := make(map[string]api.IPFSPinStatus)
return m, nil
}
@ -109,7 +111,7 @@ func TestClusterStateSync(t *testing.T) {
defer cl.Shutdown()
_, err := cl.StateSync()
if err == nil {
t.Error("expected an error as there is no state to sync")
t.Fatal("expected an error as there is no state to sync")
}
c, _ := cid.Decode(testCid)
@ -146,9 +148,9 @@ func TestClusterID(t *testing.T) {
if id.Version != Version {
t.Error("version should match current version")
}
if id.PublicKey == nil {
t.Error("publicKey should not be empty")
}
//if id.PublicKey == nil {
// t.Error("publicKey should not be empty")
//}
}
func TestClusterPin(t *testing.T) {

View File

@ -6,6 +6,8 @@ import (
"sync"
"time"
"github.com/ipfs/ipfs-cluster/api"
rpc "github.com/hsanjuan/go-libp2p-gorpc"
cid "github.com/ipfs/go-cid"
consensus "github.com/libp2p/go-libp2p-consensus"
@ -65,7 +67,7 @@ func (op *clusterLogOp) ApplyTo(cstate consensus.State) (consensus.State, error)
op.rpcClient.Go("",
"Cluster",
"Track",
NewCidArg(c),
api.CidArg{c}.ToSerial(),
&struct{}{},
nil)
case LogOpUnpin:
@ -81,7 +83,7 @@ func (op *clusterLogOp) ApplyTo(cstate consensus.State) (consensus.State, error)
op.rpcClient.Go("",
"Cluster",
"Untrack",
NewCidArg(c),
api.CidArg{c}.ToSerial(),
&struct{}{},
nil)
case LogOpAddPeer:
@ -92,7 +94,7 @@ func (op *clusterLogOp) ApplyTo(cstate consensus.State) (consensus.State, error)
op.rpcClient.Call("",
"Cluster",
"PeerManagerAddPeer",
MultiaddrToSerial(addr),
api.MultiaddrToSerial(addr),
&struct{}{})
// TODO rebalance ops
case LogOpRmPeer:
@ -231,13 +233,13 @@ func (cc *Consensus) finishBootstrap() {
if err != nil {
logger.Debug("skipping state sync: ", err)
} else {
var pInfo []PinInfo
var pInfoSerial []api.PinInfoSerial
cc.rpcClient.Go(
"",
"Cluster",
"StateSync",
struct{}{},
&pInfo,
&pInfoSerial,
nil)
}
cc.readyCh <- struct{}{}
@ -341,7 +343,8 @@ func (cc *Consensus) logOpCid(rpcOp string, opType clusterLogOpType, c *cid.Cid)
var finalErr error
for i := 0; i < CommitRetries; i++ {
logger.Debugf("Try %d", i)
redirected, err := cc.redirectToLeader(rpcOp, NewCidArg(c))
redirected, err := cc.redirectToLeader(
rpcOp, api.CidArg{c}.ToSerial())
if err != nil {
finalErr = err
continue
@ -395,7 +398,8 @@ func (cc *Consensus) LogAddPeer(addr ma.Multiaddr) error {
var finalErr error
for i := 0; i < CommitRetries; i++ {
logger.Debugf("Try %d", i)
redirected, err := cc.redirectToLeader("ConsensusLogAddPeer", MultiaddrToSerial(addr))
redirected, err := cc.redirectToLeader(
"ConsensusLogAddPeer", api.MultiaddrToSerial(addr))
if err != nil {
finalErr = err
continue

View File

@ -214,7 +214,7 @@ func run(c *cli.Context) error {
if a := c.String("bootstrap"); a != "" {
if len(cfg.ClusterPeers) > 0 && !c.Bool("force") {
return errors.New("The configuration provides ClusterPeers. Use -f to ignore and proceed bootstrapping")
return errors.New("the configuration provides ClusterPeers. Use -f to ignore and proceed bootstrapping")
}
joinAddr, err := ma.NewMultiaddr(a)
if err != nil {

View File

@ -14,6 +14,8 @@ import (
"sync"
"time"
"github.com/ipfs/ipfs-cluster/api"
rpc "github.com/hsanjuan/go-libp2p-gorpc"
cid "github.com/ipfs/go-cid"
peer "github.com/libp2p/go-libp2p-peer"
@ -239,7 +241,7 @@ func (ipfs *IPFSHTTPConnector) pinOpHandler(op string, w http.ResponseWriter, r
err = ipfs.rpcClient.Call("",
"Cluster",
op,
&CidArg{arg},
api.CidArgSerial{arg},
&struct{}{})
if err != nil {
@ -345,8 +347,8 @@ func (ipfs *IPFSHTTPConnector) Shutdown() error {
// If the request fails, or the parsing fails, it
// returns an error and an empty IPFSID which also
// contains the error message.
func (ipfs *IPFSHTTPConnector) ID() (IPFSID, error) {
id := IPFSID{}
func (ipfs *IPFSHTTPConnector) ID() (api.IPFSID, error) {
id := api.IPFSID{}
body, err := ipfs.get("id")
if err != nil {
id.Error = err.Error()
@ -420,22 +422,9 @@ func (ipfs *IPFSHTTPConnector) Unpin(hash *cid.Cid) error {
return nil
}
func parseIPFSPinType(t string) IPFSPinStatus {
switch {
case t == "indirect":
return IPFSPinStatusIndirect
case t == "direct":
return IPFSPinStatusDirect
case t == "recursive":
return IPFSPinStatusRecursive
default:
return IPFSPinStatusBug
}
}
// PinLs performs a "pin ls" request against the configured IPFS daemon and
// returns a map of cid strings and their status.
func (ipfs *IPFSHTTPConnector) PinLs() (map[string]IPFSPinStatus, error) {
func (ipfs *IPFSHTTPConnector) PinLs() (map[string]api.IPFSPinStatus, error) {
body, err := ipfs.get("pin/ls")
// Some error talking to the daemon
@ -451,27 +440,27 @@ func (ipfs *IPFSHTTPConnector) PinLs() (map[string]IPFSPinStatus, error) {
return nil, err
}
statusMap := make(map[string]IPFSPinStatus)
statusMap := make(map[string]api.IPFSPinStatus)
for k, v := range resp.Keys {
statusMap[k] = parseIPFSPinType(v.Type)
statusMap[k] = api.IPFSPinStatusFromString(v.Type)
}
return statusMap, nil
}
// PinLsCid performs a "pin ls <hash> "request and returns IPFSPinStatus for
// that hash.
func (ipfs *IPFSHTTPConnector) PinLsCid(hash *cid.Cid) (IPFSPinStatus, error) {
func (ipfs *IPFSHTTPConnector) PinLsCid(hash *cid.Cid) (api.IPFSPinStatus, error) {
lsPath := fmt.Sprintf("pin/ls?arg=%s", hash)
body, err := ipfs.get(lsPath)
// Network error, daemon down
if body == nil && err != nil {
return IPFSPinStatusError, err
return api.IPFSPinStatusError, err
}
// Pin not found likely here
if err != nil { // Not pinned
return IPFSPinStatusUnpinned, nil
return api.IPFSPinStatusUnpinned, nil
}
var resp ipfsPinLsResp
@ -479,14 +468,14 @@ func (ipfs *IPFSHTTPConnector) PinLsCid(hash *cid.Cid) (IPFSPinStatus, error) {
if err != nil {
logger.Error("parsing pin/ls?arg=cid response:")
logger.Error(string(body))
return IPFSPinStatusError, err
return api.IPFSPinStatusError, err
}
pinObj, ok := resp.Keys[hash.String()]
if !ok {
return IPFSPinStatusError, errors.New("expected to find the pin in the response")
return api.IPFSPinStatusError, errors.New("expected to find the pin in the response")
}
return parseIPFSPinType(pinObj.Type), nil
return api.IPFSPinStatusFromString(pinObj.Type), nil
}
// get performs the heavy lifting of a get request against

View File

@ -7,6 +7,8 @@ import (
"net/http"
"testing"
"github.com/ipfs/ipfs-cluster/api"
cid "github.com/ipfs/go-cid"
ma "github.com/multiformats/go-multiaddr"
)
@ -116,7 +118,7 @@ func TestIPFSPinLsCid(t *testing.T) {
}
ips, err = ipfs.PinLsCid(c2)
if err != nil || ips != IPFSPinStatusUnpinned {
if err != nil || ips != api.IPFSPinStatusUnpinned {
t.Error("c2 should appear unpinned")
}
}

View File

@ -9,106 +9,17 @@
package ipfscluster
import (
"time"
rpc "github.com/hsanjuan/go-libp2p-gorpc"
cid "github.com/ipfs/go-cid"
crypto "github.com/libp2p/go-libp2p-crypto"
peer "github.com/libp2p/go-libp2p-peer"
protocol "github.com/libp2p/go-libp2p-protocol"
ma "github.com/multiformats/go-multiaddr"
"github.com/ipfs/ipfs-cluster/api"
)
// RPCProtocol is used to send libp2p messages between cluster peers
var RPCProtocol = protocol.ID("/ipfscluster/" + Version + "/rpc")
// TrackerStatus values
const (
// IPFSStatus should never take this value
TrackerStatusBug = iota
// The cluster node is offline or not responding
TrackerStatusClusterError
// An error occurred pinning
TrackerStatusPinError
// An error occurred unpinning
TrackerStatusUnpinError
// The IPFS daemon has pinned the item
TrackerStatusPinned
// The IPFS daemon is currently pinning the item
TrackerStatusPinning
// The IPFS daemon is currently unpinning the item
TrackerStatusUnpinning
// The IPFS daemon is not pinning the item
TrackerStatusUnpinned
// The IPFS deamon is not pinning the item but it is being tracked
TrackerStatusRemotePin
)
// TrackerStatus represents the status of a tracked Cid in the PinTracker
type TrackerStatus int
// IPFSPinStatus values
const (
IPFSPinStatusBug = iota
IPFSPinStatusError
IPFSPinStatusDirect
IPFSPinStatusRecursive
IPFSPinStatusIndirect
IPFSPinStatusUnpinned
)
// IPFSPinStatus represents the status of a pin in IPFS (direct, recursive etc.)
type IPFSPinStatus int
// IsPinned returns true if the status is Direct or Recursive
func (ips IPFSPinStatus) IsPinned() bool {
return ips == IPFSPinStatusDirect || ips == IPFSPinStatusRecursive
}
// GlobalPinInfo contains cluster-wide status information about a tracked Cid,
// indexed by cluster peer.
type GlobalPinInfo struct {
Cid *cid.Cid
PeerMap map[peer.ID]PinInfo
}
// PinInfo holds information about local pins. PinInfo is
// serialized when requesting the Global status, therefore
// we cannot use *cid.Cid.
type PinInfo struct {
CidStr string
Peer peer.ID
Status TrackerStatus
TS time.Time
Error string
}
// String converts an IPFSStatus into a readable string.
func (st TrackerStatus) String() string {
switch st {
case TrackerStatusBug:
return "bug"
case TrackerStatusClusterError:
return "cluster_error"
case TrackerStatusPinError:
return "pin_error"
case TrackerStatusUnpinError:
return "unpin_error"
case TrackerStatusPinned:
return "pinned"
case TrackerStatusPinning:
return "pinning"
case TrackerStatusUnpinning:
return "unpinning"
case TrackerStatusUnpinned:
return "unpinned"
case TrackerStatusRemotePin:
return "remote"
default:
return ""
}
}
// Component represents a piece of ipfscluster. Cluster components
// usually run their own goroutines (a http server for example). They
// communicate with the main Cluster component and other components
@ -128,11 +39,11 @@ type API interface {
// an IPFS daemon. This is a base component.
type IPFSConnector interface {
Component
ID() (IPFSID, error)
ID() (api.IPFSID, error)
Pin(*cid.Cid) error
Unpin(*cid.Cid) error
PinLsCid(*cid.Cid) (IPFSPinStatus, error)
PinLs() (map[string]IPFSPinStatus, error)
PinLsCid(*cid.Cid) (api.IPFSPinStatus, error)
PinLs() (map[string]api.IPFSPinStatus, error)
}
// Peered represents a component which needs to be aware of the peers
@ -170,154 +81,15 @@ type PinTracker interface {
// may perform an IPFS unpin operation.
Untrack(*cid.Cid) error
// StatusAll returns the list of pins with their local status.
StatusAll() []PinInfo
StatusAll() []api.PinInfo
// Status returns the local status of a given Cid.
Status(*cid.Cid) PinInfo
Status(*cid.Cid) api.PinInfo
// SyncAll makes sure that all tracked Cids reflect the real IPFS status.
// It returns the list of pins which were updated by the call.
SyncAll() ([]PinInfo, error)
SyncAll() ([]api.PinInfo, error)
// Sync makes sure that the Cid status reflect the real IPFS status.
// It returns the local status of the Cid.
Sync(*cid.Cid) (PinInfo, error)
Sync(*cid.Cid) (api.PinInfo, error)
// Recover retriggers a Pin/Unpin operation in Cids with error status.
Recover(*cid.Cid) (PinInfo, error)
}
// IPFSID is used to store information about the underlying IPFS daemon
type IPFSID struct {
ID peer.ID
Addresses []ma.Multiaddr
Error string
}
// IPFSIDSerial is the serializable IPFSID for RPC requests
type IPFSIDSerial struct {
ID string
Addresses MultiaddrsSerial
Error string
}
// ToSerial converts IPFSID to a go serializable object
func (id *IPFSID) ToSerial() IPFSIDSerial {
return IPFSIDSerial{
ID: peer.IDB58Encode(id.ID),
Addresses: MultiaddrsToSerial(id.Addresses),
Error: id.Error,
}
}
// ToID converts an IPFSIDSerial to IPFSID
// It will ignore any errors when parsing the fields.
func (ids *IPFSIDSerial) ToID() IPFSID {
id := IPFSID{}
if pID, err := peer.IDB58Decode(ids.ID); err == nil {
id.ID = pID
}
id.Addresses = ids.Addresses.ToMultiaddrs()
id.Error = ids.Error
return id
}
// ID holds information about the Cluster peer
type ID struct {
ID peer.ID
PublicKey crypto.PubKey
Addresses []ma.Multiaddr
ClusterPeers []ma.Multiaddr
Version string
Commit string
RPCProtocolVersion protocol.ID
Error string
IPFS IPFSID
}
// IDSerial is the serializable ID counterpart for RPC requests
type IDSerial struct {
ID string
PublicKey []byte
Addresses MultiaddrsSerial
ClusterPeers MultiaddrsSerial
Version string
Commit string
RPCProtocolVersion string
Error string
IPFS IPFSIDSerial
}
// ToSerial converts an ID to its Go-serializable version
func (id ID) ToSerial() IDSerial {
var pkey []byte
if id.PublicKey != nil {
pkey, _ = id.PublicKey.Bytes()
}
return IDSerial{
ID: peer.IDB58Encode(id.ID),
PublicKey: pkey,
Addresses: MultiaddrsToSerial(id.Addresses),
ClusterPeers: MultiaddrsToSerial(id.ClusterPeers),
Version: id.Version,
Commit: id.Commit,
RPCProtocolVersion: string(id.RPCProtocolVersion),
Error: id.Error,
IPFS: id.IPFS.ToSerial(),
}
}
// ToID converts an IDSerial object to ID.
// It will ignore any errors when parsing the fields.
func (ids IDSerial) ToID() ID {
id := ID{}
if pID, err := peer.IDB58Decode(ids.ID); err == nil {
id.ID = pID
}
if pkey, err := crypto.UnmarshalPublicKey(ids.PublicKey); err == nil {
id.PublicKey = pkey
}
id.Addresses = ids.Addresses.ToMultiaddrs()
id.ClusterPeers = ids.ClusterPeers.ToMultiaddrs()
id.Version = ids.Version
id.Commit = ids.Commit
id.RPCProtocolVersion = protocol.ID(ids.RPCProtocolVersion)
id.Error = ids.Error
id.IPFS = ids.IPFS.ToID()
return id
}
// MultiaddrSerial is a Multiaddress in a serializable form
type MultiaddrSerial []byte
// MultiaddrsSerial is an array of Multiaddresses in serializable form
type MultiaddrsSerial []MultiaddrSerial
// MultiaddrToSerial converts a Multiaddress to its serializable form
func MultiaddrToSerial(addr ma.Multiaddr) MultiaddrSerial {
return addr.Bytes()
}
// ToMultiaddr converts a serializable Multiaddress to its original type.
// All errors are ignored.
func (addrS MultiaddrSerial) ToMultiaddr() ma.Multiaddr {
a, _ := ma.NewMultiaddrBytes(addrS)
return a
}
// MultiaddrsToSerial converts a slice of Multiaddresses to its
// serializable form.
func MultiaddrsToSerial(addrs []ma.Multiaddr) MultiaddrsSerial {
addrsS := make([]MultiaddrSerial, len(addrs), len(addrs))
for i, a := range addrs {
addrsS[i] = MultiaddrToSerial(a)
}
return addrsS
}
// ToMultiaddrs converts MultiaddrsSerial back to a slice of Multiaddresses
func (addrsS MultiaddrsSerial) ToMultiaddrs() []ma.Multiaddr {
addrs := make([]ma.Multiaddr, len(addrsS), len(addrsS))
for i, addrS := range addrsS {
addrs[i] = addrS.ToMultiaddr()
}
return addrs
Recover(*cid.Cid) (api.PinInfo, error)
}

View File

@ -8,6 +8,8 @@ import (
"testing"
"time"
"github.com/ipfs/ipfs-cluster/api"
cid "github.com/ipfs/go-cid"
crypto "github.com/libp2p/go-libp2p-crypto"
peer "github.com/libp2p/go-libp2p-peer"
@ -222,8 +224,8 @@ func TestClustersPeers(t *testing.T) {
t.Fatal("expected as many peers as clusters")
}
clusterIDMap := make(map[peer.ID]ID)
peerIDMap := make(map[peer.ID]ID)
clusterIDMap := make(map[peer.ID]api.ID)
peerIDMap := make(map[peer.ID]api.ID)
for _, c := range clusters {
id := c.ID()
@ -239,9 +241,9 @@ func TestClustersPeers(t *testing.T) {
if !ok {
t.Fatal("expected id in both maps")
}
if !crypto.KeyEqual(id.PublicKey, id2.PublicKey) {
t.Error("expected same public key")
}
//if !crypto.KeyEqual(id.PublicKey, id2.PublicKey) {
// t.Error("expected same public key")
//}
if id.IPFS.ID != id2.IPFS.ID {
t.Error("expected same ipfs daemon ID")
}
@ -271,9 +273,9 @@ func TestClustersPin(t *testing.T) {
fpinned := func(t *testing.T, c *Cluster) {
status := c.tracker.StatusAll()
for _, v := range status {
if v.Status != TrackerStatusPinned {
if v.Status != api.TrackerStatusPinned {
t.Errorf("%s should have been pinned but it is %s",
v.CidStr,
v.Cid,
v.Status.String())
}
}
@ -334,7 +336,7 @@ func TestClustersStatusAll(t *testing.T) {
t.Error("bad info in status")
}
if info[c.host.ID()].Status != TrackerStatusPinned {
if info[c.host.ID()].Status != api.TrackerStatusPinned {
t.Error("the hash should have been pinned")
}
@ -348,7 +350,7 @@ func TestClustersStatusAll(t *testing.T) {
t.Fatal("Host not in status")
}
if pinfo.Status != TrackerStatusPinned {
if pinfo.Status != api.TrackerStatusPinned {
t.Error("the status should show the hash as pinned")
}
}
@ -375,7 +377,7 @@ func TestClustersSyncAllLocal(t *testing.T) {
t.Fatal("expected 1 elem slice")
}
// Last-known state may still be pinning
if infos[0].Status != TrackerStatusPinError && infos[0].Status != TrackerStatusPinning {
if infos[0].Status != api.TrackerStatusPinError && infos[0].Status != api.TrackerStatusPinning {
t.Error("element should be in Pinning or PinError state")
}
}
@ -397,7 +399,7 @@ func TestClustersSyncLocal(t *testing.T) {
if err != nil {
t.Error(err)
}
if info.Status != TrackerStatusPinError && info.Status != TrackerStatusPinning {
if info.Status != api.TrackerStatusPinError && info.Status != api.TrackerStatusPinning {
t.Errorf("element is %s and not PinError", info.Status)
}
@ -406,7 +408,7 @@ func TestClustersSyncLocal(t *testing.T) {
if err != nil {
t.Error(err)
}
if info.Status != TrackerStatusPinned {
if info.Status != api.TrackerStatusPinned {
t.Error("element should be in Pinned state")
}
}
@ -439,7 +441,7 @@ func TestClustersSyncAll(t *testing.T) {
if !ok {
t.Fatal("GlobalPinInfo should have this cluster")
}
if inf.Status != TrackerStatusPinError && inf.Status != TrackerStatusPinning {
if inf.Status != api.TrackerStatusPinError && inf.Status != api.TrackerStatusPinning {
t.Error("should be PinError in all peers")
}
}
@ -480,7 +482,7 @@ func TestClustersSync(t *testing.T) {
t.Fatal("GlobalPinInfo should not be empty for this host")
}
if inf.Status != TrackerStatusPinError && inf.Status != TrackerStatusPinning {
if inf.Status != api.TrackerStatusPinError && inf.Status != api.TrackerStatusPinning {
t.Error("should be PinError or Pinning in all peers")
}
}
@ -500,7 +502,7 @@ func TestClustersSync(t *testing.T) {
if !ok {
t.Fatal("GlobalPinInfo should have this cluster")
}
if inf.Status != TrackerStatusPinned {
if inf.Status != api.TrackerStatusPinned {
t.Error("the GlobalPinInfo should show Pinned in all peers")
}
}
@ -521,7 +523,7 @@ func TestClustersRecoverLocal(t *testing.T) {
if err == nil {
t.Error("expected an error recovering")
}
if info.Status != TrackerStatusPinError {
if info.Status != api.TrackerStatusPinError {
t.Errorf("element is %s and not PinError", info.Status)
}
@ -530,7 +532,7 @@ func TestClustersRecoverLocal(t *testing.T) {
if err != nil {
t.Error(err)
}
if info.Status != TrackerStatusPinned {
if info.Status != api.TrackerStatusPinned {
t.Error("element should be in Pinned state")
}
}
@ -566,11 +568,11 @@ func TestClustersRecover(t *testing.T) {
for _, c := range clusters {
inf, ok := ginfo.PeerMap[c.host.ID()]
if !ok {
t.Logf("%+v", ginfo)
t.Fatal("GlobalPinInfo should not be empty for this host")
}
if inf.Status != TrackerStatusPinError {
if inf.Status != api.TrackerStatusPinError {
t.Logf("%+v", inf)
t.Error("should be PinError in all peers")
}
}
@ -590,7 +592,7 @@ func TestClustersRecover(t *testing.T) {
if !ok {
t.Fatal("GlobalPinInfo should have this cluster")
}
if inf.Status != TrackerStatusPinned {
if inf.Status != api.TrackerStatusPinned {
t.Error("the GlobalPinInfo should show Pinned in all peers")
}
}

View File

@ -6,6 +6,8 @@ import (
"sync"
"time"
"github.com/ipfs/ipfs-cluster/api"
rpc "github.com/hsanjuan/go-libp2p-gorpc"
cid "github.com/ipfs/go-cid"
peer "github.com/libp2p/go-libp2p-peer"
@ -30,7 +32,7 @@ var (
// to store the status of the tracked Cids. This component is thread-safe.
type MapPinTracker struct {
mux sync.RWMutex
status map[string]PinInfo
status map[string]api.PinInfo
ctx context.Context
rpcClient *rpc.Client
@ -50,7 +52,7 @@ func NewMapPinTracker(cfg *Config) *MapPinTracker {
mpt := &MapPinTracker{
ctx: ctx,
status: make(map[string]PinInfo),
status: make(map[string]api.PinInfo),
rpcReady: make(chan struct{}, 1),
peerID: cfg.ID,
shutdownCh: make(chan struct{}, 1),
@ -92,21 +94,20 @@ func (mpt *MapPinTracker) Shutdown() error {
return nil
}
func (mpt *MapPinTracker) set(c *cid.Cid, s TrackerStatus) {
func (mpt *MapPinTracker) set(c *cid.Cid, s api.TrackerStatus) {
mpt.mux.Lock()
defer mpt.mux.Unlock()
mpt.unsafeSet(c, s)
}
func (mpt *MapPinTracker) unsafeSet(c *cid.Cid, s TrackerStatus) {
if s == TrackerStatusUnpinned {
func (mpt *MapPinTracker) unsafeSet(c *cid.Cid, s api.TrackerStatus) {
if s == api.TrackerStatusUnpinned {
delete(mpt.status, c.String())
return
}
mpt.status[c.String()] = PinInfo{
// cid: c,
CidStr: c.String(),
mpt.status[c.String()] = api.PinInfo{
Cid: c,
Peer: mpt.peerID,
Status: s,
TS: time.Now(),
@ -114,19 +115,19 @@ func (mpt *MapPinTracker) unsafeSet(c *cid.Cid, s TrackerStatus) {
}
}
func (mpt *MapPinTracker) get(c *cid.Cid) PinInfo {
func (mpt *MapPinTracker) get(c *cid.Cid) api.PinInfo {
mpt.mux.RLock()
defer mpt.mux.RUnlock()
return mpt.unsafeGet(c)
}
func (mpt *MapPinTracker) unsafeGet(c *cid.Cid) PinInfo {
func (mpt *MapPinTracker) unsafeGet(c *cid.Cid) api.PinInfo {
p, ok := mpt.status[c.String()]
if !ok {
return PinInfo{
CidStr: c.String(),
return api.PinInfo{
Cid: c,
Peer: mpt.peerID,
Status: TrackerStatusUnpinned,
Status: api.TrackerStatusUnpinned,
TS: time.Now(),
Error: "",
}
@ -144,19 +145,19 @@ func (mpt *MapPinTracker) setError(c *cid.Cid, err error) {
func (mpt *MapPinTracker) unsafeSetError(c *cid.Cid, err error) {
p := mpt.unsafeGet(c)
switch p.Status {
case TrackerStatusPinned, TrackerStatusPinning, TrackerStatusPinError:
mpt.status[c.String()] = PinInfo{
CidStr: c.String(),
case api.TrackerStatusPinned, api.TrackerStatusPinning, api.TrackerStatusPinError:
mpt.status[c.String()] = api.PinInfo{
Cid: c,
Peer: mpt.peerID,
Status: TrackerStatusPinError,
Status: api.TrackerStatusPinError,
TS: time.Now(),
Error: err.Error(),
}
case TrackerStatusUnpinned, TrackerStatusUnpinning, TrackerStatusUnpinError:
mpt.status[c.String()] = PinInfo{
CidStr: c.String(),
case api.TrackerStatusUnpinned, api.TrackerStatusUnpinning, api.TrackerStatusUnpinError:
mpt.status[c.String()] = api.PinInfo{
Cid: c,
Peer: mpt.peerID,
Status: TrackerStatusUnpinError,
Status: api.TrackerStatusUnpinError,
TS: time.Now(),
Error: err.Error(),
}
@ -164,33 +165,33 @@ func (mpt *MapPinTracker) unsafeSetError(c *cid.Cid, err error) {
}
func (mpt *MapPinTracker) pin(c *cid.Cid) error {
mpt.set(c, TrackerStatusPinning)
mpt.set(c, api.TrackerStatusPinning)
err := mpt.rpcClient.Call("",
"Cluster",
"IPFSPin",
NewCidArg(c),
api.CidArg{c}.ToSerial(),
&struct{}{})
if err != nil {
mpt.setError(c, err)
return err
}
mpt.set(c, TrackerStatusPinned)
mpt.set(c, api.TrackerStatusPinned)
return nil
}
func (mpt *MapPinTracker) unpin(c *cid.Cid) error {
mpt.set(c, TrackerStatusUnpinning)
mpt.set(c, api.TrackerStatusUnpinning)
err := mpt.rpcClient.Call("",
"Cluster",
"IPFSUnpin",
NewCidArg(c),
api.CidArg{c}.ToSerial(),
&struct{}{})
if err != nil {
mpt.setError(c, err)
return err
}
mpt.set(c, TrackerStatusUnpinned)
mpt.set(c, api.TrackerStatusUnpinned)
return nil
}
@ -208,16 +209,16 @@ func (mpt *MapPinTracker) Untrack(c *cid.Cid) error {
// Status returns information for a Cid tracked by this
// MapPinTracker.
func (mpt *MapPinTracker) Status(c *cid.Cid) PinInfo {
func (mpt *MapPinTracker) Status(c *cid.Cid) api.PinInfo {
return mpt.get(c)
}
// StatusAll returns information for all Cids tracked by this
// MapPinTracker.
func (mpt *MapPinTracker) StatusAll() []PinInfo {
func (mpt *MapPinTracker) StatusAll() []api.PinInfo {
mpt.mux.Lock()
defer mpt.mux.Unlock()
pins := make([]PinInfo, 0, len(mpt.status))
pins := make([]api.PinInfo, 0, len(mpt.status))
for _, v := range mpt.status {
pins = append(pins, v)
}
@ -232,12 +233,12 @@ func (mpt *MapPinTracker) StatusAll() []PinInfo {
// Pins in error states can be recovered with Recover().
// An error is returned if we are unable to contact
// the IPFS daemon.
func (mpt *MapPinTracker) Sync(c *cid.Cid) (PinInfo, error) {
var ips IPFSPinStatus
func (mpt *MapPinTracker) Sync(c *cid.Cid) (api.PinInfo, error) {
var ips api.IPFSPinStatus
err := mpt.rpcClient.Call("",
"Cluster",
"IPFSPinLsCid",
NewCidArg(c),
api.CidArg{c}.ToSerial(),
&ips)
if err != nil {
mpt.setError(c, err)
@ -254,9 +255,9 @@ func (mpt *MapPinTracker) Sync(c *cid.Cid) (PinInfo, error) {
// were updated or have errors. Cids in error states can be recovered
// with Recover().
// An error is returned if we are unable to contact the IPFS daemon.
func (mpt *MapPinTracker) SyncAll() ([]PinInfo, error) {
var ipsMap map[string]IPFSPinStatus
var pInfos []PinInfo
func (mpt *MapPinTracker) SyncAll() ([]api.PinInfo, error) {
var ipsMap map[string]api.IPFSPinStatus
var pInfos []api.PinInfo
err := mpt.rpcClient.Call("",
"Cluster",
"IPFSPinLs",
@ -275,56 +276,53 @@ func (mpt *MapPinTracker) SyncAll() ([]PinInfo, error) {
status := mpt.StatusAll()
for _, pInfoOrig := range status {
c, err := cid.Decode(pInfoOrig.CidStr)
if err != nil { // this should not happen but let's play safe
return pInfos, err
}
var pInfoNew PinInfo
ips, ok := ipsMap[pInfoOrig.CidStr]
var pInfoNew api.PinInfo
c := pInfoOrig.Cid
ips, ok := ipsMap[c.String()]
if !ok {
pInfoNew = mpt.syncStatus(c, IPFSPinStatusUnpinned)
pInfoNew = mpt.syncStatus(c, api.IPFSPinStatusUnpinned)
} else {
pInfoNew = mpt.syncStatus(c, ips)
}
if pInfoOrig.Status != pInfoNew.Status ||
pInfoNew.Status == TrackerStatusUnpinError ||
pInfoNew.Status == TrackerStatusPinError {
pInfoNew.Status == api.TrackerStatusUnpinError ||
pInfoNew.Status == api.TrackerStatusPinError {
pInfos = append(pInfos, pInfoNew)
}
}
return pInfos, nil
}
func (mpt *MapPinTracker) syncStatus(c *cid.Cid, ips IPFSPinStatus) PinInfo {
func (mpt *MapPinTracker) syncStatus(c *cid.Cid, ips api.IPFSPinStatus) api.PinInfo {
p := mpt.get(c)
if ips.IsPinned() {
switch p.Status {
case TrackerStatusPinned: // nothing
case TrackerStatusPinning, TrackerStatusPinError:
mpt.set(c, TrackerStatusPinned)
case TrackerStatusUnpinning:
case api.TrackerStatusPinned: // nothing
case api.TrackerStatusPinning, api.TrackerStatusPinError:
mpt.set(c, api.TrackerStatusPinned)
case api.TrackerStatusUnpinning:
if time.Since(p.TS) > UnpinningTimeout {
mpt.setError(c, errUnpinningTimeout)
}
case TrackerStatusUnpinned:
case api.TrackerStatusUnpinned:
mpt.setError(c, errPinned)
case TrackerStatusUnpinError: // nothing, keep error as it was
case api.TrackerStatusUnpinError: // nothing, keep error as it was
default:
}
} else {
switch p.Status {
case TrackerStatusPinned:
case api.TrackerStatusPinned:
mpt.setError(c, errUnpinned)
case TrackerStatusPinError: // nothing, keep error as it was
case TrackerStatusPinning:
case api.TrackerStatusPinError: // nothing, keep error as it was
case api.TrackerStatusPinning:
if time.Since(p.TS) > PinningTimeout {
mpt.setError(c, errPinningTimeout)
}
case TrackerStatusUnpinning, TrackerStatusUnpinError:
mpt.set(c, TrackerStatusUnpinned)
case TrackerStatusUnpinned: // nothing
case api.TrackerStatusUnpinning, api.TrackerStatusUnpinError:
mpt.set(c, api.TrackerStatusUnpinned)
case api.TrackerStatusUnpinned: // nothing
default:
}
}
@ -334,18 +332,18 @@ func (mpt *MapPinTracker) syncStatus(c *cid.Cid, ips IPFSPinStatus) PinInfo {
// Recover will re-track or re-untrack a Cid in error state,
// possibly retriggering an IPFS pinning operation and returning
// only when it is done.
func (mpt *MapPinTracker) Recover(c *cid.Cid) (PinInfo, error) {
func (mpt *MapPinTracker) Recover(c *cid.Cid) (api.PinInfo, error) {
p := mpt.get(c)
if p.Status != TrackerStatusPinError &&
p.Status != TrackerStatusUnpinError {
if p.Status != api.TrackerStatusPinError &&
p.Status != api.TrackerStatusUnpinError {
return p, nil
}
logger.Infof("Recovering %s", c)
var err error
switch p.Status {
case TrackerStatusPinError:
case api.TrackerStatusPinError:
err = mpt.Track(c)
case TrackerStatusUnpinError:
case api.TrackerStatusUnpinError:
err = mpt.Untrack(c)
}
if err != nil {

View File

@ -160,6 +160,8 @@ func TestClustersPeerRemove(t *testing.T) {
t.Error(err)
}
delay()
f := func(t *testing.T, c *Cluster) {
if c.ID().ID == p { //This is the removed cluster
_, ok := <-c.Done()

View File

@ -2,7 +2,6 @@ package ipfscluster
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"net"
@ -12,6 +11,8 @@ import (
"sync"
"time"
"github.com/ipfs/ipfs-cluster/api"
mux "github.com/gorilla/mux"
rpc "github.com/hsanjuan/go-libp2p-gorpc"
cid "github.com/ipfs/go-cid"
@ -69,90 +70,6 @@ func (e errorResp) Error() string {
return e.Message
}
type versionResp struct {
Version string `json:"version"`
}
type pinResp struct {
Pinned string `json:"pinned"`
}
type unpinResp struct {
Unpinned string `json:"unpinned"`
}
type statusInfo struct {
Status string `json:"status"`
Error string `json:"error,omitempty"`
}
type statusCidResp struct {
Cid string `json:"cid"`
PeerMap map[string]statusInfo `json:"peer_map"`
}
type restIPFSIDResp struct {
ID string `json:"id"`
Addresses []string `json:"addresses"`
Error string `json:"error,omitempty"`
}
func newRestIPFSIDResp(id IPFSID) *restIPFSIDResp {
addrs := make([]string, len(id.Addresses), len(id.Addresses))
for i, a := range id.Addresses {
addrs[i] = a.String()
}
return &restIPFSIDResp{
ID: id.ID.Pretty(),
Addresses: addrs,
Error: id.Error,
}
}
type restIDResp struct {
ID string `json:"id"`
PublicKey string `json:"public_key"`
Addresses []string `json:"addresses"`
ClusterPeers []string `json:"cluster_peers"`
Version string `json:"version"`
Commit string `json:"commit"`
RPCProtocolVersion string `json:"rpc_protocol_version"`
Error string `json:"error,omitempty"`
IPFS *restIPFSIDResp `json:"ipfs"`
}
func newRestIDResp(id ID) *restIDResp {
pubKey := ""
if id.PublicKey != nil {
keyBytes, err := id.PublicKey.Bytes()
if err == nil {
pubKey = base64.StdEncoding.EncodeToString(keyBytes)
}
}
addrs := make([]string, len(id.Addresses), len(id.Addresses))
for i, a := range id.Addresses {
addrs[i] = a.String()
}
peers := make([]string, len(id.ClusterPeers), len(id.ClusterPeers))
for i, a := range id.ClusterPeers {
peers[i] = a.String()
}
return &restIDResp{
ID: id.ID.Pretty(),
PublicKey: pubKey,
Addresses: addrs,
ClusterPeers: peers,
Version: id.Version,
Commit: id.Commit,
RPCProtocolVersion: string(id.RPCProtocolVersion),
Error: id.Error,
IPFS: newRestIPFSIDResp(id.IPFS),
}
}
type statusResp []statusCidResp
// NewRESTAPI creates a new object which is ready to be
// started.
func NewRESTAPI(cfg *Config) (*RESTAPI, error) {
@ -209,105 +126,105 @@ func NewRESTAPI(cfg *Config) (*RESTAPI, error) {
return api, nil
}
func (api *RESTAPI) routes() []route {
func (rest *RESTAPI) routes() []route {
return []route{
{
"ID",
"GET",
"/id",
api.idHandler,
rest.idHandler,
},
{
"Version",
"GET",
"/version",
api.versionHandler,
rest.versionHandler,
},
{
"Peers",
"GET",
"/peers",
api.peerListHandler,
rest.peerListHandler,
},
{
"PeerAdd",
"POST",
"/peers",
api.peerAddHandler,
rest.peerAddHandler,
},
{
"PeerRemove",
"DELETE",
"/peers/{peer}",
api.peerRemoveHandler,
rest.peerRemoveHandler,
},
{
"Pins",
"GET",
"/pinlist",
api.pinListHandler,
rest.pinListHandler,
},
{
"StatusAll",
"GET",
"/pins",
api.statusAllHandler,
rest.statusAllHandler,
},
{
"SyncAll",
"POST",
"/pins/sync",
api.syncAllHandler,
rest.syncAllHandler,
},
{
"Status",
"GET",
"/pins/{hash}",
api.statusHandler,
rest.statusHandler,
},
{
"Pin",
"POST",
"/pins/{hash}",
api.pinHandler,
rest.pinHandler,
},
{
"Unpin",
"DELETE",
"/pins/{hash}",
api.unpinHandler,
rest.unpinHandler,
},
{
"Sync",
"POST",
"/pins/{hash}/sync",
api.syncHandler,
rest.syncHandler,
},
{
"Recover",
"POST",
"/pins/{hash}/recover",
api.recoverHandler,
rest.recoverHandler,
},
}
}
func (api *RESTAPI) run() {
api.wg.Add(1)
func (rest *RESTAPI) run() {
rest.wg.Add(1)
go func() {
defer api.wg.Done()
defer rest.wg.Done()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
api.ctx = ctx
rest.ctx = ctx
<-api.rpcReady
<-rest.rpcReady
logger.Infof("REST API: %s", api.apiAddr)
err := api.server.Serve(api.listener)
logger.Infof("REST API: %s", rest.apiAddr)
err := rest.server.Serve(rest.listener)
if err != nil && !strings.Contains(err.Error(), "closed network connection") {
logger.Error(err)
}
@ -315,79 +232,68 @@ func (api *RESTAPI) run() {
}
// Shutdown stops any API listeners.
func (api *RESTAPI) Shutdown() error {
api.shutdownLock.Lock()
defer api.shutdownLock.Unlock()
func (rest *RESTAPI) Shutdown() error {
rest.shutdownLock.Lock()
defer rest.shutdownLock.Unlock()
if api.shutdown {
if rest.shutdown {
logger.Debug("already shutdown")
return nil
}
logger.Info("stopping Cluster API")
close(api.rpcReady)
close(rest.rpcReady)
// Cancel any outstanding ops
api.server.SetKeepAlivesEnabled(false)
api.listener.Close()
rest.server.SetKeepAlivesEnabled(false)
rest.listener.Close()
api.wg.Wait()
api.shutdown = true
rest.wg.Wait()
rest.shutdown = true
return nil
}
// SetClient makes the component ready to perform RPC
// requests.
func (api *RESTAPI) SetClient(c *rpc.Client) {
api.rpcClient = c
api.rpcReady <- struct{}{}
func (rest *RESTAPI) SetClient(c *rpc.Client) {
rest.rpcClient = c
rest.rpcReady <- struct{}{}
}
func (api *RESTAPI) idHandler(w http.ResponseWriter, r *http.Request) {
idSerial := IDSerial{}
err := api.rpcClient.Call("",
func (rest *RESTAPI) idHandler(w http.ResponseWriter, r *http.Request) {
idSerial := api.IDSerial{}
err := rest.rpcClient.Call("",
"Cluster",
"ID",
struct{}{},
&idSerial)
if checkRPCErr(w, err) {
resp := newRestIDResp(idSerial.ToID())
sendJSONResponse(w, 200, resp)
}
sendResponse(w, err, idSerial)
}
func (api *RESTAPI) versionHandler(w http.ResponseWriter, r *http.Request) {
var v string
err := api.rpcClient.Call("",
func (rest *RESTAPI) versionHandler(w http.ResponseWriter, r *http.Request) {
var v api.Version
err := rest.rpcClient.Call("",
"Cluster",
"Version",
struct{}{},
&v)
if checkRPCErr(w, err) {
sendJSONResponse(w, 200, versionResp{v})
}
sendResponse(w, err, v)
}
func (api *RESTAPI) peerListHandler(w http.ResponseWriter, r *http.Request) {
var peersSerial []IDSerial
err := api.rpcClient.Call("",
func (rest *RESTAPI) peerListHandler(w http.ResponseWriter, r *http.Request) {
var peersSerial []api.IDSerial
err := rest.rpcClient.Call("",
"Cluster",
"Peers",
struct{}{},
&peersSerial)
if checkRPCErr(w, err) {
var resp []*restIDResp
for _, pS := range peersSerial {
p := pS.ToID()
resp = append(resp, newRestIDResp(p))
}
sendJSONResponse(w, 200, resp)
}
sendResponse(w, err, peersSerial)
}
func (api *RESTAPI) peerAddHandler(w http.ResponseWriter, r *http.Request) {
func (rest *RESTAPI) peerAddHandler(w http.ResponseWriter, r *http.Request) {
dec := json.NewDecoder(r.Body)
defer r.Body.Close()
@ -404,145 +310,123 @@ func (api *RESTAPI) peerAddHandler(w http.ResponseWriter, r *http.Request) {
return
}
var ids IDSerial
err = api.rpcClient.Call("",
var ids api.IDSerial
err = rest.rpcClient.Call("",
"Cluster",
"PeerAdd",
MultiaddrToSerial(mAddr),
api.MultiaddrToSerial(mAddr),
&ids)
if checkRPCErr(w, err) {
resp := newRestIDResp(ids.ToID())
sendJSONResponse(w, 200, resp)
}
sendResponse(w, err, ids)
}
func (api *RESTAPI) peerRemoveHandler(w http.ResponseWriter, r *http.Request) {
func (rest *RESTAPI) peerRemoveHandler(w http.ResponseWriter, r *http.Request) {
if p := parsePidOrError(w, r); p != "" {
err := api.rpcClient.Call("",
err := rest.rpcClient.Call("",
"Cluster",
"PeerRemove",
p,
&struct{}{})
if checkRPCErr(w, err) {
sendEmptyResponse(w)
}
sendEmptyResponse(w, err)
}
}
func (api *RESTAPI) pinHandler(w http.ResponseWriter, r *http.Request) {
if c := parseCidOrError(w, r); c != nil {
err := api.rpcClient.Call("",
func (rest *RESTAPI) pinHandler(w http.ResponseWriter, r *http.Request) {
if c := parseCidOrError(w, r); c.Cid != "" {
err := rest.rpcClient.Call("",
"Cluster",
"Pin",
c,
&struct{}{})
if checkRPCErr(w, err) {
sendAcceptedResponse(w)
}
sendAcceptedResponse(w, err)
}
}
func (api *RESTAPI) unpinHandler(w http.ResponseWriter, r *http.Request) {
if c := parseCidOrError(w, r); c != nil {
err := api.rpcClient.Call("",
func (rest *RESTAPI) unpinHandler(w http.ResponseWriter, r *http.Request) {
if c := parseCidOrError(w, r); c.Cid != "" {
err := rest.rpcClient.Call("",
"Cluster",
"Unpin",
c,
&struct{}{})
if checkRPCErr(w, err) {
sendAcceptedResponse(w)
}
sendAcceptedResponse(w, err)
}
}
func (api *RESTAPI) pinListHandler(w http.ResponseWriter, r *http.Request) {
func (rest *RESTAPI) pinListHandler(w http.ResponseWriter, r *http.Request) {
var pins []string
err := api.rpcClient.Call("",
err := rest.rpcClient.Call("",
"Cluster",
"PinList",
struct{}{},
&pins)
if checkRPCErr(w, err) {
sendJSONResponse(w, 200, pins)
}
sendResponse(w, err, pins)
}
func (api *RESTAPI) statusAllHandler(w http.ResponseWriter, r *http.Request) {
var pinInfos []GlobalPinInfo
err := api.rpcClient.Call("",
func (rest *RESTAPI) statusAllHandler(w http.ResponseWriter, r *http.Request) {
var pinInfos []api.GlobalPinInfoSerial
err := rest.rpcClient.Call("",
"Cluster",
"StatusAll",
struct{}{},
&pinInfos)
if checkRPCErr(w, err) {
sendStatusResponse(w, http.StatusOK, pinInfos)
}
sendResponse(w, err, pinInfos)
}
func (api *RESTAPI) statusHandler(w http.ResponseWriter, r *http.Request) {
if c := parseCidOrError(w, r); c != nil {
var pinInfo GlobalPinInfo
err := api.rpcClient.Call("",
func (rest *RESTAPI) statusHandler(w http.ResponseWriter, r *http.Request) {
if c := parseCidOrError(w, r); c.Cid != "" {
var pinInfo api.GlobalPinInfoSerial
err := rest.rpcClient.Call("",
"Cluster",
"Status",
c,
&pinInfo)
if checkRPCErr(w, err) {
sendStatusCidResponse(w, http.StatusOK, pinInfo)
}
sendResponse(w, err, pinInfo)
}
}
func (api *RESTAPI) syncAllHandler(w http.ResponseWriter, r *http.Request) {
var pinInfos []GlobalPinInfo
err := api.rpcClient.Call("",
func (rest *RESTAPI) syncAllHandler(w http.ResponseWriter, r *http.Request) {
var pinInfos []api.GlobalPinInfoSerial
err := rest.rpcClient.Call("",
"Cluster",
"SyncAll",
struct{}{},
&pinInfos)
if checkRPCErr(w, err) {
sendStatusResponse(w, http.StatusAccepted, pinInfos)
}
sendResponse(w, err, pinInfos)
}
func (api *RESTAPI) syncHandler(w http.ResponseWriter, r *http.Request) {
if c := parseCidOrError(w, r); c != nil {
var pinInfo GlobalPinInfo
err := api.rpcClient.Call("",
func (rest *RESTAPI) syncHandler(w http.ResponseWriter, r *http.Request) {
if c := parseCidOrError(w, r); c.Cid != "" {
var pinInfo api.GlobalPinInfoSerial
err := rest.rpcClient.Call("",
"Cluster",
"Sync",
c,
&pinInfo)
if checkRPCErr(w, err) {
sendStatusCidResponse(w, http.StatusOK, pinInfo)
}
sendResponse(w, err, pinInfo)
}
}
func (api *RESTAPI) recoverHandler(w http.ResponseWriter, r *http.Request) {
if c := parseCidOrError(w, r); c != nil {
var pinInfo GlobalPinInfo
err := api.rpcClient.Call("",
func (rest *RESTAPI) recoverHandler(w http.ResponseWriter, r *http.Request) {
if c := parseCidOrError(w, r); c.Cid != "" {
var pinInfo api.GlobalPinInfoSerial
err := rest.rpcClient.Call("",
"Cluster",
"Recover",
c,
&pinInfo)
if checkRPCErr(w, err) {
sendStatusCidResponse(w, http.StatusOK, pinInfo)
}
sendResponse(w, err, pinInfo)
}
}
func parseCidOrError(w http.ResponseWriter, r *http.Request) *CidArg {
func parseCidOrError(w http.ResponseWriter, r *http.Request) api.CidArgSerial {
vars := mux.Vars(r)
hash := vars["hash"]
_, err := cid.Decode(hash)
if err != nil {
sendErrorResponse(w, 400, "error decoding Cid: "+err.Error())
return nil
return api.CidArgSerial{""}
}
return &CidArg{hash}
return api.CidArgSerial{hash}
}
func parsePidOrError(w http.ResponseWriter, r *http.Request) peer.ID {
@ -556,6 +440,12 @@ func parsePidOrError(w http.ResponseWriter, r *http.Request) peer.ID {
return pid
}
func sendResponse(w http.ResponseWriter, rpcErr error, resp interface{}) {
if checkRPCErr(w, rpcErr) {
sendJSONResponse(w, 200, resp)
}
}
// checkRPCErr takes care of returning standard error responses if we
// pass an error to it. It returns true when everythings OK (no error
// was handled), or false otherwise.
@ -567,12 +457,16 @@ func checkRPCErr(w http.ResponseWriter, err error) bool {
return true
}
func sendEmptyResponse(w http.ResponseWriter) {
func sendEmptyResponse(w http.ResponseWriter, rpcErr error) {
if checkRPCErr(w, rpcErr) {
w.WriteHeader(http.StatusNoContent)
}
}
func sendAcceptedResponse(w http.ResponseWriter) {
func sendAcceptedResponse(w http.ResponseWriter, rpcErr error) {
if checkRPCErr(w, rpcErr) {
w.WriteHeader(http.StatusAccepted)
}
}
func sendJSONResponse(w http.ResponseWriter, code int, resp interface{}) {
@ -587,30 +481,3 @@ func sendErrorResponse(w http.ResponseWriter, code int, msg string) {
logger.Errorf("sending error response: %d: %s", code, msg)
sendJSONResponse(w, code, errorResp)
}
func transformPinToStatusCid(p GlobalPinInfo) statusCidResp {
s := statusCidResp{}
s.Cid = p.Cid.String()
s.PeerMap = make(map[string]statusInfo)
for k, v := range p.PeerMap {
s.PeerMap[k.Pretty()] = statusInfo{
Status: v.Status.String(),
Error: v.Error,
}
}
return s
}
func sendStatusResponse(w http.ResponseWriter, code int, data []GlobalPinInfo) {
pins := make(statusResp, 0, len(data))
for _, d := range data {
pins = append(pins, transformPinToStatusCid(d))
}
sendJSONResponse(w, code, pins)
}
func sendStatusCidResponse(w http.ResponseWriter, code int, data GlobalPinInfo) {
st := transformPinToStatusCid(data)
sendJSONResponse(w, code, st)
}

View File

@ -7,6 +7,8 @@ import (
"io/ioutil"
"net/http"
"testing"
"github.com/ipfs/ipfs-cluster/api"
)
var (
@ -16,16 +18,16 @@ var (
func testRESTAPI(t *testing.T) *RESTAPI {
//logging.SetDebugLogging()
cfg := testingConfig()
api, err := NewRESTAPI(cfg)
rest, err := NewRESTAPI(cfg)
if err != nil {
t.Fatal("should be able to create a new Api: ", err)
}
// No keep alive! Otherwise tests hang with
// connections re-used from previous tests
api.server.SetKeepAlivesEnabled(false)
api.SetClient(mockRPCClient(t))
return api
rest.server.SetKeepAlivesEnabled(false)
rest.SetClient(mockRPCClient(t))
return rest
}
func processResp(t *testing.T, httpResp *http.Response, err error, resp interface{}) {
@ -65,19 +67,19 @@ func makeDelete(t *testing.T, path string, resp interface{}) {
}
func TestRESTAPIShutdown(t *testing.T) {
api := testRESTAPI(t)
err := api.Shutdown()
rest := testRESTAPI(t)
err := rest.Shutdown()
if err != nil {
t.Error("should shutdown cleanly: ", err)
}
// test shutting down twice
api.Shutdown()
rest.Shutdown()
}
func TestRestAPIIDEndpoint(t *testing.T) {
api := testRESTAPI(t)
defer api.Shutdown()
id := restIDResp{}
rest := testRESTAPI(t)
defer rest.Shutdown()
id := api.IDSerial{}
makeGet(t, "/id", &id)
if id.ID != testPeerID.Pretty() {
t.Error("expected correct id")
@ -85,9 +87,9 @@ func TestRestAPIIDEndpoint(t *testing.T) {
}
func TestRESTAPIVersionEndpoint(t *testing.T) {
api := testRESTAPI(t)
defer api.Shutdown()
ver := versionResp{}
rest := testRESTAPI(t)
defer rest.Shutdown()
ver := api.Version{}
makeGet(t, "/version", &ver)
if ver.Version != "0.0.mock" {
t.Error("expected correct version")
@ -95,10 +97,10 @@ func TestRESTAPIVersionEndpoint(t *testing.T) {
}
func TestRESTAPIPeerstEndpoint(t *testing.T) {
api := testRESTAPI(t)
defer api.Shutdown()
rest := testRESTAPI(t)
defer rest.Shutdown()
var list []restIDResp
var list []api.IDSerial
makeGet(t, "/peers", &list)
if len(list) != 1 {
t.Fatal("expected 1 element")
@ -109,10 +111,10 @@ func TestRESTAPIPeerstEndpoint(t *testing.T) {
}
func TestRESTAPIPeerAddEndpoint(t *testing.T) {
api := testRESTAPI(t)
defer api.Shutdown()
rest := testRESTAPI(t)
defer rest.Shutdown()
id := restIDResp{}
id := api.IDSerial{}
// post with valid body
body := fmt.Sprintf("{\"peer_multiaddress\":\"/ip4/1.2.3.4/tcp/1234/ipfs/%s\"}", testPeerID.Pretty())
t.Log(body)
@ -139,15 +141,15 @@ func TestRESTAPIPeerAddEndpoint(t *testing.T) {
}
func TestRESTAPIPeerRemoveEndpoint(t *testing.T) {
api := testRESTAPI(t)
defer api.Shutdown()
rest := testRESTAPI(t)
defer rest.Shutdown()
makeDelete(t, "/peers/"+testPeerID.Pretty(), &struct{}{})
}
func TestRESTAPIPinEndpoint(t *testing.T) {
api := testRESTAPI(t)
defer api.Shutdown()
rest := testRESTAPI(t)
defer rest.Shutdown()
// test regular post
makePost(t, "/pins/"+testCid, []byte{}, &struct{}{})
@ -165,8 +167,8 @@ func TestRESTAPIPinEndpoint(t *testing.T) {
}
func TestRESTAPIUnpinEndpoint(t *testing.T) {
api := testRESTAPI(t)
defer api.Shutdown()
rest := testRESTAPI(t)
defer rest.Shutdown()
// test regular delete
makeDelete(t, "/pins/"+testCid, &struct{}{})
@ -184,8 +186,8 @@ func TestRESTAPIUnpinEndpoint(t *testing.T) {
}
func TestRESTAPIPinListEndpoint(t *testing.T) {
api := testRESTAPI(t)
defer api.Shutdown()
rest := testRESTAPI(t)
defer rest.Shutdown()
var resp []string
makeGet(t, "/pinlist", &resp)
@ -197,10 +199,10 @@ func TestRESTAPIPinListEndpoint(t *testing.T) {
}
func TestRESTAPIStatusAllEndpoint(t *testing.T) {
api := testRESTAPI(t)
defer api.Shutdown()
rest := testRESTAPI(t)
defer rest.Shutdown()
var resp statusResp
var resp []api.GlobalPinInfoSerial
makeGet(t, "/pins", &resp)
if len(resp) != 3 ||
resp[0].Cid != testCid1 ||
@ -210,10 +212,10 @@ func TestRESTAPIStatusAllEndpoint(t *testing.T) {
}
func TestRESTAPIStatusEndpoint(t *testing.T) {
api := testRESTAPI(t)
defer api.Shutdown()
rest := testRESTAPI(t)
defer rest.Shutdown()
var resp statusCidResp
var resp api.GlobalPinInfoSerial
makeGet(t, "/pins/"+testCid, &resp)
if resp.Cid != testCid {
@ -229,10 +231,10 @@ func TestRESTAPIStatusEndpoint(t *testing.T) {
}
func TestRESTAPISyncAllEndpoint(t *testing.T) {
api := testRESTAPI(t)
defer api.Shutdown()
rest := testRESTAPI(t)
defer rest.Shutdown()
var resp statusResp
var resp []api.GlobalPinInfoSerial
makePost(t, "/pins/sync", []byte{}, &resp)
if len(resp) != 3 ||
@ -243,10 +245,10 @@ func TestRESTAPISyncAllEndpoint(t *testing.T) {
}
func TestRESTAPISyncEndpoint(t *testing.T) {
api := testRESTAPI(t)
defer api.Shutdown()
rest := testRESTAPI(t)
defer rest.Shutdown()
var resp statusCidResp
var resp api.GlobalPinInfoSerial
makePost(t, "/pins/"+testCid+"/sync", []byte{}, &resp)
if resp.Cid != testCid {
@ -262,10 +264,10 @@ func TestRESTAPISyncEndpoint(t *testing.T) {
}
func TestRESTAPIRecoverEndpoint(t *testing.T) {
api := testRESTAPI(t)
defer api.Shutdown()
rest := testRESTAPI(t)
defer rest.Shutdown()
var resp statusCidResp
var resp api.GlobalPinInfoSerial
makePost(t, "/pins/"+testCid+"/recover", []byte{}, &resp)
if resp.Cid != testCid {

View File

@ -3,8 +3,9 @@ package ipfscluster
import (
"errors"
cid "github.com/ipfs/go-cid"
peer "github.com/libp2p/go-libp2p-peer"
"github.com/ipfs/ipfs-cluster/api"
)
// RPCAPI is a go-libp2p-gorpc service which provides the internal ipfs-cluster
@ -15,31 +16,7 @@ import (
// the different components of ipfs-cluster, with very little added logic.
// Refer to documentation on those methods for details on their behaviour.
type RPCAPI struct {
cluster *Cluster
}
// CidArg is an arguments that carry a Cid. It may carry more things in the
// future.
type CidArg struct {
Cid string
}
// NewCidArg returns a CidArg which carries the given Cid. It panics if it is
// nil.
func NewCidArg(c *cid.Cid) *CidArg {
if c == nil {
panic("Cid cannot be nil")
}
return &CidArg{c.String()}
}
// CID decodes and returns a Cid from a CidArg.
func (arg *CidArg) CID() (*cid.Cid, error) {
c, err := cid.Decode(arg.Cid)
if err != nil {
return nil, err
}
return c, nil
c *Cluster
}
/*
@ -47,33 +24,27 @@ func (arg *CidArg) CID() (*cid.Cid, error) {
*/
// ID runs Cluster.ID()
func (api *RPCAPI) ID(in struct{}, out *IDSerial) error {
id := api.cluster.ID().ToSerial()
func (rpcapi *RPCAPI) ID(in struct{}, out *api.IDSerial) error {
id := rpcapi.c.ID().ToSerial()
*out = id
return nil
}
// Pin runs Cluster.Pin().
func (api *RPCAPI) Pin(in *CidArg, out *struct{}) error {
c, err := in.CID()
if err != nil {
return err
}
return api.cluster.Pin(c)
func (rpcapi *RPCAPI) Pin(in api.CidArgSerial, out *struct{}) error {
c := in.ToCidArg().Cid
return rpcapi.c.Pin(c)
}
// Unpin runs Cluster.Unpin().
func (api *RPCAPI) Unpin(in *CidArg, out *struct{}) error {
c, err := in.CID()
if err != nil {
return err
}
return api.cluster.Unpin(c)
func (rpcapi *RPCAPI) Unpin(in api.CidArgSerial, out *struct{}) error {
c := in.ToCidArg().Cid
return rpcapi.c.Unpin(c)
}
// PinList runs Cluster.Pins().
func (api *RPCAPI) PinList(in struct{}, out *[]string) error {
cidList := api.cluster.Pins()
func (rpcapi *RPCAPI) PinList(in struct{}, out *[]string) error {
cidList := rpcapi.c.Pins()
cidStrList := make([]string, 0, len(cidList))
for _, c := range cidList {
cidStrList = append(cidStrList, c.String())
@ -83,15 +54,15 @@ func (api *RPCAPI) PinList(in struct{}, out *[]string) error {
}
// Version runs Cluster.Version().
func (api *RPCAPI) Version(in struct{}, out *string) error {
*out = api.cluster.Version()
func (rpcapi *RPCAPI) Version(in struct{}, out *api.Version) error {
*out = api.Version{rpcapi.c.Version()}
return nil
}
// Peers runs Cluster.Peers().
func (api *RPCAPI) Peers(in struct{}, out *[]IDSerial) error {
peers := api.cluster.Peers()
var sPeers []IDSerial
func (rpcapi *RPCAPI) Peers(in struct{}, out *[]api.IDSerial) error {
peers := rpcapi.c.Peers()
var sPeers []api.IDSerial
for _, p := range peers {
sPeers = append(sPeers, p.ToSerial())
}
@ -100,94 +71,82 @@ func (api *RPCAPI) Peers(in struct{}, out *[]IDSerial) error {
}
// PeerAdd runs Cluster.PeerAdd().
func (api *RPCAPI) PeerAdd(in MultiaddrSerial, out *IDSerial) error {
func (rpcapi *RPCAPI) PeerAdd(in api.MultiaddrSerial, out *api.IDSerial) error {
addr := in.ToMultiaddr()
id, err := api.cluster.PeerAdd(addr)
id, err := rpcapi.c.PeerAdd(addr)
*out = id.ToSerial()
return err
}
// PeerRemove runs Cluster.PeerRm().
func (api *RPCAPI) PeerRemove(in peer.ID, out *struct{}) error {
return api.cluster.PeerRemove(in)
func (rpcapi *RPCAPI) PeerRemove(in peer.ID, out *struct{}) error {
return rpcapi.c.PeerRemove(in)
}
// Join runs Cluster.Join().
func (api *RPCAPI) Join(in MultiaddrSerial, out *struct{}) error {
func (rpcapi *RPCAPI) Join(in api.MultiaddrSerial, out *struct{}) error {
addr := in.ToMultiaddr()
err := api.cluster.Join(addr)
err := rpcapi.c.Join(addr)
return err
}
// StatusAll runs Cluster.StatusAll().
func (api *RPCAPI) StatusAll(in struct{}, out *[]GlobalPinInfo) error {
pinfo, err := api.cluster.StatusAll()
*out = pinfo
func (rpcapi *RPCAPI) StatusAll(in struct{}, out *[]api.GlobalPinInfoSerial) error {
pinfos, err := rpcapi.c.StatusAll()
*out = globalPinInfoSliceToSerial(pinfos)
return err
}
// Status runs Cluster.Status().
func (api *RPCAPI) Status(in *CidArg, out *GlobalPinInfo) error {
c, err := in.CID()
if err != nil {
return err
}
pinfo, err := api.cluster.Status(c)
*out = pinfo
func (rpcapi *RPCAPI) Status(in api.CidArgSerial, out *api.GlobalPinInfoSerial) error {
c := in.ToCidArg().Cid
pinfo, err := rpcapi.c.Status(c)
*out = pinfo.ToSerial()
return err
}
// SyncAllLocal runs Cluster.SyncAllLocal().
func (api *RPCAPI) SyncAllLocal(in struct{}, out *[]PinInfo) error {
pinfo, err := api.cluster.SyncAllLocal()
*out = pinfo
func (rpcapi *RPCAPI) SyncAllLocal(in struct{}, out *[]api.PinInfoSerial) error {
pinfos, err := rpcapi.c.SyncAllLocal()
*out = pinInfoSliceToSerial(pinfos)
return err
}
// SyncLocal runs Cluster.SyncLocal().
func (api *RPCAPI) SyncLocal(in *CidArg, out *PinInfo) error {
c, err := in.CID()
if err != nil {
return err
}
pinfo, err := api.cluster.SyncLocal(c)
*out = pinfo
func (rpcapi *RPCAPI) SyncLocal(in api.CidArgSerial, out *api.PinInfoSerial) error {
c := in.ToCidArg().Cid
pinfo, err := rpcapi.c.SyncLocal(c)
*out = pinfo.ToSerial()
return err
}
// SyncAll runs Cluster.SyncAll().
func (api *RPCAPI) SyncAll(in struct{}, out *[]GlobalPinInfo) error {
pinfo, err := api.cluster.SyncAll()
*out = pinfo
func (rpcapi *RPCAPI) SyncAll(in struct{}, out *[]api.GlobalPinInfoSerial) error {
pinfos, err := rpcapi.c.SyncAll()
*out = globalPinInfoSliceToSerial(pinfos)
return err
}
// Sync runs Cluster.Sync().
func (api *RPCAPI) Sync(in *CidArg, out *GlobalPinInfo) error {
c, err := in.CID()
if err != nil {
return err
}
pinfo, err := api.cluster.Sync(c)
*out = pinfo
func (rpcapi *RPCAPI) Sync(in api.CidArgSerial, out *api.GlobalPinInfoSerial) error {
c := in.ToCidArg().Cid
pinfo, err := rpcapi.c.Sync(c)
*out = pinfo.ToSerial()
return err
}
// StateSync runs Cluster.StateSync().
func (api *RPCAPI) StateSync(in struct{}, out *[]PinInfo) error {
pinfo, err := api.cluster.StateSync()
*out = pinfo
func (rpcapi *RPCAPI) StateSync(in struct{}, out *[]api.PinInfoSerial) error {
pinfos, err := rpcapi.c.StateSync()
*out = pinInfoSliceToSerial(pinfos)
return err
}
// Recover runs Cluster.Recover().
func (api *RPCAPI) Recover(in *CidArg, out *GlobalPinInfo) error {
c, err := in.CID()
if err != nil {
return err
}
pinfo, err := api.cluster.Recover(c)
*out = pinfo
func (rpcapi *RPCAPI) Recover(in api.CidArgSerial, out *api.GlobalPinInfoSerial) error {
c := in.ToCidArg().Cid
pinfo, err := rpcapi.c.Recover(c)
*out = pinfo.ToSerial()
return err
}
@ -196,48 +155,36 @@ func (api *RPCAPI) Recover(in *CidArg, out *GlobalPinInfo) error {
*/
// Track runs PinTracker.Track().
func (api *RPCAPI) Track(in *CidArg, out *struct{}) error {
c, err := in.CID()
if err != nil {
return err
}
return api.cluster.tracker.Track(c)
func (rpcapi *RPCAPI) Track(in api.CidArgSerial, out *struct{}) error {
c := in.ToCidArg().Cid
return rpcapi.c.tracker.Track(c)
}
// Untrack runs PinTracker.Untrack().
func (api *RPCAPI) Untrack(in *CidArg, out *struct{}) error {
c, err := in.CID()
if err != nil {
return err
}
return api.cluster.tracker.Untrack(c)
func (rpcapi *RPCAPI) Untrack(in api.CidArgSerial, out *struct{}) error {
c := in.ToCidArg().Cid
return rpcapi.c.tracker.Untrack(c)
}
// TrackerStatusAll runs PinTracker.StatusAll().
func (api *RPCAPI) TrackerStatusAll(in struct{}, out *[]PinInfo) error {
*out = api.cluster.tracker.StatusAll()
func (rpcapi *RPCAPI) TrackerStatusAll(in struct{}, out *[]api.PinInfoSerial) error {
*out = pinInfoSliceToSerial(rpcapi.c.tracker.StatusAll())
return nil
}
// TrackerStatus runs PinTracker.Status().
func (api *RPCAPI) TrackerStatus(in *CidArg, out *PinInfo) error {
c, err := in.CID()
if err != nil {
return err
}
pinfo := api.cluster.tracker.Status(c)
*out = pinfo
func (rpcapi *RPCAPI) TrackerStatus(in api.CidArgSerial, out *api.PinInfoSerial) error {
c := in.ToCidArg().Cid
pinfo := rpcapi.c.tracker.Status(c)
*out = pinfo.ToSerial()
return nil
}
// TrackerRecover runs PinTracker.Recover().
func (api *RPCAPI) TrackerRecover(in *CidArg, out *PinInfo) error {
c, err := in.CID()
if err != nil {
return err
}
pinfo, err := api.cluster.tracker.Recover(c)
*out = pinfo
func (rpcapi *RPCAPI) TrackerRecover(in api.CidArgSerial, out *api.PinInfoSerial) error {
c := in.ToCidArg().Cid
pinfo, err := rpcapi.c.tracker.Recover(c)
*out = pinfo.ToSerial()
return err
}
@ -246,37 +193,28 @@ func (api *RPCAPI) TrackerRecover(in *CidArg, out *PinInfo) error {
*/
// IPFSPin runs IPFSConnector.Pin().
func (api *RPCAPI) IPFSPin(in *CidArg, out *struct{}) error {
c, err := in.CID()
if err != nil {
return err
}
return api.cluster.ipfs.Pin(c)
func (rpcapi *RPCAPI) IPFSPin(in api.CidArgSerial, out *struct{}) error {
c := in.ToCidArg().Cid
return rpcapi.c.ipfs.Pin(c)
}
// IPFSUnpin runs IPFSConnector.Unpin().
func (api *RPCAPI) IPFSUnpin(in *CidArg, out *struct{}) error {
c, err := in.CID()
if err != nil {
return err
}
return api.cluster.ipfs.Unpin(c)
func (rpcapi *RPCAPI) IPFSUnpin(in api.CidArgSerial, out *struct{}) error {
c := in.ToCidArg().Cid
return rpcapi.c.ipfs.Unpin(c)
}
// IPFSPinLsCid runs IPFSConnector.PinLsCid().
func (api *RPCAPI) IPFSPinLsCid(in *CidArg, out *IPFSPinStatus) error {
c, err := in.CID()
if err != nil {
return err
}
b, err := api.cluster.ipfs.PinLsCid(c)
func (rpcapi *RPCAPI) IPFSPinLsCid(in api.CidArgSerial, out *api.IPFSPinStatus) error {
c := in.ToCidArg().Cid
b, err := rpcapi.c.ipfs.PinLsCid(c)
*out = b
return err
}
// IPFSPinLs runs IPFSConnector.PinLs().
func (api *RPCAPI) IPFSPinLs(in struct{}, out *map[string]IPFSPinStatus) error {
m, err := api.cluster.ipfs.PinLs()
func (rpcapi *RPCAPI) IPFSPinLs(in struct{}, out *map[string]api.IPFSPinStatus) error {
m, err := rpcapi.c.ipfs.PinLs()
*out = m
return err
}
@ -286,32 +224,26 @@ func (api *RPCAPI) IPFSPinLs(in struct{}, out *map[string]IPFSPinStatus) error {
*/
// ConsensusLogPin runs Consensus.LogPin().
func (api *RPCAPI) ConsensusLogPin(in *CidArg, out *struct{}) error {
c, err := in.CID()
if err != nil {
return err
}
return api.cluster.consensus.LogPin(c)
func (rpcapi *RPCAPI) ConsensusLogPin(in api.CidArgSerial, out *struct{}) error {
c := in.ToCidArg().Cid
return rpcapi.c.consensus.LogPin(c)
}
// ConsensusLogUnpin runs Consensus.LogUnpin().
func (api *RPCAPI) ConsensusLogUnpin(in *CidArg, out *struct{}) error {
c, err := in.CID()
if err != nil {
return err
}
return api.cluster.consensus.LogUnpin(c)
func (rpcapi *RPCAPI) ConsensusLogUnpin(in api.CidArgSerial, out *struct{}) error {
c := in.ToCidArg().Cid
return rpcapi.c.consensus.LogUnpin(c)
}
// ConsensusLogAddPeer runs Consensus.LogAddPeer().
func (api *RPCAPI) ConsensusLogAddPeer(in MultiaddrSerial, out *struct{}) error {
func (rpcapi *RPCAPI) ConsensusLogAddPeer(in api.MultiaddrSerial, out *struct{}) error {
addr := in.ToMultiaddr()
return api.cluster.consensus.LogAddPeer(addr)
return rpcapi.c.consensus.LogAddPeer(addr)
}
// ConsensusLogRmPeer runs Consensus.LogRmPeer().
func (api *RPCAPI) ConsensusLogRmPeer(in peer.ID, out *struct{}) error {
return api.cluster.consensus.LogRmPeer(in)
func (rpcapi *RPCAPI) ConsensusLogRmPeer(in peer.ID, out *struct{}) error {
return rpcapi.c.consensus.LogRmPeer(in)
}
/*
@ -319,27 +251,27 @@ func (api *RPCAPI) ConsensusLogRmPeer(in peer.ID, out *struct{}) error {
*/
// PeerManagerAddPeer runs peerManager.addPeer().
func (api *RPCAPI) PeerManagerAddPeer(in MultiaddrSerial, out *struct{}) error {
func (rpcapi *RPCAPI) PeerManagerAddPeer(in api.MultiaddrSerial, out *struct{}) error {
addr := in.ToMultiaddr()
err := api.cluster.peerManager.addPeer(addr)
err := rpcapi.c.peerManager.addPeer(addr)
return err
}
// PeerManagerAddFromMultiaddrs runs peerManager.addFromMultiaddrs().
func (api *RPCAPI) PeerManagerAddFromMultiaddrs(in MultiaddrsSerial, out *struct{}) error {
func (rpcapi *RPCAPI) PeerManagerAddFromMultiaddrs(in api.MultiaddrsSerial, out *struct{}) error {
addrs := in.ToMultiaddrs()
err := api.cluster.peerManager.addFromMultiaddrs(addrs)
err := rpcapi.c.peerManager.addFromMultiaddrs(addrs)
return err
}
// PeerManagerRmPeerShutdown runs peerManager.rmPeer().
func (api *RPCAPI) PeerManagerRmPeerShutdown(in peer.ID, out *struct{}) error {
return api.cluster.peerManager.rmPeer(in, true)
func (rpcapi *RPCAPI) PeerManagerRmPeerShutdown(in peer.ID, out *struct{}) error {
return rpcapi.c.peerManager.rmPeer(in, true)
}
// PeerManagerRmPeer runs peerManager.rmPeer().
func (api *RPCAPI) PeerManagerRmPeer(in peer.ID, out *struct{}) error {
return api.cluster.peerManager.rmPeer(in, false)
func (rpcapi *RPCAPI) PeerManagerRmPeer(in peer.ID, out *struct{}) error {
return rpcapi.c.peerManager.rmPeer(in, false)
}
/*
@ -350,11 +282,11 @@ func (api *RPCAPI) PeerManagerRmPeer(in peer.ID, out *struct{}) error {
// This is necessary for a peer to figure out which of its multiaddresses the
// peers are seeing (also when crossing NATs). It should be called from
// the peer the IN parameter indicates.
func (api *RPCAPI) RemoteMultiaddrForPeer(in peer.ID, out *MultiaddrSerial) error {
conns := api.cluster.host.Network().ConnsToPeer(in)
func (rpcapi *RPCAPI) RemoteMultiaddrForPeer(in peer.ID, out *api.MultiaddrSerial) error {
conns := rpcapi.c.host.Network().ConnsToPeer(in)
if len(conns) == 0 {
return errors.New("no connections to: " + in.Pretty())
}
*out = MultiaddrToSerial(multiaddrJoin(conns[0].RemoteMultiaddr(), in))
*out = api.MultiaddrToSerial(multiaddrJoin(conns[0].RemoteMultiaddr(), in))
return nil
}

View File

@ -5,9 +5,10 @@ import (
"testing"
"time"
"github.com/ipfs/ipfs-cluster/api"
rpc "github.com/hsanjuan/go-libp2p-gorpc"
cid "github.com/ipfs/go-cid"
crypto "github.com/libp2p/go-libp2p-crypto"
peer "github.com/libp2p/go-libp2p-peer"
)
@ -25,14 +26,14 @@ func mockRPCClient(t *testing.T) *rpc.Client {
return c
}
func (mock *mockService) Pin(in *CidArg, out *struct{}) error {
func (mock *mockService) Pin(in api.CidArgSerial, out *struct{}) error {
if in.Cid == errorCid {
return errBadCid
}
return nil
}
func (mock *mockService) Unpin(in *CidArg, out *struct{}) error {
func (mock *mockService) Unpin(in api.CidArgSerial, out *struct{}) error {
if in.Cid == errorCid {
return errBadCid
}
@ -44,36 +45,36 @@ func (mock *mockService) PinList(in struct{}, out *[]string) error {
return nil
}
func (mock *mockService) ID(in struct{}, out *IDSerial) error {
_, pubkey, _ := crypto.GenerateKeyPair(
DefaultConfigCrypto,
DefaultConfigKeyLength)
*out = ID{
func (mock *mockService) ID(in struct{}, out *api.IDSerial) error {
//_, pubkey, _ := crypto.GenerateKeyPair(
// DefaultConfigCrypto,
// DefaultConfigKeyLength)
*out = api.ID{
ID: testPeerID,
PublicKey: pubkey,
//PublicKey: pubkey,
Version: "0.0.mock",
IPFS: IPFSID{
IPFS: api.IPFSID{
ID: testPeerID,
},
}.ToSerial()
return nil
}
func (mock *mockService) Version(in struct{}, out *string) error {
*out = "0.0.mock"
func (mock *mockService) Version(in struct{}, out *api.Version) error {
*out = api.Version{"0.0.mock"}
return nil
}
func (mock *mockService) Peers(in struct{}, out *[]IDSerial) error {
id := IDSerial{}
func (mock *mockService) Peers(in struct{}, out *[]api.IDSerial) error {
id := api.IDSerial{}
mock.ID(in, &id)
*out = []IDSerial{id}
*out = []api.IDSerial{id}
return nil
}
func (mock *mockService) PeerAdd(in MultiaddrSerial, out *IDSerial) error {
id := IDSerial{}
func (mock *mockService) PeerAdd(in api.MultiaddrSerial, out *api.IDSerial) error {
id := api.IDSerial{}
mock.ID(struct{}{}, &id)
*out = id
return nil
@ -83,88 +84,88 @@ func (mock *mockService) PeerRemove(in peer.ID, out *struct{}) error {
return nil
}
func (mock *mockService) StatusAll(in struct{}, out *[]GlobalPinInfo) error {
func (mock *mockService) StatusAll(in struct{}, out *[]api.GlobalPinInfoSerial) error {
c1, _ := cid.Decode(testCid1)
c2, _ := cid.Decode(testCid2)
c3, _ := cid.Decode(testCid3)
*out = []GlobalPinInfo{
*out = globalPinInfoSliceToSerial([]api.GlobalPinInfo{
{
Cid: c1,
PeerMap: map[peer.ID]PinInfo{
PeerMap: map[peer.ID]api.PinInfo{
testPeerID: {
CidStr: testCid1,
Cid: c1,
Peer: testPeerID,
Status: TrackerStatusPinned,
Status: api.TrackerStatusPinned,
TS: time.Now(),
},
},
},
{
Cid: c2,
PeerMap: map[peer.ID]PinInfo{
PeerMap: map[peer.ID]api.PinInfo{
testPeerID: {
CidStr: testCid2,
Cid: c2,
Peer: testPeerID,
Status: TrackerStatusPinning,
Status: api.TrackerStatusPinning,
TS: time.Now(),
},
},
},
{
Cid: c3,
PeerMap: map[peer.ID]PinInfo{
PeerMap: map[peer.ID]api.PinInfo{
testPeerID: {
CidStr: testCid3,
Cid: c3,
Peer: testPeerID,
Status: TrackerStatusPinError,
Status: api.TrackerStatusPinError,
TS: time.Now(),
},
},
},
}
})
return nil
}
func (mock *mockService) Status(in *CidArg, out *GlobalPinInfo) error {
func (mock *mockService) Status(in api.CidArgSerial, out *api.GlobalPinInfoSerial) error {
if in.Cid == errorCid {
return errBadCid
}
c1, _ := cid.Decode(testCid1)
*out = GlobalPinInfo{
*out = api.GlobalPinInfo{
Cid: c1,
PeerMap: map[peer.ID]PinInfo{
PeerMap: map[peer.ID]api.PinInfo{
testPeerID: {
CidStr: testCid1,
Cid: c1,
Peer: testPeerID,
Status: TrackerStatusPinned,
Status: api.TrackerStatusPinned,
TS: time.Now(),
},
},
}
}.ToSerial()
return nil
}
func (mock *mockService) SyncAll(in struct{}, out *[]GlobalPinInfo) error {
func (mock *mockService) SyncAll(in struct{}, out *[]api.GlobalPinInfoSerial) error {
return mock.StatusAll(in, out)
}
func (mock *mockService) Sync(in *CidArg, out *GlobalPinInfo) error {
func (mock *mockService) Sync(in api.CidArgSerial, out *api.GlobalPinInfoSerial) error {
return mock.Status(in, out)
}
func (mock *mockService) StateSync(in struct{}, out *[]PinInfo) error {
*out = []PinInfo{}
func (mock *mockService) StateSync(in struct{}, out *[]api.PinInfoSerial) error {
*out = make([]api.PinInfoSerial, 0, 0)
return nil
}
func (mock *mockService) Recover(in *CidArg, out *GlobalPinInfo) error {
func (mock *mockService) Recover(in api.CidArgSerial, out *api.GlobalPinInfoSerial) error {
return mock.Status(in, out)
}
func (mock *mockService) Track(in *CidArg, out *struct{}) error {
func (mock *mockService) Track(in api.CidArgSerial, out *struct{}) error {
return nil
}
func (mock *mockService) Untrack(in *CidArg, out *struct{}) error {
func (mock *mockService) Untrack(in api.CidArgSerial, out *struct{}) error {
return nil
}

25
util.go
View File

@ -3,8 +3,9 @@ package ipfscluster
import (
"fmt"
host "github.com/libp2p/go-libp2p-host"
"github.com/ipfs/ipfs-cluster/api"
host "github.com/libp2p/go-libp2p-host"
peer "github.com/libp2p/go-libp2p-peer"
ma "github.com/multiformats/go-multiaddr"
)
@ -18,7 +19,7 @@ import (
// return ifaces
// }
func copyIDSerialsToIfaces(in []IDSerial) []interface{} {
func copyIDSerialsToIfaces(in []api.IDSerial) []interface{} {
ifaces := make([]interface{}, len(in), len(in))
for i := range in {
ifaces[i] = &in[i]
@ -26,7 +27,7 @@ func copyIDSerialsToIfaces(in []IDSerial) []interface{} {
return ifaces
}
func copyPinInfoToIfaces(in []PinInfo) []interface{} {
func copyPinInfoSerialToIfaces(in []api.PinInfoSerial) []interface{} {
ifaces := make([]interface{}, len(in), len(in))
for i := range in {
ifaces[i] = &in[i]
@ -34,7 +35,7 @@ func copyPinInfoToIfaces(in []PinInfo) []interface{} {
return ifaces
}
func copyPinInfoSliceToIfaces(in [][]PinInfo) []interface{} {
func copyPinInfoSerialSliceToIfaces(in [][]api.PinInfoSerial) []interface{} {
ifaces := make([]interface{}, len(in), len(in))
for i := range in {
ifaces[i] = &in[i]
@ -120,3 +121,19 @@ func getRemoteMultiaddr(h host.Host, pid peer.ID, addr ma.Multiaddr) ma.Multiadd
}
return multiaddrJoin(addr, pid)
}
func pinInfoSliceToSerial(pi []api.PinInfo) []api.PinInfoSerial {
pis := make([]api.PinInfoSerial, len(pi), len(pi))
for i, v := range pi {
pis[i] = v.ToSerial()
}
return pis
}
func globalPinInfoSliceToSerial(gpi []api.GlobalPinInfo) []api.GlobalPinInfoSerial {
gpis := make([]api.GlobalPinInfoSerial, len(gpi), len(gpi))
for i, v := range gpi {
gpis[i] = v.ToSerial()
}
return gpis
}