update go-cid and go-libp2p

License: MIT
Signed-off-by: Adrian Lanzafame <adrianlanzafame92@gmail.com>
Adrian Lanzafame 2018-09-22 11:00:10 +10:00
parent 753322cdc1
commit 31474f6490
No known key found for this signature in database
GPG Key ID: 87E40C5D62EAE192
35 changed files with 197 additions and 192 deletions

View File

@ -29,7 +29,7 @@ type ClusterDAGService interface {
ipld.DAGService
// Finalize receives the IPFS content root CID as
// returned by the ipfs adder.
Finalize(ctx context.Context, ipfsRoot *cid.Cid) (*cid.Cid, error)
Finalize(ctx context.Context, ipfsRoot cid.Cid) (cid.Cid, error)
}
// Adder is used to add content to IPFS Cluster using an implementation of
@ -81,7 +81,7 @@ func (a *Adder) setContext(ctx context.Context) {
// FromMultipart adds content from a multipart.Reader. The adder will
// no longer be usable after calling this method.
func (a *Adder) FromMultipart(ctx context.Context, r *multipart.Reader) (*cid.Cid, error) {
func (a *Adder) FromMultipart(ctx context.Context, r *multipart.Reader) (cid.Cid, error) {
logger.Debugf("adding from multipart with params: %+v", a.params)
f := &files.MultipartFile{
@ -94,12 +94,12 @@ func (a *Adder) FromMultipart(ctx context.Context, r *multipart.Reader) (*cid.Ci
// FromFiles adds content from a files.File. The adder will no longer
// be usable after calling this method.
func (a *Adder) FromFiles(ctx context.Context, f files.File) (*cid.Cid, error) {
func (a *Adder) FromFiles(ctx context.Context, f files.File) (cid.Cid, error) {
logger.Debugf("adding from files")
a.setContext(ctx)
if a.ctx.Err() != nil { // don't allow running twice
return nil, a.ctx.Err()
return cid.Undef, a.ctx.Err()
}
defer a.cancel()
@ -108,7 +108,7 @@ func (a *Adder) FromFiles(ctx context.Context, f files.File) (*cid.Cid, error) {
ipfsAdder, err := ipfsadd.NewAdder(a.ctx, a.dgs)
if err != nil {
logger.Error(err)
return nil, err
return cid.Undef, err
}
ipfsAdder.Hidden = a.params.Hidden
@ -122,12 +122,12 @@ func (a *Adder) FromFiles(ctx context.Context, f files.File) (*cid.Cid, error) {
// Set up prefix
prefix, err := merkledag.PrefixForCidVersion(a.params.CidVersion)
if err != nil {
return nil, fmt.Errorf("bad CID Version: %s", err)
return cid.Undef, fmt.Errorf("bad CID Version: %s", err)
}
hashFunCode, ok := multihash.Names[strings.ToLower(a.params.HashFun)]
if !ok {
return nil, fmt.Errorf("unrecognized hash function: %s", a.params.HashFun)
return cid.Undef, fmt.Errorf("unrecognized hash function: %s", a.params.HashFun)
}
prefix.MhType = hashFunCode
prefix.MhLength = -1
@ -136,7 +136,7 @@ func (a *Adder) FromFiles(ctx context.Context, f files.File) (*cid.Cid, error) {
for {
select {
case <-a.ctx.Done():
return nil, a.ctx.Err()
return cid.Undef, a.ctx.Err()
default:
err := addFile(f, ipfsAdder)
if err == io.EOF {
@ -144,7 +144,7 @@ func (a *Adder) FromFiles(ctx context.Context, f files.File) (*cid.Cid, error) {
}
if err != nil {
logger.Error("error adding to cluster: ", err)
return nil, err
return cid.Undef, err
}
}
}
@ -152,12 +152,12 @@ func (a *Adder) FromFiles(ctx context.Context, f files.File) (*cid.Cid, error) {
FINALIZE:
adderRoot, err := ipfsAdder.Finalize()
if err != nil {
return nil, err
return cid.Undef, err
}
clusterRoot, err := a.dgs.Finalize(a.ctx, adderRoot.Cid())
if err != nil {
logger.Error("error finalizing adder:", err)
return nil, err
return cid.Undef, err
}
logger.Infof("%s successfully added to cluster", clusterRoot)
return clusterRoot, nil
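The pattern throughout this file is the same: with the updated go-cid, cid.Cid is a value type, so error paths return the zero value cid.Undef instead of a nil pointer, and callers test it with Defined() or a direct comparison. A minimal standalone sketch of that convention (the decodeRoot helper and the example CID are illustrative, not part of this commit):

package main

import (
	"errors"
	"fmt"

	cid "github.com/ipfs/go-cid"
)

// decodeRoot returns cid.Undef on failure, mirroring how FromFiles and
// Finalize now signal "no CID" without a nil pointer.
func decodeRoot(s string) (cid.Cid, error) {
	if s == "" {
		return cid.Undef, errors.New("empty cid string")
	}
	c, err := cid.Decode(s)
	if err != nil {
		return cid.Undef, err
	}
	return c, nil
}

func main() {
	c, err := decodeRoot("QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG")
	fmt.Println(c.Defined(), err) // true <nil> for a valid CIDv0 string
}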

View File

@ -34,7 +34,7 @@ func (dag *mockCDAGServ) AddMany(ctx context.Context, nodes []ipld.Node) error {
return nil
}
func (dag *mockCDAGServ) Finalize(ctx context.Context, root *cid.Cid) (*cid.Cid, error) {
func (dag *mockCDAGServ) Finalize(ctx context.Context, root cid.Cid) (cid.Cid, error) {
return root, nil
}

View File

@ -28,7 +28,7 @@ func AddMultipartHTTPHandler(
params *api.AddParams,
reader *multipart.Reader,
w http.ResponseWriter,
) (*cid.Cid, error) {
) (cid.Cid, error) {
var dags adder.ClusterDAGService
output := make(chan *api.AddedOutput, 200)
flusher, flush := w.(http.Flusher)

View File

@ -59,7 +59,7 @@ type Adder struct {
Chunker string
root ipld.Node
mroot *mfs.Root
tempRoot *cid.Cid
tempRoot cid.Cid
Prefix *cid.Prefix
CidBuilder cid.Builder
liveNodes uint64

View File

@ -66,7 +66,7 @@ func (dgs *DAGService) Add(ctx context.Context, node ipld.Node) error {
}
// Finalize pins the last Cid added to this DAGService.
func (dgs *DAGService) Finalize(ctx context.Context, root *cid.Cid) (*cid.Cid, error) {
func (dgs *DAGService) Finalize(ctx context.Context, root cid.Cid) (cid.Cid, error) {
// Cluster pin the result
rootPin := api.PinWithOpts(root, dgs.pinOpts)
rootPin.Allocations = dgs.dests

View File

@ -65,7 +65,7 @@ func CborDataToNode(raw []byte, format string) (ipld.Node, error) {
return shardNode, nil
}
func makeDAGSimple(ctx context.Context, dagObj map[string]*cid.Cid) (ipld.Node, error) {
func makeDAGSimple(ctx context.Context, dagObj map[string]cid.Cid) (ipld.Node, error) {
node, err := cbor.WrapObject(
dagObj,
hashFn, mh.DefaultLengths[hashFn],
@ -83,7 +83,7 @@ func makeDAGSimple(ctx context.Context, dagObj map[string]*cid.Cid) (ipld.Node,
// carry links to the data nodes being tracked. The head of the output slice
// is always the root of the shardDAG, i.e. the ipld node that should be
// recursively pinned to track the shard
func makeDAG(ctx context.Context, dagObj map[string]*cid.Cid) ([]ipld.Node, error) {
func makeDAG(ctx context.Context, dagObj map[string]cid.Cid) ([]ipld.Node, error) {
// FIXME: We have a 4MB limit on the block size enforced by bitswap:
// https://github.com/libp2p/go-libp2p-net/blob/master/interface.go#L20
@ -93,11 +93,11 @@ func makeDAG(ctx context.Context, dagObj map[string]*cid.Cid) ([]ipld.Node, erro
return []ipld.Node{n}, err
}
// Indirect node required
leafNodes := make([]ipld.Node, 0) // shardNodes with links to data
indirectObj := make(map[string]*cid.Cid) // shardNode with links to shardNodes
leafNodes := make([]ipld.Node, 0) // shardNodes with links to data
indirectObj := make(map[string]cid.Cid) // shardNode with links to shardNodes
numFullLeaves := len(dagObj) / MaxLinks
for i := 0; i <= numFullLeaves; i++ {
leafObj := make(map[string]*cid.Cid)
leafObj := make(map[string]cid.Cid)
for j := 0; j < MaxLinks; j++ {
c, ok := dagObj[fmt.Sprintf("%d", i*MaxLinks+j)]
if !ok { // finished with this leaf before filling all the way
@ -182,7 +182,7 @@ func indirectCount(linkNum int) uint64 {
if q == 0 { // no indirect node needed
return 0
}
dummyIndirect := make(map[string]*cid.Cid)
dummyIndirect := make(map[string]cid.Cid)
for key := 0; key <= q; key++ {
dummyIndirect[fmt.Sprintf("%d", key)] = nil
dummyIndirect[fmt.Sprintf("%d", key)] = cid.Undef
}
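For the shard DAG maps above, switching from map[string]*cid.Cid to map[string]cid.Cid also changes what a missing key looks up to: the zero value is cid.Undef rather than nil, which is why the `c, ok := dagObj[...]` form is kept. A small sketch with illustrative inputs only:

package main

import (
	"fmt"

	cid "github.com/ipfs/go-cid"
)

// buildDagObj sketches the link map consumed by makeDAG above: link names are
// the decimal index of each block and the values are cid.Cid values.
func buildDagObj(blocks []cid.Cid) map[string]cid.Cid {
	dagObj := make(map[string]cid.Cid)
	for i, c := range blocks {
		dagObj[fmt.Sprintf("%d", i)] = c
	}
	return dagObj
}

func main() {
	a, _ := cid.Decode("QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG")
	obj := buildDagObj([]cid.Cid{a})
	_, ok := obj["1"]
	fmt.Println(obj["0"].Defined(), ok, obj["1"] == cid.Undef) // true false true
}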

View File

@ -41,10 +41,10 @@ type DAGService struct {
// Current shard being built
currentShard *shard
// Last flushed shard CID
previousShard *cid.Cid
previousShard cid.Cid
// shard tracking
shards map[string]*cid.Cid
shards map[string]cid.Cid
startTime time.Time
totalSize uint64
@ -58,7 +58,7 @@ func New(rpc *rpc.Client, opts api.PinOptions, out chan<- *api.AddedOutput) *DAG
pinOpts: opts,
output: out,
addedSet: cid.NewSet(),
shards: make(map[string]*cid.Cid),
shards: make(map[string]cid.Cid),
startTime: time.Now(),
}
}
@ -86,7 +86,7 @@ func (dgs *DAGService) Add(ctx context.Context, node ipld.Node) error {
// Finalize finishes sharding, creates the cluster DAG and pins it along
// with the meta pin for the root node of the content.
func (dgs *DAGService) Finalize(ctx context.Context, dataRoot *cid.Cid) (*cid.Cid, error) {
func (dgs *DAGService) Finalize(ctx context.Context, dataRoot cid.Cid) (cid.Cid, error) {
lastCid, err := dgs.flushCurrentShard(ctx)
if err != nil {
return lastCid, err
@ -214,7 +214,7 @@ func (dgs *DAGService) ingestBlock(ctx context.Context, n *api.NodeWithMeta) err
return dgs.ingestBlock(ctx, n) // <-- retry ingest
}
func (dgs *DAGService) logStats(metaPin, clusterDAGPin *cid.Cid) {
func (dgs *DAGService) logStats(metaPin, clusterDAGPin cid.Cid) {
duration := time.Since(dgs.startTime)
seconds := uint64(duration) / uint64(time.Second)
var rate string
@ -252,10 +252,10 @@ func (dgs *DAGService) sendOutput(ao *api.AddedOutput) {
}
// flushes the dgs.currentShard and returns the LastLink()
func (dgs *DAGService) flushCurrentShard(ctx context.Context) (*cid.Cid, error) {
func (dgs *DAGService) flushCurrentShard(ctx context.Context) (cid.Cid, error) {
shard := dgs.currentShard
if shard == nil {
return nil, errors.New("cannot flush a nil shard")
return cid.Undef, errors.New("cannot flush a nil shard")
}
lens := len(dgs.shards)

View File

@ -44,7 +44,7 @@ func (rpcs *testRPC) BlockAllocate(ctx context.Context, in api.PinSerial, out *[
return nil
}
func (rpcs *testRPC) PinGet(c *cid.Cid) (api.Pin, error) {
func (rpcs *testRPC) PinGet(c cid.Cid) (api.Pin, error) {
pI, ok := rpcs.pins.Load(c.String())
if !ok {
return api.Pin{}, errors.New("not found")
@ -52,7 +52,7 @@ func (rpcs *testRPC) PinGet(c *cid.Cid) (api.Pin, error) {
return pI.(api.PinSerial).ToPin(), nil
}
func (rpcs *testRPC) BlockGet(c *cid.Cid) ([]byte, error) {
func (rpcs *testRPC) BlockGet(c cid.Cid) ([]byte, error) {
bI, ok := rpcs.blocks.Load(c.String())
if !ok {
return nil, errors.New("not found")

View File

@ -22,7 +22,7 @@ type shard struct {
pinOptions api.PinOptions
// dagNode represents a node with links and will be converted
// to Cbor.
dagNode map[string]*cid.Cid
dagNode map[string]cid.Cid
currentSize uint64
sizeLimit uint64
}
@ -49,7 +49,7 @@ func newShard(ctx context.Context, rpc *rpc.Client, opts api.PinOptions) (*shard
rpc: rpc,
allocations: allocs,
pinOptions: opts,
dagNode: make(map[string]*cid.Cid),
dagNode: make(map[string]cid.Cid),
currentSize: 0,
sizeLimit: opts.ShardSize,
}, nil
@ -57,7 +57,7 @@ func newShard(ctx context.Context, rpc *rpc.Client, opts api.PinOptions) (*shard
// AddLink tries to add a new block to this shard if it's not full.
// Returns true if the block was added
func (sh *shard) AddLink(ctx context.Context, c *cid.Cid, s uint64) {
func (sh *shard) AddLink(ctx context.Context, c cid.Cid, s uint64) {
linkN := len(sh.dagNode)
linkName := fmt.Sprintf("%d", linkN)
logger.Debugf("shard: add link: %s", linkName)
@ -74,16 +74,16 @@ func (sh *shard) Allocations() []peer.ID {
// Flush completes the allocation of this shard by building a CBOR node
// and adding it to IPFS, then pinning it in cluster. It returns the Cid of the
// shard.
func (sh *shard) Flush(ctx context.Context, shardN int, prev *cid.Cid) (*cid.Cid, error) {
func (sh *shard) Flush(ctx context.Context, shardN int, prev cid.Cid) (cid.Cid, error) {
logger.Debugf("shard %d: flush", shardN)
nodes, err := makeDAG(ctx, sh.dagNode)
if err != nil {
return nil, err
return cid.Undef, err
}
err = putDAG(ctx, sh.rpc, nodes, sh.allocations)
if err != nil {
return nil, err
return cid.Undef, err
}
rootCid := nodes[0].Cid()
@ -123,7 +123,7 @@ func (sh *shard) Limit() uint64 {
// the last link of the last shard is the data root for the
// full sharded DAG (the CID that would have resulted from
// adding the content to a single IPFS daemon).
func (sh *shard) LastLink() *cid.Cid {
func (sh *shard) LastLink() cid.Cid {
l := len(sh.dagNode)
lastLink := fmt.Sprintf("%d", l-1)
return sh.dagNode[lastLink]

View File

@ -12,19 +12,19 @@ import (
// MockPinStore is used in VerifyShards
type MockPinStore interface {
// Gets a pin
PinGet(*cid.Cid) (api.Pin, error)
PinGet(cid.Cid) (api.Pin, error)
}
// MockBlockStore is used in VerifyShards
type MockBlockStore interface {
// Gets a block
BlockGet(*cid.Cid) ([]byte, error)
BlockGet(cid.Cid) ([]byte, error)
}
// VerifyShards checks that a sharded CID has been correctly formed and stored.
// This is a helper function for testing. It returns a map with all the blocks
// from all shards.
func VerifyShards(t *testing.T, rootCid *cid.Cid, pins MockPinStore, ipfs MockBlockStore, expectedShards int) (map[string]struct{}, error) {
func VerifyShards(t *testing.T, rootCid cid.Cid, pins MockPinStore, ipfs MockBlockStore, expectedShards int) (map[string]struct{}, error) {
metaPin, err := pins.PinGet(rootCid)
if err != nil {
return nil, fmt.Errorf("meta pin was not pinned: %s", err)
@ -62,7 +62,7 @@ func VerifyShards(t *testing.T, rootCid *cid.Cid, pins MockPinStore, ipfs MockBl
}
shardBlocks := make(map[string]struct{})
var ref *cid.Cid
var ref cid.Cid
// traverse shards in order
for i := 0; i < len(shards); i++ {
sh, _, err := clusterDAGNode.ResolveLink([]string{fmt.Sprintf("%d", i)})
@ -75,7 +75,7 @@ func VerifyShards(t *testing.T, rootCid *cid.Cid, pins MockPinStore, ipfs MockBl
return nil, fmt.Errorf("shard was not pinned: %s %s", sh.Cid, err)
}
if ref != nil && !shardPin.Reference.Equals(ref) {
if ref != cid.Undef && !shardPin.Reference.Equals(ref) {
t.Errorf("Ref (%s) should point to previous shard (%s)", ref, shardPin.Reference)
}
ref = shardPin.Cid
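VerifyShards walks the shard chain: each shard pin must reference the CID of the previously flushed shard, and "no previous shard yet" is now expressed as cid.Undef instead of a nil pointer. A self-contained sketch of that check, using a stand-in shardPin struct rather than api.Pin:

package main

import (
	"fmt"

	cid "github.com/ipfs/go-cid"
)

// shardPin is a stand-in for api.Pin with just the fields the chain check
// needs; it is not the cluster type.
type shardPin struct {
	Cid       cid.Cid
	Reference cid.Cid
}

// checkChain mirrors the VerifyShards loop above: every shard after the first
// must reference the previous shard's CID.
func checkChain(shards []shardPin) error {
	ref := cid.Undef
	for i, sp := range shards {
		if ref != cid.Undef && !sp.Reference.Equals(ref) {
			return fmt.Errorf("shard %d does not reference previous shard %s", i, ref)
		}
		ref = sp.Cid
	}
	return nil
}

func main() {
	a, _ := cid.Decode("QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG")
	b, _ := cid.Decode("QmbWqxBEKC3P8tqsKc98xmWNzrzDtRLMiMPL8wBuTGsMnR")
	fmt.Println(checkChain([]shardPin{{Cid: a}, {Cid: b, Reference: a}})) // <nil>
}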

View File

@ -55,7 +55,7 @@ func BlockAllocate(ctx context.Context, rpc *rpc.Client, pinOpts api.PinOptions)
"",
"Cluster",
"BlockAllocate",
api.PinWithOpts(nil, pinOpts).ToSerial(),
api.PinWithOpts(cid.Undef, pinOpts).ToSerial(),
&allocsStr,
)
return api.StringsToPeers(allocsStr), err
@ -87,12 +87,12 @@ type BaseDAGService struct {
}
// Get always returns errNotFound
func (dag BaseDAGService) Get(ctx context.Context, key *cid.Cid) (ipld.Node, error) {
func (dag BaseDAGService) Get(ctx context.Context, key cid.Cid) (ipld.Node, error) {
return nil, ErrDAGNotFound
}
// GetMany returns an output channel that always emits an error
func (dag BaseDAGService) GetMany(ctx context.Context, keys []*cid.Cid) <-chan *ipld.NodeOption {
func (dag BaseDAGService) GetMany(ctx context.Context, keys []cid.Cid) <-chan *ipld.NodeOption {
out := make(chan *ipld.NodeOption, 1)
out <- &ipld.NodeOption{Err: fmt.Errorf("failed to fetch all nodes")}
close(out)
@ -100,11 +100,11 @@ func (dag BaseDAGService) GetMany(ctx context.Context, keys []*cid.Cid) <-chan *
}
// Remove is a nop
func (dag BaseDAGService) Remove(ctx context.Context, key *cid.Cid) error {
func (dag BaseDAGService) Remove(ctx context.Context, key cid.Cid) error {
return nil
}
// RemoveMany is a nop
func (dag BaseDAGService) RemoveMany(ctx context.Context, keys []*cid.Cid) error {
func (dag BaseDAGService) RemoveMany(ctx context.Context, keys []cid.Cid) error {
return nil
}

View File

@ -44,7 +44,7 @@ import (
// into account if the given CID was previously in a "pin everywhere" mode,
// and will consider such Pins as currently unallocated ones, providing
// new allocations as available.
func (c *Cluster) allocate(hash *cid.Cid, rplMin, rplMax int, blacklist []peer.ID, prioritylist []peer.ID) ([]peer.ID, error) {
func (c *Cluster) allocate(hash cid.Cid, rplMin, rplMax int, blacklist []peer.ID, prioritylist []peer.ID) ([]peer.ID, error) {
if (rplMin + rplMax) == 0 {
return nil, fmt.Errorf("bad replication factors: %d/%d", rplMin, rplMax)
}
@ -97,7 +97,7 @@ func (c *Cluster) allocate(hash *cid.Cid, rplMin, rplMax int, blacklist []peer.I
}
// allocationError logs an allocation error
func allocationError(hash *cid.Cid, needed, wanted int, candidatesValid []peer.ID) error {
func allocationError(hash cid.Cid, needed, wanted int, candidatesValid []peer.ID) error {
logger.Errorf("Not enough candidates to allocate %s:", hash)
logger.Errorf(" Needed: %d", needed)
logger.Errorf(" Wanted: %d", wanted)
@ -114,7 +114,7 @@ func allocationError(hash *cid.Cid, needed, wanted int, candidatesValid []peer.I
}
func (c *Cluster) obtainAllocations(
hash *cid.Cid,
hash cid.Cid,
rplMin, rplMax int,
currentValidMetrics map[peer.ID]api.Metric,
candidatesMetrics map[peer.ID]api.Metric,
@ -167,7 +167,11 @@ func (c *Cluster) obtainAllocations(
// the allocator returns a list of peers ordered by priority
finalAllocs, err := c.allocator.Allocate(
hash, currentValidMetrics, candidatesMetrics, priorityMetrics)
hash,
currentValidMetrics,
candidatesMetrics,
priorityMetrics,
)
if err != nil {
return nil, logError(err.Error())
}

View File

@ -34,7 +34,7 @@ func (alloc AscendAllocator) Shutdown() error { return nil }
// carry a numeric value such as "used disk". We do not pay attention to
// the metrics of the currently allocated peers and we just sort the
// candidates based on their metric values (smallest to largest).
func (alloc AscendAllocator) Allocate(c *cid.Cid, current,
func (alloc AscendAllocator) Allocate(c cid.Cid, current,
candidates, priority map[peer.ID]api.Metric) ([]peer.ID, error) {
// sort our metrics
first := util.SortNumeric(priority, false)

View File

@ -34,7 +34,7 @@ func (alloc DescendAllocator) Shutdown() error { return nil }
// carry a numeric value such as "used disk". We do not pay attention to
// the metrics of the currently allocated peers and we just sort the
// candidates based on their metric values (largest to smallest).
func (alloc DescendAllocator) Allocate(c *cid.Cid, current, candidates, priority map[peer.ID]api.Metric) ([]peer.ID, error) {
func (alloc DescendAllocator) Allocate(c cid.Cid, current, candidates, priority map[peer.ID]api.Metric) ([]peer.ID, error) {
// sort our metrics
first := util.SortNumeric(priority, true)
last := util.SortNumeric(candidates, true)
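Both allocators order candidate peers by the numeric value their metric carries, ascending for AscendAllocator (least-used first) and descending for DescendAllocator. A rough standalone sketch of that ordering, using plain strings and float64s instead of peer.ID and api.Metric:

package main

import (
	"fmt"
	"sort"
)

// sortByMetric sketches what Allocate does with candidate metrics: peers are
// ordered by the metric value, in the direction the allocator asks for.
func sortByMetric(metrics map[string]float64, descending bool) []string {
	peers := make([]string, 0, len(metrics))
	for p := range metrics {
		peers = append(peers, p)
	}
	sort.Slice(peers, func(i, j int) bool {
		if descending {
			return metrics[peers[i]] > metrics[peers[j]]
		}
		return metrics[peers[i]] < metrics[peers[j]]
	})
	return peers
}

func main() {
	m := map[string]float64{"peerA": 30, "peerB": 10, "peerC": 20}
	fmt.Println(sortByMetric(m, false)) // [peerB peerC peerA]
	fmt.Println(sortByMetric(m, true))  // [peerA peerC peerB]
}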

View File

@ -62,7 +62,7 @@ func (c *Client) PeerRm(id peer.ID) error {
// Pin tracks a Cid with the given replication factor and a name for
// human-friendliness.
func (c *Client) Pin(ci *cid.Cid, replicationFactorMin, replicationFactorMax int, name string) error {
func (c *Client) Pin(ci cid.Cid, replicationFactorMin, replicationFactorMax int, name string) error {
escName := url.QueryEscape(name)
err := c.do(
"POST",
@ -81,7 +81,7 @@ func (c *Client) Pin(ci *cid.Cid, replicationFactorMin, replicationFactorMax int
}
// Unpin untracks a Cid from cluster.
func (c *Client) Unpin(ci *cid.Cid) error {
func (c *Client) Unpin(ci cid.Cid) error {
return c.do("DELETE", fmt.Sprintf("/pins/%s", ci.String()), nil, nil, nil)
}
@ -118,7 +118,7 @@ func (c *Client) Allocations(filter api.PinType) ([]api.Pin, error) {
}
// Allocation returns the current allocations for a given Cid.
func (c *Client) Allocation(ci *cid.Cid) (api.Pin, error) {
func (c *Client) Allocation(ci cid.Cid) (api.Pin, error) {
var pin api.PinSerial
err := c.do("GET", fmt.Sprintf("/allocations/%s", ci.String()), nil, nil, &pin)
return pin.ToPin(), err
@ -127,7 +127,7 @@ func (c *Client) Allocation(ci *cid.Cid) (api.Pin, error) {
// Status returns the current ipfs state for a given Cid. If local is true,
// the information affects only the current peer, otherwise the information
// is fetched from all cluster peers.
func (c *Client) Status(ci *cid.Cid, local bool) (api.GlobalPinInfo, error) {
func (c *Client) Status(ci cid.Cid, local bool) (api.GlobalPinInfo, error) {
var gpi api.GlobalPinInfoSerial
err := c.do("GET", fmt.Sprintf("/pins/%s?local=%t", ci.String(), local), nil, nil, &gpi)
return gpi.ToGlobalPinInfo(), err
@ -147,7 +147,7 @@ func (c *Client) StatusAll(local bool) ([]api.GlobalPinInfo, error) {
// Sync makes sure the state of a Cid corresponds to the state reported by
// the ipfs daemon, and returns it. If local is true, this operation only
// happens on the current peer, otherwise it happens on every cluster peer.
func (c *Client) Sync(ci *cid.Cid, local bool) (api.GlobalPinInfo, error) {
func (c *Client) Sync(ci cid.Cid, local bool) (api.GlobalPinInfo, error) {
var gpi api.GlobalPinInfoSerial
err := c.do("POST", fmt.Sprintf("/pins/%s/sync?local=%t", ci.String(), local), nil, nil, &gpi)
return gpi.ToGlobalPinInfo(), err
@ -170,7 +170,7 @@ func (c *Client) SyncAll(local bool) ([]api.GlobalPinInfo, error) {
// Recover retriggers pin or unpin ipfs operations for a Cid in error state.
// If local is true, the operation is limited to the current peer, otherwise
// it happens on every cluster peer.
func (c *Client) Recover(ci *cid.Cid, local bool) (api.GlobalPinInfo, error) {
func (c *Client) Recover(ci cid.Cid, local bool) (api.GlobalPinInfo, error) {
var gpi api.GlobalPinInfoSerial
err := c.do("POST", fmt.Sprintf("/pins/%s/recover?local=%t", ci.String(), local), nil, nil, &gpi)
return gpi.ToGlobalPinInfo(), err
@ -236,7 +236,7 @@ func (c *Client) WaitFor(ctx context.Context, fp StatusFilterParams) (api.Global
// StatusFilterParams contains the parameters required
// to filter a stream of status results.
type StatusFilterParams struct {
Cid *cid.Cid
Cid cid.Cid
Local bool
Target api.TrackerStatus
CheckFreq time.Duration

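The client methods embed the CID in the REST path via its string form. The two path formats below are copied from Client.Status and Client.Unpin above; the helper functions themselves are illustrative, not part of the client package:

package main

import (
	"fmt"

	cid "github.com/ipfs/go-cid"
)

// statusPath and unpinPath reproduce the URL formats visible above, just to
// show how the value-typed CID ends up in the request path.
func statusPath(c cid.Cid, local bool) string {
	return fmt.Sprintf("/pins/%s?local=%t", c.String(), local)
}

func unpinPath(c cid.Cid) string {
	return fmt.Sprintf("/pins/%s", c.String())
}

func main() {
	c, _ := cid.Decode("QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG")
	fmt.Println(statusPath(c, true))
	fmt.Println(unpinPath(c)) // used with the DELETE verb by Client.Unpin
}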
View File

@ -158,7 +158,7 @@ var ipfsPinStatus2TrackerStatusMap = map[IPFSPinStatus]TrackerStatus{
// GlobalPinInfo contains cluster-wide status information about a tracked Cid,
// indexed by cluster peer.
type GlobalPinInfo struct {
Cid *cid.Cid
Cid cid.Cid
PeerMap map[peer.ID]PinInfo
}
@ -171,7 +171,7 @@ type GlobalPinInfoSerial struct {
// ToSerial converts a GlobalPinInfo to its serializable version.
func (gpi GlobalPinInfo) ToSerial() GlobalPinInfoSerial {
s := GlobalPinInfoSerial{}
if gpi.Cid != nil {
if gpi.Cid.Defined() {
s.Cid = gpi.Cid.String()
}
s.PeerMap = make(map[string]PinInfoSerial)
@ -203,7 +203,7 @@ func (gpis GlobalPinInfoSerial) ToGlobalPinInfo() GlobalPinInfo {
// PinInfo holds information about local pins.
type PinInfo struct {
Cid *cid.Cid
Cid cid.Cid
Peer peer.ID
Status TrackerStatus
TS time.Time
@ -223,7 +223,7 @@ type PinInfoSerial struct {
// ToSerial converts a PinInfo to its serializable version.
func (pi PinInfo) ToSerial() PinInfoSerial {
c := ""
if pi.Cid != nil {
if pi.Cid.Defined() {
c = pi.Cid.String()
}
p := ""
@ -531,7 +531,7 @@ func (addrsS MultiaddrsSerial) ToMultiaddrs() []ma.Multiaddr {
}
// CidsToStrings encodes cid.Cids to strings.
func CidsToStrings(cids []*cid.Cid) []string {
func CidsToStrings(cids []cid.Cid) []string {
strs := make([]string, len(cids))
for i, c := range cids {
strs[i] = c.String()
@ -649,7 +649,7 @@ type PinOptions struct {
type Pin struct {
PinOptions
Cid *cid.Cid
Cid cid.Cid
// See PinType comments
Type PinType
@ -665,12 +665,12 @@ type Pin struct {
// ClusterDAGs, it is the MetaPin CID. For the
// MetaPin it is the ClusterDAG CID. For Shards,
// it is the previous shard CID.
Reference *cid.Cid
Reference cid.Cid
}
// PinCid is a shortcut to create a Pin only with a Cid. Default is for pin to
// be recursive and the pin to be of DataType.
func PinCid(c *cid.Cid) Pin {
func PinCid(c cid.Cid) Pin {
return Pin{
Cid: c,
Type: DataType,
@ -681,7 +681,7 @@ func PinCid(c *cid.Cid) Pin {
// PinWithOpts creates a new Pin calling PinCid(c) and then sets
// its PinOptions fields with the given options.
func PinWithOpts(c *cid.Cid, opts PinOptions) Pin {
func PinWithOpts(c cid.Cid, opts PinOptions) Pin {
p := PinCid(c)
p.ReplicationFactorMin = opts.ReplicationFactorMin
p.ReplicationFactorMax = opts.ReplicationFactorMax
@ -704,11 +704,11 @@ type PinSerial struct {
// ToSerial converts a Pin to PinSerial.
func (pin Pin) ToSerial() PinSerial {
c := ""
if pin.Cid != nil {
if pin.Cid.Defined() {
c = pin.Cid.String()
}
ref := ""
if pin.Reference != nil {
if pin.Reference.Defined() {
ref = pin.Reference.String()
}
@ -800,7 +800,7 @@ func (pins PinSerial) ToPin() Pin {
if err != nil {
logger.Debug(pins.Cid, err)
}
var ref *cid.Cid
var ref cid.Cid
if pins.Reference != "" {
ref, err = cid.Decode(pins.Reference)
if err != nil {

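The serialization convention in this file is that an undefined CID travels as the empty string and decodes back to cid.Undef, replacing the old nil-pointer checks. A round-trip sketch of that convention (helper names are illustrative):

package main

import (
	"fmt"

	cid "github.com/ipfs/go-cid"
)

// cidToString / stringToCid sketch the ToSerial/ToPin convention above.
func cidToString(c cid.Cid) string {
	if !c.Defined() {
		return ""
	}
	return c.String()
}

func stringToCid(s string) cid.Cid {
	if s == "" {
		return cid.Undef
	}
	c, err := cid.Decode(s)
	if err != nil {
		return cid.Undef
	}
	return c
}

func main() {
	fmt.Println(cidToString(cid.Undef) == "") // true
	fmt.Println(stringToCid("") == cid.Undef) // true
	c, _ := cid.Decode("QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG")
	fmt.Println(stringToCid(cidToString(c)).Equals(c)) // true
}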
View File

@ -754,12 +754,12 @@ func (c *Cluster) StatusAllLocal() []api.PinInfo {
// Status returns the GlobalPinInfo for a given Cid as fetched from all
// current peers. If an error happens, the GlobalPinInfo should contain
// as much information as could be fetched from the other peers.
func (c *Cluster) Status(h *cid.Cid) (api.GlobalPinInfo, error) {
func (c *Cluster) Status(h cid.Cid) (api.GlobalPinInfo, error) {
return c.globalPinInfoCid("TrackerStatus", h)
}
// StatusLocal returns this peer's PinInfo for a given Cid.
func (c *Cluster) StatusLocal(h *cid.Cid) api.PinInfo {
func (c *Cluster) StatusLocal(h cid.Cid) api.PinInfo {
return c.tracker.Status(h)
}
@ -789,14 +789,14 @@ func (c *Cluster) SyncAllLocal() ([]api.PinInfo, error) {
// Sync triggers a SyncLocal() operation for a given Cid.
// in all cluster peers.
func (c *Cluster) Sync(h *cid.Cid) (api.GlobalPinInfo, error) {
func (c *Cluster) Sync(h cid.Cid) (api.GlobalPinInfo, error) {
return c.globalPinInfoCid("SyncLocal", h)
}
// used for RecoverLocal and SyncLocal.
func (c *Cluster) localPinInfoOp(
h *cid.Cid,
f func(*cid.Cid) (api.PinInfo, error),
h cid.Cid,
f func(cid.Cid) (api.PinInfo, error),
) (pInfo api.PinInfo, err error) {
cids, err := c.cidsFromMetaPin(h)
if err != nil {
@ -819,7 +819,7 @@ func (c *Cluster) localPinInfoOp(
// SyncLocal performs a local sync operation for the given Cid. This will
// tell the tracker to verify the status of the Cid against the IPFS daemon.
// It returns the updated PinInfo for the Cid.
func (c *Cluster) SyncLocal(h *cid.Cid) (pInfo api.PinInfo, err error) {
func (c *Cluster) SyncLocal(h cid.Cid) (pInfo api.PinInfo, err error) {
return c.localPinInfoOp(h, c.tracker.Sync)
}
@ -831,13 +831,13 @@ func (c *Cluster) RecoverAllLocal() ([]api.PinInfo, error) {
// Recover triggers a recover operation for a given Cid in all
// cluster peers.
func (c *Cluster) Recover(h *cid.Cid) (api.GlobalPinInfo, error) {
func (c *Cluster) Recover(h cid.Cid) (api.GlobalPinInfo, error) {
return c.globalPinInfoCid("TrackerRecover", h)
}
// RecoverLocal triggers a recover operation for a given Cid in this peer only.
// It returns the updated PinInfo, after recovery.
func (c *Cluster) RecoverLocal(h *cid.Cid) (pInfo api.PinInfo, err error) {
func (c *Cluster) RecoverLocal(h cid.Cid) (pInfo api.PinInfo, err error) {
return c.localPinInfoOp(h, c.tracker.Recover)
}
@ -861,7 +861,7 @@ func (c *Cluster) Pins() []api.Pin {
// assigned for the requested Cid, but does not indicate if
// the item is successfully pinned. For that, use Status(). PinGet
// returns an error if the given Cid is not part of the global state.
func (c *Cluster) PinGet(h *cid.Cid) (api.Pin, error) {
func (c *Cluster) PinGet(h cid.Cid) (api.Pin, error) {
st, err := c.consensus.State()
if err != nil {
return api.PinCid(h), err
@ -914,7 +914,7 @@ func (c *Cluster) setupReplicationFactor(pin *api.Pin) error {
func checkPinType(pin *api.Pin) error {
switch pin.Type {
case api.DataType:
if pin.Reference != nil {
if pin.Reference != cid.Undef {
return errors.New("data pins should not reference other pins")
}
case api.ShardType:
@ -932,14 +932,14 @@ func checkPinType(pin *api.Pin) error {
if pin.MaxDepth != 0 {
return errors.New("must pin roots directly")
}
if pin.Reference == nil {
if pin.Reference == cid.Undef {
return errors.New("clusterDAG pins should reference a Meta pin")
}
case api.MetaType:
if pin.Allocations != nil && len(pin.Allocations) != 0 {
return errors.New("meta pin should not specify allocations")
}
if pin.Reference == nil {
if pin.Reference == cid.Undef {
return errors.New("metaPins should reference a ClusterDAG")
}
@ -970,7 +970,7 @@ func (c *Cluster) setupPin(pin *api.Pin) error {
// to the consensus layer or skipped (due to error or to the fact
// that it was already valid).
func (c *Cluster) pin(pin api.Pin, blacklist []peer.ID, prioritylist []peer.ID) (bool, error) {
if pin.Cid == nil {
if pin.Cid == cid.Undef {
return false, errors.New("bad pin object")
}
@ -1016,7 +1016,7 @@ func (c *Cluster) pin(pin api.Pin, blacklist []peer.ID, prioritylist []peer.ID)
// Unpin returns an error if the operation could not be persisted
// to the global state. Unpin does not reflect the success or failure
// of underlying IPFS daemon unpinning operations.
func (c *Cluster) Unpin(h *cid.Cid) error {
func (c *Cluster) Unpin(h cid.Cid) error {
logger.Info("IPFS cluster unpinning:", h)
pin, err := c.PinGet(h)
if err != nil {
@ -1069,7 +1069,7 @@ func (c *Cluster) unpinClusterDag(metaPin api.Pin) error {
// pipeline is used to DAGify the file. Depending on input parameters this
// DAG can be added locally to the calling cluster peer's ipfs repo, or
// sharded across the entire cluster.
func (c *Cluster) AddFile(reader *multipart.Reader, params *api.AddParams) (*cid.Cid, error) {
func (c *Cluster) AddFile(reader *multipart.Reader, params *api.AddParams) (cid.Cid, error) {
var dags adder.ClusterDAGService
if params.Shard {
dags = sharding.New(c.rpcClient, params.PinOptions, nil)
@ -1122,7 +1122,7 @@ func (c *Cluster) Peers() []api.ID {
return peers
}
func (c *Cluster) globalPinInfoCid(method string, h *cid.Cid) (api.GlobalPinInfo, error) {
func (c *Cluster) globalPinInfoCid(method string, h cid.Cid) (api.GlobalPinInfo, error) {
pin := api.GlobalPinInfo{
Cid: h,
PeerMap: make(map[peer.ID]api.PinInfo),
@ -1277,13 +1277,13 @@ func (c *Cluster) getIDForPeer(pid peer.ID) (api.ID, error) {
// that order (the MetaPin is the last element).
// It returns a slice with only the given Cid if it's not a known Cid or not a
// MetaPin.
func (c *Cluster) cidsFromMetaPin(h *cid.Cid) ([]*cid.Cid, error) {
func (c *Cluster) cidsFromMetaPin(h cid.Cid) ([]cid.Cid, error) {
cState, err := c.consensus.State()
if err != nil {
return nil, err
}
list := []*cid.Cid{h}
list := []cid.Cid{h}
pin, ok := cState.Get(h)
if !ok {
@ -1294,7 +1294,7 @@ func (c *Cluster) cidsFromMetaPin(h *cid.Cid) ([]*cid.Cid, error) {
return list, nil
}
list = append([]*cid.Cid{pin.Reference}, list...)
list = append([]cid.Cid{pin.Reference}, list...)
clusterDagPin, err := c.PinGet(pin.Reference)
if err != nil {
return list, fmt.Errorf("could not get clusterDAG pin from state. Malformed pin?: %s", err)
@ -1310,7 +1310,7 @@ func (c *Cluster) cidsFromMetaPin(h *cid.Cid) ([]*cid.Cid, error) {
return list, fmt.Errorf("error parsing clusterDAG block: %s", err)
}
for _, l := range clusterDagNode.Links() {
list = append([]*cid.Cid{l.Cid}, list...)
list = append([]cid.Cid{l.Cid}, list...)
}
return list, nil
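checkPinType and pin now compare against cid.Undef where they previously compared pointers against nil; because cid.Cid wraps a string, the == comparison is well defined. A stripped-down sketch of the data-pin checks, using a placeholder pin struct instead of api.Pin:

package main

import (
	"errors"
	"fmt"

	cid "github.com/ipfs/go-cid"
)

// pin is a stand-in with just the fields needed to show the checks above:
// "no reference" is cid.Undef rather than a nil pointer.
type pin struct {
	Cid       cid.Cid
	Reference cid.Cid
}

func checkDataPin(p pin) error {
	if p.Cid == cid.Undef {
		return errors.New("bad pin object")
	}
	if p.Reference != cid.Undef {
		return errors.New("data pins should not reference other pins")
	}
	return nil
}

func main() {
	c, _ := cid.Decode("QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG")
	fmt.Println(checkDataPin(pin{Cid: c}))               // <nil>
	fmt.Println(checkDataPin(pin{Cid: c, Reference: c})) // data pins should not reference other pins
	fmt.Println(checkDataPin(pin{}))                     // bad pin object
}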

View File

@ -54,17 +54,17 @@ func (ipfs *mockConnector) ID() (api.IPFSID, error) {
}, nil
}
func (ipfs *mockConnector) Pin(ctx context.Context, c *cid.Cid, maxDepth int) error {
func (ipfs *mockConnector) Pin(ctx context.Context, c cid.Cid, maxDepth int) error {
ipfs.pins.Store(c.String(), maxDepth)
return nil
}
func (ipfs *mockConnector) Unpin(ctx context.Context, c *cid.Cid) error {
func (ipfs *mockConnector) Unpin(ctx context.Context, c cid.Cid) error {
ipfs.pins.Delete(c.String())
return nil
}
func (ipfs *mockConnector) PinLsCid(ctx context.Context, c *cid.Cid) (api.IPFSPinStatus, error) {
func (ipfs *mockConnector) PinLsCid(ctx context.Context, c cid.Cid) (api.IPFSPinStatus, error) {
dI, ok := ipfs.pins.Load(c.String())
if !ok {
return api.IPFSPinStatusUnpinned, nil
@ -110,7 +110,7 @@ func (ipfs *mockConnector) BlockPut(nwm api.NodeWithMeta) error {
return nil
}
func (ipfs *mockConnector) BlockGet(c *cid.Cid) ([]byte, error) {
func (ipfs *mockConnector) BlockGet(c cid.Cid) ([]byte, error) {
d, ok := ipfs.blocks.Load(c.String())
if !ok {
return nil, errors.New("block not found")
@ -338,7 +338,7 @@ func TestUnpinShard(t *testing.T) {
sharding.VerifyShards(t, root, cl, cl.ipfs, 14)
// skipping errors, VerifyShards has checked
pinnedCids := []*cid.Cid{}
pinnedCids := []cid.Cid{}
pinnedCids = append(pinnedCids, root)
metaPin, _ := cl.PinGet(root)
cDag, _ := cl.PinGet(metaPin.Reference)
@ -394,10 +394,10 @@ func TestUnpinShard(t *testing.T) {
// cShard, _ := cid.Decode(test.TestShardCid)
// cCdag, _ := cid.Decode(test.TestCdagCid)
// cMeta, _ := cid.Decode(test.TestMetaRootCid)
// pinMeta(t, cl, []*cid.Cid{cShard}, cCdag, cMeta)
// pinMeta(t, cl, []cid.Cid{cShard}, cCdag, cMeta)
// }
// func pinMeta(t *testing.T, cl *Cluster, shardCids []*cid.Cid, cCdag, cMeta *cid.Cid) {
// func pinMeta(t *testing.T, cl *Cluster, shardCids []cid.Cid, cCdag, cMeta cid.Cid) {
// for _, cShard := range shardCids {
// shardPin := api.Pin{
// Cid: cShard,
@ -499,7 +499,7 @@ func TestUnpinShard(t *testing.T) {
// cShard2, _ := cid.Decode(test.TestShardCid2)
// cCdag2, _ := cid.Decode(test.TestCdagCid2)
// cMeta2, _ := cid.Decode(test.TestMetaRootCid2)
// pinMeta(t, cl, []*cid.Cid{cShard, cShard2}, cCdag2, cMeta2)
// pinMeta(t, cl, []cid.Cid{cShard, cShard2}, cCdag2, cMeta2)
// shardPin, err := cl.PinGet(cShard)
// if err != nil {

View File

@ -31,7 +31,7 @@ func consensusAddr(c *Consensus) ma.Multiaddr {
return cAddr
}
func testPin(c *cid.Cid) api.Pin {
func testPin(c cid.Cid) api.Pin {
p := api.PinCid(c)
p.ReplicationFactorMin = -1
p.ReplicationFactorMax = -1

View File

@ -859,7 +859,7 @@ func parseCredentials(userInput string) (string, string) {
func handlePinResponseFormatFlags(
c *cli.Context,
ci *cid.Cid,
ci cid.Cid,
target api.TrackerStatus,
) {
@ -875,7 +875,7 @@ func handlePinResponseFormatFlags(
return
}
if status.Cid == nil { // no status from "wait"
if status.Cid == cid.Undef { // no status from "wait"
time.Sleep(time.Second)
status, cerr = globalClient.Status(ci, false)
}
@ -883,7 +883,7 @@ func handlePinResponseFormatFlags(
}
func waitFor(
ci *cid.Cid,
ci cid.Cid,
target api.TrackerStatus,
timeout time.Duration,
) (api.GlobalPinInfo, error) {

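handlePinResponseFormatFlags falls back to a plain status query when the "wait" call produced no status, i.e. an undefined CID. A small sketch of that retry, with fetch standing in for globalClient.Status:

package main

import (
	"fmt"
	"time"

	cid "github.com/ipfs/go-cid"
)

// statusWithRetry sketches the fallback above: when the first answer is an
// undefined CID, sleep briefly and query once more.
func statusWithRetry(ci cid.Cid, fetch func(cid.Cid) cid.Cid) cid.Cid {
	got := fetch(ci)
	if got == cid.Undef { // no status from "wait"
		time.Sleep(time.Second)
		got = fetch(ci)
	}
	return got
}

func main() {
	c, _ := cid.Decode("QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG")
	calls := 0
	fetch := func(cid.Cid) cid.Cid {
		calls++
		if calls == 1 {
			return cid.Undef // first attempt: nothing yet
		}
		return c
	}
	fmt.Println(statusWithRetry(c, fetch).Defined(), calls) // true 2
}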
View File

@ -72,9 +72,9 @@ type API interface {
type IPFSConnector interface {
Component
ID() (api.IPFSID, error)
Pin(context.Context, *cid.Cid, int) error
Unpin(context.Context, *cid.Cid) error
PinLsCid(context.Context, *cid.Cid) (api.IPFSPinStatus, error)
Pin(context.Context, cid.Cid, int) error
Unpin(context.Context, cid.Cid) error
PinLsCid(context.Context, cid.Cid) (api.IPFSPinStatus, error)
PinLs(ctx context.Context, typeFilter string) (map[string]api.IPFSPinStatus, error)
// ConnectSwarms make sure this peer's IPFS daemon is connected to
// other peers IPFS daemons.
@ -90,7 +90,7 @@ type IPFSConnector interface {
// BlockPut directly adds a block of data to the IPFS repo
BlockPut(api.NodeWithMeta) error
// BlockGet retrieves the raw data of an IPFS block
BlockGet(*cid.Cid) ([]byte, error)
BlockGet(cid.Cid) ([]byte, error)
}
// Peered represents a component which needs to be aware of the peers
@ -111,21 +111,21 @@ type PinTracker interface {
Track(api.Pin) error
// Untrack tells the tracker that a Cid is to be forgotten. The tracker
// may perform an IPFS unpin operation.
Untrack(*cid.Cid) error
Untrack(cid.Cid) error
// StatusAll returns the list of pins with their local status.
StatusAll() []api.PinInfo
// Status returns the local status of a given Cid.
Status(*cid.Cid) api.PinInfo
Status(cid.Cid) api.PinInfo
// SyncAll makes sure that all tracked Cids reflect the real IPFS status.
// It returns the list of pins which were updated by the call.
SyncAll() ([]api.PinInfo, error)
// Sync makes sure that the Cid status reflect the real IPFS status.
// It returns the local status of the Cid.
Sync(*cid.Cid) (api.PinInfo, error)
Sync(cid.Cid) (api.PinInfo, error)
// RecoverAll calls Recover() for all pins tracked.
RecoverAll() ([]api.PinInfo, error)
// Recover retriggers a Pin/Unpin operation in Cids with error status.
Recover(*cid.Cid) (api.PinInfo, error)
Recover(cid.Cid) (api.PinInfo, error)
}
// Informer provides Metric information from a peer. The metrics produced by
@ -150,7 +150,7 @@ type PinAllocator interface {
// which are currently pinning the content. The candidates map
// contains the metrics for all peers which are eligible for pinning
// the content.
Allocate(c *cid.Cid, current, candidates, priority map[peer.ID]api.Metric) ([]peer.ID, error)
Allocate(c cid.Cid, current, candidates, priority map[peer.ID]api.Metric) ([]peer.ID, error)
}
// PeerMonitor is a component in charge of publishing a peer's metrics and

View File

@ -589,7 +589,7 @@ func (ipfs *Connector) ID() (api.IPFSID, error) {
// Pin performs a pin request against the configured IPFS
// daemon.
func (ipfs *Connector) Pin(ctx context.Context, hash *cid.Cid, maxDepth int) error {
func (ipfs *Connector) Pin(ctx context.Context, hash cid.Cid, maxDepth int) error {
ctx, cancel := context.WithTimeout(ctx, ipfs.config.PinTimeout)
defer cancel()
pinStatus, err := ipfs.PinLsCid(ctx, hash)
@ -634,7 +634,7 @@ func (ipfs *Connector) Pin(ctx context.Context, hash *cid.Cid, maxDepth int) err
// Unpin performs an unpin request against the configured IPFS
// daemon.
func (ipfs *Connector) Unpin(ctx context.Context, hash *cid.Cid) error {
func (ipfs *Connector) Unpin(ctx context.Context, hash cid.Cid) error {
ctx, cancel := context.WithTimeout(ctx, ipfs.config.UnpinTimeout)
defer cancel()
@ -686,7 +686,7 @@ func (ipfs *Connector) PinLs(ctx context.Context, typeFilter string) (map[string
// PinLsCid performs a "pin ls <hash>" request. It first tries with
// "type=recursive" and then, if not found, with "type=direct". It returns an
// api.IPFSPinStatus for that hash.
func (ipfs *Connector) PinLsCid(ctx context.Context, hash *cid.Cid) (api.IPFSPinStatus, error) {
func (ipfs *Connector) PinLsCid(ctx context.Context, hash cid.Cid) (api.IPFSPinStatus, error) {
pinLsType := func(pinType string) ([]byte, error) {
ctx, cancel := context.WithTimeout(ctx, ipfs.config.IPFSRequestTimeout)
defer cancel()
@ -961,7 +961,7 @@ func (ipfs *Connector) BlockPut(b api.NodeWithMeta) error {
}
// BlockGet retrieves an ipfs block with the given cid
func (ipfs *Connector) BlockGet(c *cid.Cid) ([]byte, error) {
func (ipfs *Connector) BlockGet(c cid.Cid) ([]byte, error) {
ctx, cancel := context.WithTimeout(ipfs.ctx, ipfs.config.IPFSRequestTimeout)
defer cancel()
url := "block/get?arg=" + c.String()
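PinLsCid asks the IPFS API for the pin first as type=recursive and, only if not found, as type=direct. A sketch of the two request paths in the style of the block/get URL above; the helper is illustrative, not the connector's code:

package main

import "fmt"

// pinLsPaths sketches the fallback described above for "pin ls".
func pinLsPaths(hash string) []string {
	return []string{
		"pin/ls?arg=" + hash + "&type=recursive",
		"pin/ls?arg=" + hash + "&type=direct",
	}
}

func main() {
	for _, p := range pinLsPaths("QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG") {
		fmt.Println(p)
	}
}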

View File

@ -15,15 +15,15 @@
},
{
"author": "whyrusleeping",
"hash": "QmUDzeFgYrRmHL2hUB6NZmqcBVQtUzETwmFRUc9onfSSHr",
"hash": "QmUEqyXr97aUbNmQADHYNknjwjjdVpJXEt1UZXmSG81EV4",
"name": "go-libp2p",
"version": "6.0.8"
"version": "6.0.12"
},
{
"author": "hsanjuan",
"hash": "Qma1GvD8sVdAaawmhh6rCM9fhqHuxoXZqLDPJNmD1YK8mZ",
"hash": "QmaR1KVXHKDyysrMUCT1WKbu9fkFs12U7nLDPhfkswonzj",
"name": "go-libp2p-raft",
"version": "1.2.12"
"version": "1.2.13"
},
{
"author": "urfave",
@ -45,9 +45,9 @@
},
{
"author": "hsanjuan",
"hash": "QmW56LEPdtdETprHG5h5qcJpp6qNYuytzyqbEctGdbGfSF",
"hash": "QmXyteEWrYHVJFEA8oX9cSfRp6PJ2kiVsmsFqPMi9ue1Ek",
"name": "go-libp2p-gorpc",
"version": "1.0.17"
"version": "1.0.18"
},
{
"author": "libp2p",
@ -69,27 +69,27 @@
},
{
"author": "hsanjuan",
"hash": "QmeyKL7WDSPhnhozCB3oC51j5pDs7DnCGWPyVaxgwpncA6",
"hash": "Qmc6tqtdKn1fVCGmU2StfULdXb8xPxmGh19NsYsgVkqjbw",
"name": "go-libp2p-http",
"version": "1.1.0"
"version": "1.1.1"
},
{
"author": "ipfs",
"hash": "QmbjmbWbDCTKGba7zvtVx7qyyegziT3KWRtVN9mPaysmcF",
"hash": "QmR8y7XSkmWSpae9vm7YRES6Bz93pTXX1abeSVKDuNEFeq",
"name": "go-ipfs-api",
"version": "1.3.5"
"version": "1.3.6"
},
{
"author": "whyrusleeping",
"hash": "QmPqk5grMduFU1Cj5onoB4GyV9XRsSG82KtX4wQs8Gd3XA",
"hash": "QmY1L5krVk8dv8d74uESmJTXGpoigVYqBVxXXz1aS8aFSb",
"name": "go-libp2p-floodsub",
"version": "100.9.24"
"version": "0.9.28"
},
{
"author": "whyrusleeping",
"hash": "QmYjnkEL7i731PirfVH1sis89evN7jt4otSHw5D2xXXwUV",
"hash": "QmPSQnBKM9g7BaUcZCvswUJVscQ1ipjmwxN5PXCjkp9EQ7",
"name": "go-cid",
"version": "0.7.25"
"version": "0.9.0"
},
{
"author": "satori",
@ -99,15 +99,15 @@
},
{
"author": "hsanjuan",
"hash": "QmWbCAB5f3LDumj4ncz1UCHSiyXrXxkMxZB6Wv35xi4P8z",
"hash": "QmdSeG9s4EQ9TGruJJS9Us38TQDZtMmFGwzTYUDVqNTURm",
"name": "go-ipfs-chunker",
"version": "0.0.10"
"version": "0.1.0"
},
{
"author": "hector",
"hash": "QmdBpJ5VTfL79VwKDU93z7fyZJ3mm4UaBHrE73CWRw2Bjd",
"hash": "QmPG32VXR5jmpo9q8R9FNdR4Ae97Ky9CiZE6SctJLUB79H",
"name": "go-ipfs-posinfo",
"version": "0.0.5"
"version": "0.1.0"
},
{
"author": "dustin",
@ -117,20 +117,20 @@
},
{
"author": "why",
"hash": "QmVKnUHAik1RuY38k775fuS9Um4jTNFYrNXxpPAmbUq6JW",
"hash": "QmPL8bYtbACcSFFiSr4s2du7Na382NxRADR8hC7D9FkEA2",
"name": "go-unixfs",
"version": "1.0.11"
"version": "1.1.1"
},
{
"author": "why",
"hash": "QmQzSpSjkdGHW6WFBhUG6P3t9K8yv7iucucT1cQaqJ6tgd",
"hash": "QmXv5mwmQ74r4aiHcNeQ4GAmfB3aWJuqaE4WyDfDfvkgLM",
"name": "go-merkledag",
"version": "1.0.9"
"version": "1.1.1"
},
{
"hash": "QmdP3wKxB6x6vJ57tDrewAJF2qv4ULejCZ6dspJRnk3993",
"hash": "QmaXYSwxqJsX3EoGb1ZV2toZ9fXc8hWJPaBW1XAp1h2Tsp",
"name": "go-libp2p-kad-dht",
"version": "4.3.4"
"version": "4.4.0"
},
{
"hash": "Qmbq7kGxgcpALGLPaWDyTa6KUq5kBUKdEvkvPZcBkJoLex",
@ -144,9 +144,9 @@
},
{
"author": "hsanjuan",
"hash": "QmZJMX4qjdLMKKPFcZN8PgkASZK9JHG9P8vZAfbiKyhzxv",
"hash": "QmRkrpnhZqDxTxwGCsDbuZMr7uCFZHH6SGfrcjgEQwxF3t",
"name": "go-mfs",
"version": "0.0.2"
"version": "0.1.1"
}
],
"gxVersion": "0.11.0",

View File

@ -367,7 +367,7 @@ func TestClustersPeerRemoveReallocsPins(t *testing.T) {
// At this point, all peers must have nClusters -1 pins
// associated to them.
// Find out which pins are associated to the leader.
interestingCids := []*cid.Cid{}
interestingCids := []cid.Cid{}
pins := leader.Pins()
if len(pins) != nClusters {

View File

@ -213,14 +213,14 @@ func (mpt *MapPinTracker) Track(c api.Pin) error {
// Untrack tells the MapPinTracker to stop managing a Cid.
// If the Cid is pinned locally, it will be unpinned.
func (mpt *MapPinTracker) Untrack(c *cid.Cid) error {
func (mpt *MapPinTracker) Untrack(c cid.Cid) error {
logger.Debugf("untracking %s", c)
return mpt.enqueue(api.PinCid(c), optracker.OperationUnpin, mpt.unpinCh)
}
// Status returns information for a Cid tracked by this
// MapPinTracker.
func (mpt *MapPinTracker) Status(c *cid.Cid) api.PinInfo {
func (mpt *MapPinTracker) Status(c cid.Cid) api.PinInfo {
return mpt.optracker.Get(c)
}
@ -238,7 +238,7 @@ func (mpt *MapPinTracker) StatusAll() []api.PinInfo {
// Pins in error states can be recovered with Recover().
// An error is returned if we are unable to contact
// the IPFS daemon.
func (mpt *MapPinTracker) Sync(c *cid.Cid) (api.PinInfo, error) {
func (mpt *MapPinTracker) Sync(c cid.Cid) (api.PinInfo, error) {
var ips api.IPFSPinStatus
err := mpt.rpcClient.Call(
"",
@ -310,7 +310,7 @@ func (mpt *MapPinTracker) SyncAll() ([]api.PinInfo, error) {
return results, nil
}
func (mpt *MapPinTracker) syncStatus(c *cid.Cid, ips api.IPFSPinStatus) api.PinInfo {
func (mpt *MapPinTracker) syncStatus(c cid.Cid, ips api.IPFSPinStatus) api.PinInfo {
status, ok := mpt.optracker.Status(c)
if !ok {
status = api.TrackerStatusUnpinned
@ -370,7 +370,7 @@ func (mpt *MapPinTracker) syncStatus(c *cid.Cid, ips api.IPFSPinStatus) api.PinI
// Recover will re-queue a Cid in error state for the failed operation,
// possibly retriggering an IPFS pinning operation.
func (mpt *MapPinTracker) Recover(c *cid.Cid) (api.PinInfo, error) {
func (mpt *MapPinTracker) Recover(c cid.Cid) (api.PinInfo, error) {
logger.Infof("Attempting to recover %s", c)
pInfo := mpt.optracker.Get(c)
var err error
@ -407,6 +407,6 @@ func (mpt *MapPinTracker) SetClient(c *rpc.Client) {
// OpContext exports the internal optracker's OpContext method.
// For testing purposes only.
func (mpt *MapPinTracker) OpContext(c *cid.Cid) context.Context {
func (mpt *MapPinTracker) OpContext(c cid.Cid) context.Context {
return mpt.optracker.OpContext(c)
}

View File

@ -59,7 +59,7 @@ func (mock *mockService) IPFSUnpin(ctx context.Context, in api.PinSerial, out *s
return nil
}
func testPin(c *cid.Cid, min, max int, allocs ...peer.ID) api.Pin {
func testPin(c cid.Cid, min, max int, allocs ...peer.ID) api.Pin {
pin := api.PinCid(c)
pin.ReplicationFactorMin = min
pin.ReplicationFactorMax = max

View File

@ -80,7 +80,7 @@ func NewOperation(ctx context.Context, pin api.Pin, typ OperationType, ph Phase)
}
// Cid returns the Cid associated to this operation.
func (op *Operation) Cid() *cid.Cid {
func (op *Operation) Cid() cid.Cid {
op.mu.RLock()
defer op.mu.RUnlock()
return op.pin.Cid

View File

@ -78,7 +78,7 @@ func (opt *OperationTracker) Clean(op *Operation) {
// Status returns the TrackerStatus associated to the last operation known
// with the given Cid. It returns false if we are not tracking any operation
// for the given Cid.
func (opt *OperationTracker) Status(c *cid.Cid) (api.TrackerStatus, bool) {
func (opt *OperationTracker) Status(c cid.Cid) (api.TrackerStatus, bool) {
opt.mu.RLock()
defer opt.mu.RUnlock()
op, ok := opt.operations[c.String()]
@ -92,7 +92,7 @@ func (opt *OperationTracker) Status(c *cid.Cid) (api.TrackerStatus, bool) {
// SetError transitions an operation for a Cid into PhaseError if its Status
// is PhaseDone. Any other phases are considered in-flight and not touched.
// For things already in error, the error message is updated.
func (opt *OperationTracker) SetError(c *cid.Cid, err error) {
func (opt *OperationTracker) SetError(c cid.Cid, err error) {
opt.mu.Lock()
defer opt.mu.Unlock()
op, ok := opt.operations[c.String()]
@ -109,7 +109,7 @@ func (opt *OperationTracker) SetError(c *cid.Cid, err error) {
func (opt *OperationTracker) unsafePinInfo(op *Operation) api.PinInfo {
if op == nil {
return api.PinInfo{
Cid: nil,
Cid: cid.Undef,
Peer: opt.pid,
Status: api.TrackerStatusUnpinned,
TS: time.Now(),
@ -127,12 +127,12 @@ func (opt *OperationTracker) unsafePinInfo(op *Operation) api.PinInfo {
}
// Get returns a PinInfo object for Cid.
func (opt *OperationTracker) Get(c *cid.Cid) api.PinInfo {
func (opt *OperationTracker) Get(c cid.Cid) api.PinInfo {
opt.mu.RLock()
defer opt.mu.RUnlock()
op := opt.operations[c.String()]
pInfo := opt.unsafePinInfo(op)
if pInfo.Cid == nil {
if !pInfo.Cid.Defined() {
pInfo.Cid = c
}
return pInfo
@ -140,7 +140,7 @@ func (opt *OperationTracker) Get(c *cid.Cid) api.PinInfo {
// GetExists returns a PinInfo object for a Cid only if there exists
// an associated Operation.
func (opt *OperationTracker) GetExists(c *cid.Cid) (api.PinInfo, bool) {
func (opt *OperationTracker) GetExists(c cid.Cid) (api.PinInfo, bool) {
opt.mu.RLock()
defer opt.mu.RUnlock()
op, ok := opt.operations[c.String()]
@ -164,7 +164,7 @@ func (opt *OperationTracker) GetAll() []api.PinInfo {
// CleanError removes the associated Operation, if it is
// in PhaseError.
func (opt *OperationTracker) CleanError(c *cid.Cid) {
func (opt *OperationTracker) CleanError(c cid.Cid) {
opt.mu.RLock()
defer opt.mu.RUnlock()
errop, ok := opt.operations[c.String()]
@ -192,7 +192,7 @@ func (opt *OperationTracker) CleanAllDone() {
}
// OpContext gets the context of an operation, if any.
func (opt *OperationTracker) OpContext(c *cid.Cid) context.Context {
func (opt *OperationTracker) OpContext(c cid.Cid) context.Context {
opt.mu.RLock()
defer opt.mu.RUnlock()
op, ok := opt.operations[c.String()]
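The tracker keys its operations map by c.String(), and Get fills the queried CID back into the default PinInfo when nothing is tracked, which is why the nil check became a Defined() check. A toy version of that lookup, with a plain status string in place of api.TrackerStatus:

package main

import (
	"fmt"
	"sync"

	cid "github.com/ipfs/go-cid"
)

// miniTracker is a toy version of the OperationTracker lookup above; the
// status strings are placeholders, not api.TrackerStatus values.
type miniTracker struct {
	mu  sync.RWMutex
	ops map[string]string // cid string -> status
}

func (t *miniTracker) get(c cid.Cid) (cid.Cid, string) {
	t.mu.RLock()
	defer t.mu.RUnlock()
	status, ok := t.ops[c.String()]
	if !ok {
		// mirrors Get(): the default entry still carries the queried CID
		return c, "unpinned"
	}
	return c, status
}

func main() {
	c, _ := cid.Decode("QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG")
	t := &miniTracker{ops: make(map[string]string)}
	fmt.Println(t.get(c)) // <cid> unpinned
}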

View File

@ -305,7 +305,7 @@ func BenchmarkPinTracker_Track(b *testing.B) {
func TestPinTracker_Untrack(t *testing.T) {
type args struct {
c *cid.Cid
c cid.Cid
tracker ipfscluster.PinTracker
}
tests := []struct {
@ -473,7 +473,7 @@ func BenchmarkPinTracker_StatusAll(b *testing.B) {
func TestPinTracker_Status(t *testing.T) {
type args struct {
c *cid.Cid
c cid.Cid
tracker ipfscluster.PinTracker
}
tests := []struct {
@ -577,7 +577,7 @@ func TestPinTracker_Status(t *testing.T) {
func TestPinTracker_SyncAll(t *testing.T) {
type args struct {
cs []*cid.Cid
cs []cid.Cid
tracker ipfscluster.PinTracker
}
tests := []struct {
@ -589,7 +589,7 @@ func TestPinTracker_SyncAll(t *testing.T) {
{
"basic stateless syncall",
args{
[]*cid.Cid{
[]cid.Cid{
test.MustDecodeCid(test.TestCid1),
test.MustDecodeCid(test.TestCid2),
},
@ -610,7 +610,7 @@ func TestPinTracker_SyncAll(t *testing.T) {
{
"basic map syncall",
args{
[]*cid.Cid{
[]cid.Cid{
test.MustDecodeCid(test.TestCid1),
test.MustDecodeCid(test.TestCid2),
},
@ -631,7 +631,7 @@ func TestPinTracker_SyncAll(t *testing.T) {
{
"slow stateless syncall",
args{
[]*cid.Cid{
[]cid.Cid{
test.MustDecodeCid(test.TestCid1),
test.MustDecodeCid(test.TestCid2),
},
@ -652,7 +652,7 @@ func TestPinTracker_SyncAll(t *testing.T) {
{
"slow map syncall",
args{
[]*cid.Cid{
[]cid.Cid{
test.MustDecodeCid(test.TestCid1),
test.MustDecodeCid(test.TestCid2),
},
@ -708,7 +708,7 @@ func TestPinTracker_SyncAll(t *testing.T) {
func TestPinTracker_Sync(t *testing.T) {
type args struct {
c *cid.Cid
c cid.Cid
tracker ipfscluster.PinTracker
}
tests := []struct {
@ -887,7 +887,7 @@ func TestPinTracker_RecoverAll(t *testing.T) {
func TestPinTracker_Recover(t *testing.T) {
type args struct {
c *cid.Cid
c cid.Cid
tracker ipfscluster.PinTracker
}
tests := []struct {
@ -938,7 +938,7 @@ func TestPinTracker_Recover(t *testing.T) {
func TestUntrackTrack(t *testing.T) {
type args struct {
c *cid.Cid
c cid.Cid
tracker ipfscluster.PinTracker
}
tests := []struct {
@ -991,7 +991,7 @@ func TestUntrackTrack(t *testing.T) {
func TestTrackUntrackWithCancel(t *testing.T) {
type args struct {
c *cid.Cid
c cid.Cid
tracker ipfscluster.PinTracker
}
tests := []struct {

View File

@ -236,7 +236,7 @@ func (spt *Tracker) Track(c api.Pin) error {
// Untrack tells the StatelessPinTracker to stop managing a Cid.
// If the Cid is pinned locally, it will be unpinned.
func (spt *Tracker) Untrack(c *cid.Cid) error {
func (spt *Tracker) Untrack(c cid.Cid) error {
logger.Debugf("untracking %s", c)
return spt.enqueue(api.PinCid(c), optracker.OperationUnpin)
}
@ -264,7 +264,7 @@ func (spt *Tracker) StatusAll() []api.PinInfo {
}
// Status returns information for a Cid pinned to the local IPFS node.
func (spt *Tracker) Status(c *cid.Cid) api.PinInfo {
func (spt *Tracker) Status(c cid.Cid) api.PinInfo {
// check if c has an inflight operation or errored operation in optracker
if oppi, ok := spt.optracker.GetExists(c); ok {
// if it does return the status of the operation
@ -379,7 +379,7 @@ func (spt *Tracker) SyncAll() ([]api.PinInfo, error) {
}
// Sync returns the updated local status for the given Cid.
func (spt *Tracker) Sync(c *cid.Cid) (api.PinInfo, error) {
func (spt *Tracker) Sync(c cid.Cid) (api.PinInfo, error) {
oppi, ok := spt.optracker.GetExists(c)
if !ok {
return spt.Status(c), nil
@ -480,7 +480,7 @@ func (spt *Tracker) RecoverAll() ([]api.PinInfo, error) {
// Recover will re-track or re-untrack a Cid in error state,
// possibly retriggering an IPFS pinning operation and returning
// only when it is done.
func (spt *Tracker) Recover(c *cid.Cid) (api.PinInfo, error) {
func (spt *Tracker) Recover(c cid.Cid) (api.PinInfo, error) {
logger.Infof("Attempting to recover %s", c)
pInfo, ok := spt.optracker.GetExists(c)
if !ok {
@ -600,6 +600,6 @@ func (spt *Tracker) getErrorsAll() []api.PinInfo {
// OpContext exports the internal optracker's OpContext method.
// For testing purposes only.
func (spt *Tracker) OpContext(c *cid.Cid) context.Context {
func (spt *Tracker) OpContext(c cid.Cid) context.Context {
return spt.optracker.OpContext(c)
}

View File

@ -238,7 +238,7 @@ func TestTrackUntrackWithNoCancel(t *testing.T) {
}
pi := spt.optracker.Get(fastPin.Cid)
if pi.Cid == nil {
if pi.Cid == cid.Undef {
t.Error("fastPin should have been removed from tracker")
}
}
@ -269,7 +269,7 @@ func TestUntrackTrackWithCancel(t *testing.T) {
time.Sleep(100 * time.Millisecond)
pi := spt.optracker.Get(slowPin.Cid)
if pi.Cid == nil {
if pi.Cid == cid.Undef {
t.Fatal("expected slowPin to be tracked")
}
@ -328,7 +328,7 @@ func TestUntrackTrackWithNoCancel(t *testing.T) {
}
pi := spt.optracker.Get(fastPin.Cid)
if pi.Cid == nil {
if pi.Cid == cid.Undef {
t.Fatal("c untrack operation should be tracked")
}
@ -355,7 +355,7 @@ var sortPinInfoByCid = func(p []api.PinInfo) {
func TestStatelessTracker_SyncAll(t *testing.T) {
type args struct {
cs []*cid.Cid
cs []cid.Cid
tracker *Tracker
}
tests := []struct {
@ -367,7 +367,7 @@ func TestStatelessTracker_SyncAll(t *testing.T) {
{
"basic stateless syncall",
args{
[]*cid.Cid{
[]cid.Cid{
test.MustDecodeCid(test.TestCid1),
test.MustDecodeCid(test.TestCid2),
},
@ -388,7 +388,7 @@ func TestStatelessTracker_SyncAll(t *testing.T) {
{
"slow stateless syncall",
args{
[]*cid.Cid{
[]cid.Cid{
test.MustDecodeCid(test.TestCid1),
test.MustDecodeCid(test.TestCid2),
},

View File

@ -7,6 +7,7 @@ import (
"io"
cid "github.com/ipfs/go-cid"
"github.com/ipfs/ipfs-cluster/api"
)
@ -16,13 +17,13 @@ type State interface {
// Add adds a pin to the State
Add(api.Pin) error
// Rm removes a pin from the State
Rm(*cid.Cid) error
Rm(cid.Cid) error
// List lists all the pins in the state
List() []api.Pin
// Has returns true if the state is holding information for a Cid
Has(*cid.Cid) bool
Has(cid.Cid) bool
// Get returns the information attached to this pin
Get(*cid.Cid) (api.Pin, bool)
Get(cid.Cid) (api.Pin, bool)
// Migrate restores the serialized format of an outdated state to the current version
Migrate(r io.Reader) error
// Return the version of this state

View File

@ -48,7 +48,7 @@ func (st *MapState) Add(c api.Pin) error {
}
// Rm removes a Cid from the internal map.
func (st *MapState) Rm(c *cid.Cid) error {
func (st *MapState) Rm(c cid.Cid) error {
st.pinMux.Lock()
defer st.pinMux.Unlock()
delete(st.PinMap, c.String())
@ -59,9 +59,9 @@ func (st *MapState) Rm(c *cid.Cid) error {
// The returned object has its Cid and Allocations
// fields initialized, regardless of the
// presence of the provided Cid in the state.
// To check the presence, use MapState.Has(*cid.Cid).
func (st *MapState) Get(c *cid.Cid) (api.Pin, bool) {
if c == nil {
// To check the presence, use MapState.Has(cid.Cid).
func (st *MapState) Get(c cid.Cid) (api.Pin, bool) {
if !c.Defined() {
return api.PinCid(c), false
}
st.pinMux.RLock()
@ -74,7 +74,7 @@ func (st *MapState) Get(c *cid.Cid) (api.Pin, bool) {
}
// Has returns true if the Cid belongs to the State.
func (st *MapState) Has(c *cid.Cid) bool {
func (st *MapState) Has(c cid.Cid) bool {
st.pinMux.RLock()
defer st.pinMux.RUnlock()
_, ok := st.PinMap[c.String()]

View File

@ -26,7 +26,7 @@ var (
// MustDecodeCid provides a test helper that ignores
// errors from cid.Decode.
func MustDecodeCid(v string) *cid.Cid {
func MustDecodeCid(v string) cid.Cid {
c, _ := cid.Decode(v)
return c
}

View File

@ -95,7 +95,7 @@ func containsPeer(list []peer.ID, peer peer.ID) bool {
return false
}
func containsCid(list []*cid.Cid, ci *cid.Cid) bool {
func containsCid(list []cid.Cid, ci cid.Cid) bool {
for _, c := range list {
if c.String() == ci.String() {
return true