fix: Fix typos (#1001)

Fix typos in files
Yang Hau 2020-02-03 17:30:04 +08:00 committed by GitHub
parent 392a139676
commit 7986d94242
16 changed files with 24 additions and 24 deletions

View File

@@ -109,7 +109,7 @@ The `ipfs-cluster-follow` application is an easy to use way to run one or several
That said, the configuration layout and folder is the same for both `ipfs-cluster-service` and `ipfs-cluster-follow` and they can be run one in place of the other. In the same way, remote-source configurations usually used for `ipfs-cluster-follow` can be replaced with local ones usually used by `ipfs-cluster-service`.
-The removal of the `map pintracker` has resulted in a simplification of some operations. `StateSync` (regularly run every `state_sync_interval`) does not trigger repinnings now, but only checks for pin expirations. `RecoverAllLocal` (reguarly run every `pin_recover_interval`) will now trigger repinnings when necessary (i.e. when things that were expected to be on IPFS are not). On very large pinsets, this operation can trigger a memory spike as the full recursive pinset from IPFS is requested and loaded on memory (before this happened on `StateSync`).
+The removal of the `map pintracker` has resulted in a simplification of some operations. `StateSync` (regularly run every `state_sync_interval`) does not trigger repinnings now, but only checks for pin expirations. `RecoverAllLocal` (regularly run every `pin_recover_interval`) will now trigger repinnings when necessary (i.e. when things that were expected to be on IPFS are not). On very large pinsets, this operation can trigger a memory spike as the full recursive pinset from IPFS is requested and loaded on memory (before this happened on `StateSync`).
---
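
As an aside, the paragraph above describes two independent periodic passes. Here is a minimal, self-contained sketch of that scheduling pattern, with invented function names standing in for the real ipfs-cluster internals:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// Hypothetical stand-ins for the two periodic tasks described above:
// StateSync only checks pin expirations, while RecoverAllLocal triggers
// repinning of items that should be on IPFS but are not.
func checkPinExpirations() { fmt.Println("StateSync: checking pin expirations") }
func recoverAndRepin()    { fmt.Println("RecoverAllLocal: repinning missing items") }

// runMaintenance drives both loops until the context is cancelled,
// mirroring how state_sync_interval and pin_recover_interval schedule
// two independent maintenance passes.
func runMaintenance(ctx context.Context, stateSync, pinRecover time.Duration) {
	syncTicker := time.NewTicker(stateSync)
	recoverTicker := time.NewTicker(pinRecover)
	defer syncTicker.Stop()
	defer recoverTicker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-syncTicker.C:
			checkPinExpirations()
		case <-recoverTicker.C:
			recoverAndRepin()
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	runMaintenance(ctx, time.Second, 1500*time.Millisecond)
}
```
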
@@ -725,7 +725,7 @@ adding a file, which is always the root hash.
* IPFS Proxy extraction to its own `API` component: `ipfsproxy` | [ipfs/ipfs-cluster#453](https://github.com/ipfs/ipfs-cluster/issues/453) | [ipfs/ipfs-cluster#576](https://github.com/ipfs/ipfs-cluster/issues/576) | [ipfs/ipfs-cluster#616](https://github.com/ipfs/ipfs-cluster/issues/616) | [ipfs/ipfs-cluster#617](https://github.com/ipfs/ipfs-cluster/issues/617)
* Add full CORS handling to `restapi` | [ipfs/ipfs-cluster#639](https://github.com/ipfs/ipfs-cluster/issues/639) | [ipfs/ipfs-cluster#640](https://github.com/ipfs/ipfs-cluster/issues/640)
-* `restapi` configuration section entries can be overriden from environment variables | [ipfs/ipfs-cluster#609](https://github.com/ipfs/ipfs-cluster/issues/609)
+* `restapi` configuration section entries can be overridden from environment variables | [ipfs/ipfs-cluster#609](https://github.com/ipfs/ipfs-cluster/issues/609)
* Update to `go-ipfs-files` 2.0 | [ipfs/ipfs-cluster#613](https://github.com/ipfs/ipfs-cluster/issues/613)
* Tests for the `/monitor/metrics` endpoint | [ipfs/ipfs-cluster#587](https://github.com/ipfs/ipfs-cluster/issues/587) | [ipfs/ipfs-cluster#622](https://github.com/ipfs/ipfs-cluster/issues/622)
* Support `stream-channels=fase` query parameter in `/add` | [ipfs/ipfs-cluster#632](https://github.com/ipfs/ipfs-cluster/issues/632) | [ipfs/ipfs-cluster#633](https://github.com/ipfs/ipfs-cluster/issues/633)

View File

@@ -153,7 +153,7 @@ func (a *Adder) FromFiles(ctx context.Context, f files.Directory) (cid.Cid, error) {
// followed suit, it no longer receives the name of the
// file/folder being added and does not emit AddedOutput
// events with the right names. We addressed this by adding
-// OutputPrefix to our version. go-ipfs modifies emmited
+// OutputPrefix to our version. go-ipfs modifies emitted
// events before sending to user).
ipfsAdder.OutputPrefix = it.Name()

View File

@@ -61,7 +61,7 @@ type Adder struct {
liveNodes uint64
lastFile mfs.FSNode
// Cluster: ipfs does a hack in commands/add.go to set the filenames
-// in emmited events correctly. We carry a root folder name (or a
+// in emitted events correctly. We carry a root folder name (or a
// filename in the case of single files here and emit those events
// correctly from the beginning).
OutputPrefix string
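
Both `OutputPrefix` hunks concern the same fix: the adder carries the root folder (or single-file) name and joins it onto the names of the events it emits. A rough sketch of that prefixing, with a hypothetical `AddedOutput` shape standing in for the real event type:

```go
package main

import (
	"fmt"
	"path"
)

// AddedOutput is a simplified, hypothetical stand-in for the event
// type whose Name field the OutputPrefix corrects.
type AddedOutput struct {
	Name string
	Cid  string
}

// withPrefix joins the carried root folder/file name in front of the
// emitted name, so events report correct paths from the beginning.
func withPrefix(prefix string, ev AddedOutput) AddedOutput {
	if prefix != "" {
		ev.Name = path.Join(prefix, ev.Name)
	}
	return ev
}

func main() {
	ev := withPrefix("myfolder", AddedOutput{Name: "sub/file.txt", Cid: "Qm..."})
	fmt.Println(ev.Name) // myfolder/sub/file.txt
}
```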

View File

@@ -11,7 +11,7 @@ package sharding
// all of the links. Note that this limit is only reached at shard sizes 7
// times the size of the current default and then only when files are all
// 1 byte in size. In the future we may generalize the shard dag to multiple
-// indirect nodes to accomodate much bigger shard sizes. Also note that the
+// indirect nodes to accommodate much bigger shard sizes. Also note that the
// move to using the identity hash function in cids of very small data
// will improve link density in shard nodes and further reduce the need for
// multiple levels of indirection.

View File

@@ -133,7 +133,7 @@ func (dgs *DAGService) Finalize(ctx context.Context, dataRoot cid.Cid) (cid.Cid, error) {
// Consider doing this? Seems like overkill
//
-// // Ammend ShardPins to reference clusterDAG root hash as a Parent
+// // Amend ShardPins to reference clusterDAG root hash as a Parent
// shardParents := cid.NewSet()
// shardParents.Add(clusterDAG)
// for shardN, shard := range dgs.shardNodes {
@@ -212,7 +212,7 @@ func (dgs *DAGService) logStats(metaPin, clusterDAGPin cid.Cid) {
rate = humanize.Bytes(dgs.totalSize / seconds)
}
-statsFmt := `sharding session sucessful:
+statsFmt := `sharding session successful:
CID: %s
ClusterDAG: %s
Total shards: %d
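
The `logStats` hunk computes a transfer rate by dividing the session's total size by its duration and passing the result through `go-humanize`. A small self-contained sketch of that computation, with a zero-division guard and an illustrative `/s` suffix (the original format string is not shown in full here):

```go
package main

import (
	"fmt"
	"time"

	humanize "github.com/dustin/go-humanize"
)

// ingestRate renders total bytes over an elapsed duration as a
// human-readable bytes-per-second figure, guarding against division
// by zero for very short sessions.
func ingestRate(totalSize uint64, elapsed time.Duration) string {
	seconds := uint64(elapsed.Seconds())
	if seconds == 0 {
		seconds = 1
	}
	return humanize.Bytes(totalSize/seconds) + "/s"
}

func main() {
	fmt.Println(ingestRate(350_000_000, 42*time.Second)) // 8.3 MB/s
}
```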

View File

@@ -45,7 +45,7 @@ func newShard(ctx context.Context, rpc *rpc.Client, opts api.PinOptions) (*shard, error) {
}
// TODO (hector): get latest metrics for allocations, adjust sizeLimit
-// to minumum. This can be done later.
+// to minimum. This can be done later.
return &shard{
rpc: rpc,

View File

@@ -51,7 +51,7 @@ func (ba *BlockAdder) Add(ctx context.Context, node ipld.Node) error {
rpcutil.RPCDiscardReplies(len(ba.dests)),
)
-var sucessfulDests []peer.ID
+var successfulDests []peer.ID
for i, e := range errs {
if e != nil {
logger.Errorf("BlockPut on %s: %s", ba.dests[i], e)
@@ -64,14 +64,14 @@ func (ba *BlockAdder) Add(ctx context.Context, node ipld.Node) error {
if rpc.IsRPCError(e) {
continue
}
-sucessfulDests = append(sucessfulDests, ba.dests[i])
+successfulDests = append(successfulDests, ba.dests[i])
}
-if len(sucessfulDests) == 0 {
+if len(successfulDests) == 0 {
return ErrBlockAdder
}
-ba.dests = sucessfulDests
+ba.dests = successfulDests
return nil
}
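
The renamed variable belongs to a broadcast-and-filter pattern: put the block on every destination, drop the destinations that errored, and fail only when none succeeded. A simplified, dependency-free version of that logic (it omits the RPC-error special case shown above):

```go
package main

import (
	"errors"
	"fmt"
)

// ErrBlockAdder mirrors the sentinel returned when a block could not
// be put on any destination peer.
var ErrBlockAdder = errors.New("could not put block on any destination")

// keepSuccessful filters a destination list by the per-destination
// errors of a broadcast call, keeping only the peers that succeeded.
func keepSuccessful(dests []string, errs []error) ([]string, error) {
	var successfulDests []string
	for i, e := range errs {
		if e != nil {
			fmt.Printf("BlockPut on %s: %s\n", dests[i], e)
			continue
		}
		successfulDests = append(successfulDests, dests[i])
	}
	if len(successfulDests) == 0 {
		return nil, ErrBlockAdder
	}
	return successfulDests, nil
}

func main() {
	dests := []string{"peerA", "peerB", "peerC"}
	errs := []error{nil, errors.New("connection refused"), nil}
	kept, err := keepSuccessful(dests, errs)
	fmt.Println(kept, err) // [peerA peerC] <nil>
}
```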

View File

@@ -340,7 +340,7 @@ func (c *defaultClient) RepoGC(ctx context.Context, local bool) (*api.GlobalRepoGC, error) {
}
// WaitFor is a utility function that allows for a caller to wait for a
-// paticular status for a CID (as defined by StatusFilterParams).
+// particular status for a CID (as defined by StatusFilterParams).
// It returns the final status for that CID and an error, if there was.
//
// WaitFor works by calling Status() repeatedly and checking that all
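
`WaitFor` is a poll-until-done helper. A self-contained sketch of the same idea, with an invented `check` callback standing in for the client's `Status()` call and `StatusFilterParams` (this is the shape, not the client API):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// waitFor polls check at the given interval until it reports the
// target status or the context expires, returning the final status.
func waitFor(ctx context.Context, interval time.Duration, target string, check func() string) (string, error) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return "", ctx.Err()
		case <-ticker.C:
			if st := check(); st == target {
				return st, nil
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	calls := 0
	st, err := waitFor(ctx, 100*time.Millisecond, "pinned", func() string {
		calls++
		if calls < 3 {
			return "pinning"
		}
		return "pinned"
	})
	fmt.Println(st, err) // pinned <nil>
}
```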

View File

@@ -101,7 +101,7 @@ func (c *defaultClient) handleResponse(resp *http.Response, obj interface{}) error {
case resp.StatusCode == http.StatusAccepted:
logger.Debug("Request accepted")
case resp.StatusCode == http.StatusNoContent:
logger.Debug("Request suceeded. Response has no content")
logger.Debug("Request succeeded. Response has no content")
default:
if resp.StatusCode > 399 && resp.StatusCode < 600 {
var apiErr api.Error
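
The surrounding switch maps HTTP status codes to outcomes: the 2xx variants just log, while 4xx/5xx bodies are decoded as a structured API error. A compact sketch of that handling, with a hypothetical `apiError` type in place of `api.Error`:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strings"
)

// apiError is a hypothetical stand-in for the api.Error type decoded
// in the hunk above.
type apiError struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
}

// handleStatus mirrors the response switch: accept the success codes,
// decode a JSON error body for 4xx/5xx, and ignore everything else.
func handleStatus(status int, body io.Reader) error {
	switch {
	case status == http.StatusAccepted:
		fmt.Println("request accepted")
	case status == http.StatusNoContent:
		fmt.Println("request succeeded, response has no content")
	default:
		if status > 399 && status < 600 {
			var apiErr apiError
			if err := json.NewDecoder(body).Decode(&apiErr); err != nil {
				return fmt.Errorf("undecodable error response (status %d)", status)
			}
			return fmt.Errorf("api error %d: %s", apiErr.Code, apiErr.Message)
		}
	}
	return nil
}

func main() {
	body := strings.NewReader(`{"code":404,"message":"not found"}`)
	fmt.Println(handleStatus(404, body)) // api error 404: not found
}
```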

View File

@@ -491,7 +491,7 @@ func (c *Cluster) watchPeers() {
}
}
-// reBootstrap reguarly attempts to bootstrap (re-connect to peers from the
+// reBootstrap regularly attempts to bootstrap (re-connect to peers from the
// peerstore). This should ensure that we auto-recover from situations in
// which the network was completely gone and we lost all peers.
func (c *Cluster) reBootstrap() {
@@ -1428,7 +1428,7 @@ func (c *Cluster) Unpin(ctx context.Context, h cid.Cid) (*api.Pin, error) {
case api.DataType:
return pin, c.consensus.LogUnpin(ctx, pin)
case api.ShardType:
err := "cannot unpin a shard direclty. Unpin content root CID instead."
err := "cannot unpin a shard directly. Unpin content root CID instead."
return pin, errors.New(err)
case api.MetaType:
// Unpin cluster dag and referenced shards
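
The `reBootstrap` comment in the first hunk of this file describes a periodic reconnect loop. A sketch of that pattern, with a stand-in `connect` function instead of real libp2p dialing:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// reBootstrap regularly retries connecting to the peers remembered in
// a peerstore, so a node auto-recovers after the network was completely
// gone. connect is a hypothetical stand-in for a libp2p dial.
func reBootstrap(ctx context.Context, every time.Duration, peers []string, connect func(string) error) {
	ticker := time.NewTicker(every)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			for _, p := range peers {
				if err := connect(p); err != nil {
					fmt.Printf("bootstrap to %s failed: %s\n", p, err)
				}
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	reBootstrap(ctx, 250*time.Millisecond, []string{"peerA"}, func(p string) error {
		fmt.Println("dialing", p)
		return nil
	})
}
```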

View File

@@ -133,7 +133,7 @@ type Config struct {
// If true, DisableRepinning, ensures that no repinning happens
// when a node goes down.
-// This is useful when doing certain types of maintainance, or simply
+// This is useful when doing certain types of maintenance, or simply
// when not wanting to rely on the monitoring system which needs a revamp.
DisableRepinning bool
@@ -361,7 +361,7 @@ func (cfg *Config) setDefaults() {
cfg.MDNSInterval = DefaultMDNSInterval
cfg.DisableRepinning = DefaultDisableRepinning
cfg.FollowerMode = DefaultFollowerMode
cfg.PeerstoreFile = "" // empty so it gets ommited.
cfg.PeerstoreFile = "" // empty so it gets omitted.
cfg.PeerAddresses = []ma.Multiaddr{}
cfg.RPCPolicy = DefaultRPCPolicy
}

View File

@@ -305,7 +305,7 @@ func runCmd(c *cli.Context) error {
// Allow customization via env vars
err = apiCfg.ApplyEnvVars()
if err != nil {
return cli.Exit(errors.Wrap(err, "error applying enviromental variables to restapi configuration"), 1)
return cli.Exit(errors.Wrap(err, "error applying environmental variables to restapi configuration"), 1)
}
rest, err := rest.NewAPI(ctx, &apiCfg)

View File

@@ -17,7 +17,7 @@ const lockFileName = "cluster.lock"
var locker *lock
-// lock helps to coordinate procees via a lock file
+// lock helps to coordinate proceeds via a lock file
type lock struct {
lockCloser io.Closer
path string

View File

@@ -133,7 +133,7 @@ func ErrorOut(m string, a ...interface{}) {
// WaitForIPFS hangs until IPFS API becomes available or the given context is
// cancelled. The IPFS API location is determined by the default ipfshttp
-// component configuration and can be overriden using environment variables
+// component configuration and can be overridden using environment variables
// that affect that configuration. Note that we have to do this in the blind,
// since we want to wait for IPFS before we even fetch the IPFS component
// configuration (because the configuration might be hosted on IPFS itself)

View File

@@ -2,7 +2,7 @@
// allows to orchestrate pinning operations among several IPFS nodes.
//
// IPFS Cluster peers form a separate libp2p swarm. A Cluster peer uses
-// multiple Cluster Componenets which perform different tasks like managing
+// multiple Cluster Components which perform different tasks like managing
// the underlying IPFS daemons, or providing APIs for external control.
package ipfscluster
@@ -56,7 +56,7 @@ type Consensus interface {
// IsTrustedPeer returns true if the given peer is "trusted".
// This will grant access to more rpc endpoints and a
// non-trusted one. This should be fast as it will be
-// called repeteadly for every remote RPC request.
+// called repeatedly for every remote RPC request.
IsTrustedPeer(context.Context, peer.ID) bool
// Trust marks a peer as "trusted".
Trust(context.Context, peer.ID) error
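
Since `IsTrustedPeer` runs for every remote RPC request, implementations want an O(1) lookup. One plausible shape, illustrative only and not the consensus component's actual code, is a mutex-guarded set:

```go
package main

import (
	"fmt"
	"sync"
)

// trustedSet keeps trusted peer IDs in a map behind an RWMutex so the
// per-request IsTrustedPeer check stays cheap.
type trustedSet struct {
	mu    sync.RWMutex
	peers map[string]struct{}
}

func newTrustedSet() *trustedSet {
	return &trustedSet{peers: make(map[string]struct{})}
}

// Trust marks a peer as trusted.
func (t *trustedSet) Trust(p string) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.peers[p] = struct{}{}
}

// IsTrustedPeer reports whether p was previously trusted.
func (t *trustedSet) IsTrustedPeer(p string) bool {
	t.mu.RLock()
	defer t.mu.RUnlock()
	_, ok := t.peers[p]
	return ok
}

func main() {
	ts := newTrustedSet()
	ts.Trust("QmPeerA")
	fmt.Println(ts.IsTrustedPeer("QmPeerA"), ts.IsTrustedPeer("QmPeerB")) // true false
}
```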

View File

@@ -925,7 +925,7 @@ func (ipfs *Connector) BlockGet(ctx context.Context, c cid.Cid) ([]byte, error)
// if err != nil {
// return err
// }
// logger.Debugf("refs for %s sucessfully fetched", c)
// logger.Debugf("refs for %s successfully fetched", c)
// return nil
// }