2018-10-13 14:27:03 +00:00
|
|
|
package ipfsproxy
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"encoding/json"
|
|
|
|
"fmt"
|
2019-09-10 06:59:03 +00:00
|
|
|
"io"
|
2018-10-13 14:27:03 +00:00
|
|
|
"net"
|
|
|
|
"net/http"
|
|
|
|
"net/http/httputil"
|
|
|
|
"net/url"
|
2019-09-10 06:59:03 +00:00
|
|
|
"os"
|
2018-10-13 14:27:03 +00:00
|
|
|
"strconv"
|
|
|
|
"strings"
|
|
|
|
"sync"
|
|
|
|
"time"
|
|
|
|
|
2018-12-18 14:44:11 +00:00
|
|
|
"github.com/ipfs/ipfs-cluster/adder/adderutils"
|
|
|
|
"github.com/ipfs/ipfs-cluster/api"
|
|
|
|
"github.com/ipfs/ipfs-cluster/rpcutil"
|
|
|
|
|
2019-09-10 06:59:03 +00:00
|
|
|
handlers "github.com/gorilla/handlers"
|
2019-01-10 19:03:59 +00:00
|
|
|
mux "github.com/gorilla/mux"
|
2018-10-13 14:27:03 +00:00
|
|
|
cid "github.com/ipfs/go-cid"
|
2021-06-28 20:10:33 +00:00
|
|
|
cmd "github.com/ipfs/go-ipfs-cmds"
|
2020-03-13 20:40:02 +00:00
|
|
|
logging "github.com/ipfs/go-log/v2"
|
2019-03-01 17:46:27 +00:00
|
|
|
path "github.com/ipfs/go-path"
|
2019-06-14 10:41:11 +00:00
|
|
|
peer "github.com/libp2p/go-libp2p-core/peer"
|
2018-11-04 03:27:09 +00:00
|
|
|
rpc "github.com/libp2p/go-libp2p-gorpc"
|
2018-10-13 14:27:03 +00:00
|
|
|
madns "github.com/multiformats/go-multiaddr-dns"
|
2020-08-27 12:10:58 +00:00
|
|
|
manet "github.com/multiformats/go-multiaddr/net"
|
2019-03-01 17:46:27 +00:00
|
|
|
|
|
|
|
"go.opencensus.io/plugin/ochttp"
|
|
|
|
"go.opencensus.io/plugin/ochttp/propagation/tracecontext"
|
|
|
|
"go.opencensus.io/trace"
|
2018-10-13 14:27:03 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// DNSTimeout is used when resolving DNS multiaddresses in this module
var DNSTimeout = 5 * time.Second

var (
	// logger carries this component's operational messages.
	logger = logging.Logger("ipfsproxy")
	// proxyLogger receives the HTTP access log lines produced by
	// handlers.LoggingHandler when no log file is configured.
	proxyLogger = logging.Logger("ipfsproxylog")
)
|
2018-10-13 14:27:03 +00:00
|
|
|
|
2018-11-01 10:24:05 +00:00
|
|
|
// Server offers an IPFS API, hijacking some interesting requests
// and forwarding the rest to the ipfs daemon
// it proxies HTTP requests to the configured IPFS
// daemon. It is able to intercept these requests though, and
// perform extra operations on them.
type Server struct {
	ctx    context.Context
	cancel func()

	config     *Config
	nodeScheme string // "http" or "https", depending on cfg.NodeHTTPS
	nodeAddr   string

	// rpcClient is set via SetClient; rpcReady signals run() that it
	// is available.
	rpcClient *rpc.Client
	rpcReady  chan struct{}

	listeners        []net.Listener    // proxy listener
	server           *http.Server      // proxy server
	ipfsRoundTripper http.RoundTripper // allows to talk to IPFS

	// per-request header cache, keyed and populated elsewhere in
	// this package (see setHeaders).
	ipfsHeadersStore sync.Map

	// shutdownLock guards shutdown and serializes Shutdown with
	// run(); wg tracks the per-listener serving goroutines.
	shutdownLock sync.Mutex
	shutdown     bool
	wg           sync.WaitGroup
}
|
|
|
|
|
|
|
|
// ipfsPinType mirrors the per-key value of an IPFS "pin ls" response
// (e.g. {"Type": "recursive"}).
type ipfsPinType struct {
	Type string
}

// ipfsPinLsResp mirrors the JSON shape of an IPFS "pin ls" response:
// a map from CID string to its pin type.
type ipfsPinLsResp struct {
	Keys map[string]ipfsPinType
}

// ipfsPinOpResp mirrors the JSON shape of IPFS "pin add"/"pin rm"
// responses: the list of affected CID strings.
type ipfsPinOpResp struct {
	Pins []string
}

// From https://github.com/ipfs/go-ipfs/blob/master/core/coreunix/add.go#L49
type ipfsAddResp struct {
	Name  string
	Hash  string `json:",omitempty"`
	Bytes int64  `json:",omitempty"`
	Size  string `json:",omitempty"`
}
|
|
|
|
|
2019-09-10 06:59:03 +00:00
|
|
|
type logWriter struct {
|
|
|
|
}
|
|
|
|
|
|
|
|
func (lw logWriter) Write(b []byte) (int, error) {
|
|
|
|
proxyLogger.Infof(string(b))
|
|
|
|
return len(b), nil
|
|
|
|
}
|
|
|
|
|
2018-11-01 10:24:05 +00:00
|
|
|
// New returns an ipfs Proxy component. It validates the configuration,
// resolves the IPFS node address, opens every configured listener,
// builds the hijacking router plus reverse proxy, and launches the
// serving goroutine (which waits for SetClient before accepting work).
func New(cfg *Config) (*Server, error) {
	err := cfg.Validate()
	if err != nil {
		return nil, err
	}

	nodeMAddr := cfg.NodeAddr
	// dns multiaddresses need to be resolved first
	if madns.Matches(nodeMAddr) {
		ctx, cancel := context.WithTimeout(context.Background(), DNSTimeout)
		defer cancel()
		resolvedAddrs, err := madns.Resolve(ctx, cfg.NodeAddr)
		if err != nil {
			logger.Error(err)
			return nil, err
		}
		// Use the first resolved address only.
		nodeMAddr = resolvedAddrs[0]
	}

	_, nodeAddr, err := manet.DialArgs(nodeMAddr)
	if err != nil {
		return nil, err
	}

	// Open one listener per configured proxy address. NOTE(review):
	// listeners opened before a later failure are not closed on the
	// error paths below — confirm whether this leak matters to callers.
	var listeners []net.Listener
	for _, addr := range cfg.ListenAddr {
		proxyNet, proxyAddr, err := manet.DialArgs(addr)
		if err != nil {
			return nil, err
		}

		l, err := net.Listen(proxyNet, proxyAddr)
		if err != nil {
			return nil, err
		}
		listeners = append(listeners, l)
	}

	nodeScheme := "http"
	if cfg.NodeHTTPS {
		nodeScheme = "https"
	}
	nodeHTTPAddr := fmt.Sprintf("%s://%s", nodeScheme, nodeAddr)
	proxyURL, err := url.Parse(nodeHTTPAddr)
	if err != nil {
		return nil, err
	}

	var handler http.Handler
	router := mux.NewRouter()
	handler = router

	// When tracing is enabled, wrap the router so every request opens
	// a server span named after host/path/method.
	if cfg.Tracing {
		handler = &ochttp.Handler{
			IsPublicEndpoint: true,
			Propagation:      &tracecontext.HTTPFormat{},
			Handler:          router,
			StartOptions:     trace.StartOptions{SpanKind: trace.SpanKindServer},
			FormatSpanName: func(req *http.Request) string {
				return "proxy:" + req.Host + ":" + req.URL.Path + ":" + req.Method
			},
		}
	}

	// Access-log destination: a file when configured, otherwise the
	// proxyLogger via logWriter.
	var writer io.Writer
	if cfg.LogFile != "" {
		f, err := os.OpenFile(cfg.getLogPath(), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
		if err != nil {
			return nil, err
		}
		writer = f
	} else {
		writer = logWriter{}
	}

	s := &http.Server{
		ReadTimeout:       cfg.ReadTimeout,
		WriteTimeout:      cfg.WriteTimeout,
		ReadHeaderTimeout: cfg.ReadHeaderTimeout,
		IdleTimeout:       cfg.IdleTimeout,
		Handler:           handlers.LoggingHandler(writer, handler),
		MaxHeaderBytes:    cfg.MaxHeaderBytes,
	}

	// See: https://github.com/ipfs/go-ipfs/issues/5168
	// See: https://github.com/ipfs/ipfs-cluster/issues/548
	// on why this is re-enabled.
	s.SetKeepAlivesEnabled(true) // A reminder that this can be changed

	reverseProxy := httputil.NewSingleHostReverseProxy(proxyURL)
	reverseProxy.Transport = http.DefaultTransport
	ctx, cancel := context.WithCancel(context.Background())
	proxy := &Server{
		ctx:              ctx,
		config:           cfg,
		cancel:           cancel,
		nodeAddr:         nodeHTTPAddr,
		nodeScheme:       nodeScheme,
		rpcReady:         make(chan struct{}, 1),
		listeners:        listeners,
		server:           s,
		ipfsRoundTripper: reverseProxy.Transport,
	}

	// Ideally, we should only intercept POST requests, but
	// people may be calling the API with GET or worse, PUT
	// because IPFS has been allowing this traditionally.
	// The main idea here is that we do not intercept
	// OPTIONS requests (or HEAD).
	hijackSubrouter := router.
		Methods(http.MethodPost, http.MethodGet, http.MethodPut).
		PathPrefix("/api/v0").
		Subrouter()

	// Add hijacked routes
	hijackSubrouter.
		Path("/pin/add/{arg}").
		HandlerFunc(slashHandler(proxy.pinHandler)).
		Name("PinAddSlash") // supports people using the API wrong.
	hijackSubrouter.
		Path("/pin/add").
		HandlerFunc(proxy.pinHandler).
		Name("PinAdd")
	hijackSubrouter.
		Path("/pin/rm/{arg}").
		HandlerFunc(slashHandler(proxy.unpinHandler)).
		Name("PinRmSlash") // supports people using the API wrong.
	hijackSubrouter.
		Path("/pin/rm").
		HandlerFunc(proxy.unpinHandler).
		Name("PinRm")
	hijackSubrouter.
		Path("/pin/ls/{arg}").
		HandlerFunc(slashHandler(proxy.pinLsHandler)).
		Name("PinLsSlash") // supports people using the API wrong.
	hijackSubrouter.
		Path("/pin/ls").
		HandlerFunc(proxy.pinLsHandler).
		Name("PinLs")
	hijackSubrouter.
		Path("/pin/update").
		HandlerFunc(proxy.pinUpdateHandler).
		Name("PinUpdate")
	hijackSubrouter.
		Path("/add").
		HandlerFunc(proxy.addHandler).
		Name("Add")
	hijackSubrouter.
		Path("/repo/stat").
		HandlerFunc(proxy.repoStatHandler).
		Name("RepoStat")
	hijackSubrouter.
		Path("/repo/gc").
		HandlerFunc(proxy.repoGCHandler).
		Name("RepoGC")

	// Everything else goes to the IPFS daemon.
	router.PathPrefix("/").Handler(reverseProxy)

	go proxy.run()
	return proxy, nil
}
|
|
|
|
|
|
|
|
// SetClient makes the component ready to perform RPC
// requests.
func (proxy *Server) SetClient(c *rpc.Client) {
	// The client must be stored before signalling readiness, as run()
	// starts serving as soon as it receives on rpcReady.
	proxy.rpcClient = c
	// rpcReady has capacity 1, so this send does not block.
	proxy.rpcReady <- struct{}{}
}
|
|
|
|
|
|
|
|
// Shutdown stops any listeners and stops the component from taking
// any requests. It is idempotent: repeated calls after the first are
// no-ops. It blocks until all serving goroutines have exited.
func (proxy *Server) Shutdown(ctx context.Context) error {
	proxy.shutdownLock.Lock()
	defer proxy.shutdownLock.Unlock()

	if proxy.shutdown {
		logger.Debug("already shutdown")
		return nil
	}

	logger.Info("stopping IPFS Proxy")

	proxy.cancel()
	// Closing rpcReady unblocks run() if SetClient was never called.
	close(proxy.rpcReady)
	proxy.server.SetKeepAlivesEnabled(false)
	// Closing the listeners makes each server.Serve call return.
	for _, l := range proxy.listeners {
		l.Close()
	}

	// Wait for the per-listener goroutines launched by run().
	proxy.wg.Wait()
	proxy.shutdown = true
	return nil
}
|
|
|
|
|
2018-10-14 10:37:42 +00:00
|
|
|
// launches proxy when we receive the rpcReady signal.
func (proxy *Server) run() {
	<-proxy.rpcReady

	// Do not shutdown while launching threads
	// -- prevents race conditions with proxy.wg.
	proxy.shutdownLock.Lock()
	defer proxy.shutdownLock.Unlock()

	// This launches the proxy: one serving goroutine per listener,
	// all tracked by proxy.wg so Shutdown can wait for them.
	proxy.wg.Add(len(proxy.listeners))
	for _, l := range proxy.listeners {
		go func(l net.Listener) {
			defer proxy.wg.Done()

			maddr, err := manet.FromNetAddr(l.Addr())
			if err != nil {
				// Log only; maddr is used just for the banner below.
				logger.Error(err)
			}

			logger.Infof(
				"IPFS Proxy: %s -> %s",
				maddr,
				proxy.config.NodeAddr,
			)
			err = proxy.server.Serve(l) // hangs here
			// "closed network connection" is the expected error when
			// Shutdown closes the listener; don't log it.
			if err != nil && !strings.Contains(err.Error(), "closed network connection") {
				logger.Error(err)
			}
		}(l)
	}
}
|
|
|
|
|
2019-01-10 19:03:59 +00:00
|
|
|
// ipfsErrorResponder writes an http error response just like IPFS would.
|
2019-05-02 09:32:13 +00:00
|
|
|
func ipfsErrorResponder(w http.ResponseWriter, errMsg string, code int) {
|
2021-06-28 20:10:33 +00:00
|
|
|
res := cmd.Errorf(cmd.ErrNormal, errMsg)
|
|
|
|
|
2018-10-13 14:27:03 +00:00
|
|
|
resBytes, _ := json.Marshal(res)
|
2019-05-02 09:32:13 +00:00
|
|
|
if code > 0 {
|
|
|
|
w.WriteHeader(code)
|
|
|
|
} else {
|
|
|
|
w.WriteHeader(http.StatusInternalServerError)
|
|
|
|
}
|
2018-10-13 14:27:03 +00:00
|
|
|
w.Write(resBytes)
|
|
|
|
}
|
|
|
|
|
2018-11-01 10:24:05 +00:00
|
|
|
// pinOpHandler implements the shared body of the hijacked pin/add and
// pin/rm endpoints: it parses the ?arg IPFS path, invokes the given
// Cluster RPC op ("PinPath" or "UnpinPath") and replies with the
// IPFS-shaped {"Pins": [...]} response.
func (proxy *Server) pinOpHandler(op string, w http.ResponseWriter, r *http.Request) {
	proxy.setHeaders(w.Header(), r)

	q := r.URL.Query()
	arg := q.Get("arg")
	p, err := path.ParsePath(arg)
	if err != nil {
		ipfsErrorResponder(w, "Error parsing IPFS Path: "+err.Error(), -1)
		return
	}

	pinPath := api.PinPath{Path: p.String()}
	// Honor IPFS' ?type= parameter (e.g. "recursive"/"direct").
	pinPath.Mode = api.PinModeFromString(q.Get("type"))

	var pin api.Pin
	err = proxy.rpcClient.Call(
		"",
		"Cluster",
		op,
		pinPath,
		&pin,
	)
	if err != nil {
		ipfsErrorResponder(w, err.Error(), -1)
		return
	}

	res := ipfsPinOpResp{
		Pins: []string{pin.Cid.String()},
	}
	resBytes, _ := json.Marshal(res)
	w.WriteHeader(http.StatusOK)
	w.Write(resBytes)
}
|
|
|
|
|
2018-11-01 10:24:05 +00:00
|
|
|
func (proxy *Server) pinHandler(w http.ResponseWriter, r *http.Request) {
|
2019-03-01 17:46:27 +00:00
|
|
|
proxy.pinOpHandler("PinPath", w, r)
|
2018-10-13 14:27:03 +00:00
|
|
|
}
|
|
|
|
|
2018-11-01 10:24:05 +00:00
|
|
|
func (proxy *Server) unpinHandler(w http.ResponseWriter, r *http.Request) {
|
2019-03-01 17:46:27 +00:00
|
|
|
proxy.pinOpHandler("UnpinPath", w, r)
|
2018-10-13 14:27:03 +00:00
|
|
|
}
|
|
|
|
|
2018-11-01 10:24:05 +00:00
|
|
|
func (proxy *Server) pinLsHandler(w http.ResponseWriter, r *http.Request) {
|
2019-01-10 19:03:59 +00:00
|
|
|
proxy.setHeaders(w.Header(), r)
|
2018-12-18 14:44:11 +00:00
|
|
|
|
2019-01-10 19:03:59 +00:00
|
|
|
arg := r.URL.Query().Get("arg")
|
2022-03-22 09:56:16 +00:00
|
|
|
|
|
|
|
stream := false
|
|
|
|
streamArg := r.URL.Query().Get("stream")
|
|
|
|
streamArg2 := r.URL.Query().Get("s")
|
|
|
|
if streamArg == "true" || streamArg2 == "true" {
|
|
|
|
stream = true
|
|
|
|
}
|
|
|
|
|
2019-01-10 19:03:59 +00:00
|
|
|
if arg != "" {
|
2018-10-13 14:27:03 +00:00
|
|
|
c, err := cid.Decode(arg)
|
|
|
|
if err != nil {
|
2019-05-02 09:32:13 +00:00
|
|
|
ipfsErrorResponder(w, err.Error(), -1)
|
2018-10-13 14:27:03 +00:00
|
|
|
return
|
|
|
|
}
|
2019-02-27 17:04:35 +00:00
|
|
|
var pin api.Pin
|
2022-03-19 01:52:46 +00:00
|
|
|
err = proxy.rpcClient.CallContext(
|
|
|
|
r.Context(),
|
2018-10-13 14:27:03 +00:00
|
|
|
"",
|
|
|
|
"Cluster",
|
|
|
|
"PinGet",
|
2019-02-27 17:04:35 +00:00
|
|
|
c,
|
2018-10-13 14:27:03 +00:00
|
|
|
&pin,
|
|
|
|
)
|
|
|
|
if err != nil {
|
2019-05-02 09:32:13 +00:00
|
|
|
ipfsErrorResponder(w, fmt.Sprintf("Error: path '%s' is not pinned", arg), -1)
|
2018-10-13 14:27:03 +00:00
|
|
|
return
|
|
|
|
}
|
2022-03-22 09:56:16 +00:00
|
|
|
if stream {
|
|
|
|
ipinfo := api.IPFSPinInfo{
|
|
|
|
Cid: api.Cid(pin.Cid),
|
|
|
|
Type: pin.Mode.ToIPFSPinStatus(),
|
|
|
|
}
|
|
|
|
resBytes, _ := json.Marshal(ipinfo)
|
|
|
|
w.WriteHeader(http.StatusOK)
|
|
|
|
w.Write(resBytes)
|
|
|
|
} else {
|
|
|
|
pinLs := ipfsPinLsResp{}
|
|
|
|
pinLs.Keys = make(map[string]ipfsPinType)
|
|
|
|
pinLs.Keys[pin.Cid.String()] = ipfsPinType{
|
|
|
|
Type: "recursive",
|
|
|
|
}
|
|
|
|
resBytes, _ := json.Marshal(pinLs)
|
|
|
|
w.WriteHeader(http.StatusOK)
|
|
|
|
w.Write(resBytes)
|
2018-10-13 14:27:03 +00:00
|
|
|
}
|
|
|
|
} else {
|
2022-03-19 01:52:46 +00:00
|
|
|
in := make(chan struct{})
|
|
|
|
close(in)
|
|
|
|
|
|
|
|
pins := make(chan api.Pin)
|
|
|
|
var err error
|
|
|
|
var wg sync.WaitGroup
|
|
|
|
wg.Add(1)
|
|
|
|
go func() {
|
|
|
|
defer wg.Done()
|
|
|
|
err = proxy.rpcClient.Stream(
|
|
|
|
r.Context(),
|
|
|
|
"",
|
|
|
|
"Cluster",
|
|
|
|
"Pins",
|
|
|
|
in,
|
|
|
|
pins,
|
|
|
|
)
|
|
|
|
}()
|
2018-10-13 14:27:03 +00:00
|
|
|
|
2022-03-22 09:56:16 +00:00
|
|
|
if stream {
|
|
|
|
w.Header().Set("Trailer", "X-Stream-Error")
|
|
|
|
w.WriteHeader(http.StatusOK)
|
|
|
|
for pin := range pins {
|
|
|
|
ipinfo := api.IPFSPinInfo{
|
|
|
|
Cid: api.Cid(pin.Cid),
|
|
|
|
Type: pin.Mode.ToIPFSPinStatus(),
|
|
|
|
}
|
|
|
|
resBytes, _ := json.Marshal(ipinfo)
|
|
|
|
w.Write(resBytes)
|
|
|
|
}
|
|
|
|
wg.Wait()
|
|
|
|
if err != nil {
|
|
|
|
w.Header().Add("X-Stream-Error", err.Error())
|
|
|
|
return
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
pinLs := ipfsPinLsResp{}
|
|
|
|
pinLs.Keys = make(map[string]ipfsPinType)
|
|
|
|
|
|
|
|
for pin := range pins {
|
|
|
|
pinLs.Keys[pin.Cid.String()] = ipfsPinType{
|
|
|
|
Type: "recursive",
|
|
|
|
}
|
2018-10-13 14:27:03 +00:00
|
|
|
}
|
2022-03-19 01:52:46 +00:00
|
|
|
|
2022-03-22 09:56:16 +00:00
|
|
|
wg.Wait()
|
|
|
|
if err != nil {
|
|
|
|
ipfsErrorResponder(w, err.Error(), -1)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
resBytes, _ := json.Marshal(pinLs)
|
|
|
|
w.WriteHeader(http.StatusOK)
|
|
|
|
w.Write(resBytes)
|
2022-03-19 01:52:46 +00:00
|
|
|
}
|
2018-10-13 14:27:03 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-04-29 14:36:40 +00:00
|
|
|
func (proxy *Server) pinUpdateHandler(w http.ResponseWriter, r *http.Request) {
|
|
|
|
ctx, span := trace.StartSpan(r.Context(), "ipfsproxy/pinUpdateHandler")
|
|
|
|
defer span.End()
|
|
|
|
|
|
|
|
proxy.setHeaders(w.Header(), r)
|
|
|
|
|
|
|
|
// Check that we have enough arguments and mimic ipfs response when not
|
|
|
|
q := r.URL.Query()
|
|
|
|
args := q["arg"]
|
|
|
|
if len(args) == 0 {
|
2019-05-02 09:32:13 +00:00
|
|
|
ipfsErrorResponder(w, "argument \"from-path\" is required", http.StatusBadRequest)
|
2019-04-29 14:36:40 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
if len(args) == 1 {
|
2019-05-02 09:32:13 +00:00
|
|
|
ipfsErrorResponder(w, "argument \"to-path\" is required", http.StatusBadRequest)
|
2019-04-29 14:36:40 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
unpin := !(q.Get("unpin") == "false")
|
|
|
|
from := args[0]
|
|
|
|
to := args[1]
|
|
|
|
|
|
|
|
// Parse paths (we will need to resolve them)
|
|
|
|
pFrom, err := path.ParsePath(from)
|
|
|
|
if err != nil {
|
2019-05-02 09:32:13 +00:00
|
|
|
ipfsErrorResponder(w, "error parsing \"from-path\" argument: "+err.Error(), -1)
|
2019-04-29 14:36:40 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
pTo, err := path.ParsePath(to)
|
|
|
|
if err != nil {
|
2019-05-02 09:32:13 +00:00
|
|
|
ipfsErrorResponder(w, "error parsing \"to-path\" argument: "+err.Error(), -1)
|
2019-04-29 14:36:40 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Resolve the FROM argument
|
|
|
|
var fromCid cid.Cid
|
|
|
|
err = proxy.rpcClient.CallContext(
|
|
|
|
ctx,
|
|
|
|
"",
|
2019-05-04 20:36:10 +00:00
|
|
|
"IPFSConnector",
|
|
|
|
"Resolve",
|
2019-04-29 14:36:40 +00:00
|
|
|
pFrom.String(),
|
|
|
|
&fromCid,
|
|
|
|
)
|
|
|
|
if err != nil {
|
2019-05-02 09:32:13 +00:00
|
|
|
ipfsErrorResponder(w, err.Error(), -1)
|
2019-04-29 14:36:40 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-07-12 14:40:29 +00:00
|
|
|
// Do a PinPath setting PinUpdate
|
2022-03-19 01:52:46 +00:00
|
|
|
pinPath := api.PinPath{Path: pTo.String()}
|
2019-07-12 14:40:29 +00:00
|
|
|
pinPath.PinUpdate = fromCid
|
2019-04-29 14:36:40 +00:00
|
|
|
|
2019-07-12 14:40:29 +00:00
|
|
|
var pin api.Pin
|
|
|
|
err = proxy.rpcClient.Call(
|
2019-04-29 14:36:40 +00:00
|
|
|
"",
|
|
|
|
"Cluster",
|
|
|
|
"PinPath",
|
2019-07-12 14:40:29 +00:00
|
|
|
pinPath,
|
|
|
|
&pin,
|
2019-04-29 14:36:40 +00:00
|
|
|
)
|
|
|
|
if err != nil {
|
2019-05-02 09:32:13 +00:00
|
|
|
ipfsErrorResponder(w, err.Error(), -1)
|
2019-04-29 14:36:40 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// If unpin != "false", unpin the FROM argument
|
|
|
|
// (it was already resolved).
|
Improve pin/unpin method signatures (#843)
* Improve pin/unpin method signatures:
These changes the following Cluster Go API methods:
* -> Cluster.Pin(ctx, cid, options) (pin, error)
* -> Cluster.Unpin(ctx, cid) (pin, error)
* -> Cluster.PinPath(ctx, path, opts) (pin,error)
Pin and Unpin now return the pinned object.
The signature of the methods now matches that of the API Client, is clearer as
to what options the user can set and is aligned with PinPath, UnpinPath, which
returned pin methods.
The REST API now returns the Pinned/Unpinned object rather than 204-Accepted.
This was necessary for a cleaner pin/update approach, which I'm working on in
another branch.
Most of the changes here are updating tests to the new signatures
* Adapt load-balancing client to new Pin/Unpin signatures
* cluster.go: Fix typo
Co-Authored-By: Kishan Sagathiya <kishansagathiya@gmail.com>
* cluster.go: Fix typo
Co-Authored-By: Kishan Sagathiya <kishansagathiya@gmail.com>
2019-07-22 13:39:11 +00:00
|
|
|
var pinObj api.Pin
|
2019-04-29 14:36:40 +00:00
|
|
|
if unpin {
|
|
|
|
err = proxy.rpcClient.CallContext(
|
|
|
|
ctx,
|
|
|
|
"",
|
|
|
|
"Cluster",
|
|
|
|
"Unpin",
|
2019-07-12 14:40:29 +00:00
|
|
|
api.PinCid(fromCid),
|
Improve pin/unpin method signatures (#843)
* Improve pin/unpin method signatures:
These changes the following Cluster Go API methods:
* -> Cluster.Pin(ctx, cid, options) (pin, error)
* -> Cluster.Unpin(ctx, cid) (pin, error)
* -> Cluster.PinPath(ctx, path, opts) (pin,error)
Pin and Unpin now return the pinned object.
The signature of the methods now matches that of the API Client, is clearer as
to what options the user can set and is aligned with PinPath, UnpinPath, which
returned pin methods.
The REST API now returns the Pinned/Unpinned object rather than 204-Accepted.
This was necessary for a cleaner pin/update approach, which I'm working on in
another branch.
Most of the changes here are updating tests to the new signatures
* Adapt load-balancing client to new Pin/Unpin signatures
* cluster.go: Fix typo
Co-Authored-By: Kishan Sagathiya <kishansagathiya@gmail.com>
* cluster.go: Fix typo
Co-Authored-By: Kishan Sagathiya <kishansagathiya@gmail.com>
2019-07-22 13:39:11 +00:00
|
|
|
&pinObj,
|
2019-04-29 14:36:40 +00:00
|
|
|
)
|
|
|
|
if err != nil {
|
2019-05-02 09:32:13 +00:00
|
|
|
ipfsErrorResponder(w, err.Error(), -1)
|
2019-04-29 14:36:40 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
res := ipfsPinOpResp{
|
2019-07-12 14:40:29 +00:00
|
|
|
Pins: []string{fromCid.String(), pin.Cid.String()},
|
2019-04-29 14:36:40 +00:00
|
|
|
}
|
|
|
|
resBytes, _ := json.Marshal(res)
|
|
|
|
w.WriteHeader(http.StatusOK)
|
|
|
|
w.Write(resBytes)
|
|
|
|
}
|
|
|
|
|
2018-11-01 10:24:05 +00:00
|
|
|
func (proxy *Server) addHandler(w http.ResponseWriter, r *http.Request) {
|
2019-01-10 19:03:59 +00:00
|
|
|
proxy.setHeaders(w.Header(), r)
|
2018-12-18 14:44:11 +00:00
|
|
|
|
2018-10-13 14:27:03 +00:00
|
|
|
reader, err := r.MultipartReader()
|
|
|
|
if err != nil {
|
2019-05-02 09:32:13 +00:00
|
|
|
ipfsErrorResponder(w, "error reading request: "+err.Error(), -1)
|
2018-10-13 14:27:03 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
q := r.URL.Query()
|
|
|
|
if q.Get("only-hash") == "true" {
|
2019-05-02 09:32:13 +00:00
|
|
|
ipfsErrorResponder(w, "only-hash is not supported when adding to cluster", -1)
|
2018-10-13 14:27:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Luckily, most IPFS add query params are compatible with cluster's
|
|
|
|
// /add params. We can parse most of them directly from the query.
|
|
|
|
params, err := api.AddParamsFromQuery(q)
|
|
|
|
if err != nil {
|
2019-05-02 09:32:13 +00:00
|
|
|
ipfsErrorResponder(w, "error parsing options:"+err.Error(), -1)
|
2018-10-13 14:27:03 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
trickle := q.Get("trickle")
|
|
|
|
if trickle == "true" {
|
|
|
|
params.Layout = "trickle"
|
|
|
|
}
|
2022-03-19 01:52:46 +00:00
|
|
|
nopin := q.Get("pin") == "false"
|
|
|
|
if nopin {
|
|
|
|
params.NoPin = true
|
|
|
|
}
|
2018-10-13 14:27:03 +00:00
|
|
|
|
2020-03-13 20:40:02 +00:00
|
|
|
logger.Warnf("Proxy/add does not support all IPFS params. Current options: %+v", params)
|
2018-10-13 14:27:03 +00:00
|
|
|
|
Adders: stream blocks to destinations
This commit fixes #810 and adds block streaming to the final destinations when
adding. This should add major performance gains when adding data to clusters.
Before, everytime cluster issued a block, it was broadcasted individually to
all destinations (new libp2p stream), where it was block/put to IPFS (a single
block/put http roundtrip per block).
Now, blocks are streamed all the way from the adder module to the ipfs daemon,
by making every block as it arrives a single part in a multipart block/put
request.
Before, block-broadcast needed to wait for all destinations to finish in order
to process the next block. Now, buffers allow some destinations to be faster
than others while sending and receiving blocks.
Before, if a block put request failed to be broadcasted everywhere, an error
would happen at that moment.
Now, we keep streaming until the end and only then report any errors. The
operation succeeds as long as at least one stream finished successfully.
Errors block/putting to IPFS will not abort streams. Instead, subsequent
blocks are retried with a new request, although the method will return an
error when the stream finishes if there were errors at any point.
2022-03-24 01:17:10 +00:00
|
|
|
outputTransform := func(in api.AddedOutput) interface{} {
|
2021-01-13 15:29:15 +00:00
|
|
|
cidStr := ""
|
|
|
|
if in.Cid.Defined() {
|
|
|
|
cidStr = in.Cid.String()
|
|
|
|
}
|
2018-10-13 14:27:03 +00:00
|
|
|
r := &ipfsAddResp{
|
|
|
|
Name: in.Name,
|
2021-01-13 15:29:15 +00:00
|
|
|
Hash: cidStr,
|
2018-10-13 14:27:03 +00:00
|
|
|
Bytes: int64(in.Bytes),
|
|
|
|
}
|
|
|
|
if in.Size != 0 {
|
|
|
|
r.Size = strconv.FormatUint(in.Size, 10)
|
|
|
|
}
|
|
|
|
return r
|
|
|
|
}
|
|
|
|
|
2022-03-19 01:52:46 +00:00
|
|
|
_, err = adderutils.AddMultipartHTTPHandler(
|
2018-11-01 10:24:05 +00:00
|
|
|
proxy.ctx,
|
|
|
|
proxy.rpcClient,
|
2018-10-13 14:27:03 +00:00
|
|
|
params,
|
|
|
|
reader,
|
|
|
|
w,
|
|
|
|
outputTransform,
|
|
|
|
)
|
|
|
|
if err != nil {
|
2022-03-19 01:52:46 +00:00
|
|
|
logger.Error(err)
|
2018-10-13 14:27:03 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-11-01 10:24:05 +00:00
|
|
|
func (proxy *Server) repoStatHandler(w http.ResponseWriter, r *http.Request) {
|
2019-01-10 19:03:59 +00:00
|
|
|
proxy.setHeaders(w.Header(), r)
|
2018-12-18 14:44:11 +00:00
|
|
|
|
2018-11-03 14:54:15 +00:00
|
|
|
peers := make([]peer.ID, 0)
|
2018-11-01 10:24:05 +00:00
|
|
|
err := proxy.rpcClient.Call(
|
2018-10-13 14:27:03 +00:00
|
|
|
"",
|
2019-05-04 20:36:10 +00:00
|
|
|
"Consensus",
|
|
|
|
"Peers",
|
2018-10-13 14:27:03 +00:00
|
|
|
struct{}{},
|
|
|
|
&peers,
|
|
|
|
)
|
|
|
|
if err != nil {
|
2019-05-02 09:32:13 +00:00
|
|
|
ipfsErrorResponder(w, err.Error(), -1)
|
2018-10-13 14:27:03 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-11-01 10:24:05 +00:00
|
|
|
ctxs, cancels := rpcutil.CtxsWithCancel(proxy.ctx, len(peers))
|
2018-10-13 14:27:03 +00:00
|
|
|
defer rpcutil.MultiCancel(cancels)
|
|
|
|
|
2020-04-14 17:58:00 +00:00
|
|
|
repoStats := make([]*api.IPFSRepoStat, len(peers))
|
|
|
|
repoStatsIfaces := make([]interface{}, len(repoStats))
|
2018-10-13 14:27:03 +00:00
|
|
|
for i := range repoStats {
|
2019-02-27 18:43:29 +00:00
|
|
|
repoStats[i] = &api.IPFSRepoStat{}
|
2019-02-27 17:04:35 +00:00
|
|
|
repoStatsIfaces[i] = repoStats[i]
|
2018-10-13 14:27:03 +00:00
|
|
|
}
|
|
|
|
|
2018-11-01 10:24:05 +00:00
|
|
|
errs := proxy.rpcClient.MultiCall(
|
2018-10-13 14:27:03 +00:00
|
|
|
ctxs,
|
|
|
|
peers,
|
2019-05-04 20:36:10 +00:00
|
|
|
"IPFSConnector",
|
|
|
|
"RepoStat",
|
2018-10-13 14:27:03 +00:00
|
|
|
struct{}{},
|
|
|
|
repoStatsIfaces,
|
|
|
|
)
|
|
|
|
|
|
|
|
totalStats := api.IPFSRepoStat{}
|
|
|
|
|
|
|
|
for i, err := range errs {
|
|
|
|
if err != nil {
|
2019-05-09 19:23:49 +00:00
|
|
|
if rpc.IsAuthorizationError(err) {
|
|
|
|
logger.Debug(err)
|
|
|
|
continue
|
|
|
|
}
|
2018-10-13 14:27:03 +00:00
|
|
|
logger.Errorf("%s repo/stat errored: %s", peers[i], err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
totalStats.RepoSize += repoStats[i].RepoSize
|
|
|
|
totalStats.StorageMax += repoStats[i].StorageMax
|
|
|
|
}
|
|
|
|
|
|
|
|
resBytes, _ := json.Marshal(totalStats)
|
|
|
|
w.WriteHeader(http.StatusOK)
|
|
|
|
w.Write(resBytes)
|
|
|
|
}
|
|
|
|
|
2019-12-06 12:08:57 +00:00
|
|
|
type ipfsRepoGCResp struct {
|
|
|
|
Key cid.Cid `json:",omitempty"`
|
|
|
|
Error string `json:",omitempty"`
|
|
|
|
}
|
|
|
|
|
|
|
|
func (proxy *Server) repoGCHandler(w http.ResponseWriter, r *http.Request) {
|
|
|
|
queryValues := r.URL.Query()
|
|
|
|
streamErrors := queryValues.Get("stream-errors") == "true"
|
|
|
|
// ignoring `quiet` since it only affects text output
|
|
|
|
|
|
|
|
proxy.setHeaders(w.Header(), r)
|
|
|
|
|
|
|
|
w.Header().Set("Trailer", "X-Stream-Error")
|
|
|
|
var repoGC api.GlobalRepoGC
|
|
|
|
err := proxy.rpcClient.CallContext(
|
|
|
|
r.Context(),
|
|
|
|
"",
|
|
|
|
"Cluster",
|
|
|
|
"RepoGC",
|
|
|
|
struct{}{},
|
|
|
|
&repoGC,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
ipfsErrorResponder(w, err.Error(), -1)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
w.WriteHeader(http.StatusOK)
|
|
|
|
enc := json.NewEncoder(w)
|
|
|
|
var ipfsRepoGC ipfsRepoGCResp
|
|
|
|
mError := multiError{}
|
|
|
|
for _, gc := range repoGC.PeerMap {
|
|
|
|
for _, key := range gc.Keys {
|
|
|
|
if streamErrors {
|
|
|
|
ipfsRepoGC = ipfsRepoGCResp{Key: key.Key, Error: key.Error}
|
|
|
|
} else {
|
|
|
|
ipfsRepoGC = ipfsRepoGCResp{Key: key.Key}
|
|
|
|
if key.Error != "" {
|
|
|
|
mError.add(key.Error)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Cluster tags start with small letter, but IPFS tags with capital letter.
|
|
|
|
if err := enc.Encode(ipfsRepoGC); err != nil {
|
|
|
|
logger.Error(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
mErrStr := mError.Error()
|
|
|
|
if !streamErrors && mErrStr != "" {
|
|
|
|
w.Header().Set("X-Stream-Error", mErrStr)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-01-10 19:03:59 +00:00
|
|
|
// slashHandler returns a handler which converts a /a/b/c/<argument> request
|
2019-01-11 12:36:56 +00:00
|
|
|
// into an /a/b/c/<argument>?arg=<argument> one. And uses the given origHandler
|
2019-01-10 19:03:59 +00:00
|
|
|
// for it. Our handlers expect that arguments are passed in the ?arg query
|
|
|
|
// value.
|
|
|
|
func slashHandler(origHandler http.HandlerFunc) http.HandlerFunc {
|
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
warnMsg := "You are using an undocumented form of the IPFS API. "
|
2018-10-13 14:27:03 +00:00
|
|
|
warnMsg += "Consider passing your command arguments"
|
|
|
|
warnMsg += "with the '?arg=' query parameter"
|
2019-01-10 19:03:59 +00:00
|
|
|
logger.Error(warnMsg)
|
|
|
|
|
|
|
|
vars := mux.Vars(r)
|
|
|
|
arg := vars["arg"]
|
|
|
|
|
|
|
|
// IF we needed to modify the request path, we could do
|
|
|
|
// something along these lines. This is not the case
|
|
|
|
// at the moment. We just need to set the query argument.
|
|
|
|
//
|
|
|
|
// route := mux.CurrentRoute(r)
|
|
|
|
// path, err := route.GetPathTemplate()
|
|
|
|
// if err != nil {
|
|
|
|
// // I'd like to panic, but I don' want to kill a full
|
|
|
|
// // peer just because of a buggy use.
|
|
|
|
// logger.Critical("BUG: wrong use of slashHandler")
|
|
|
|
// origHandler(w, r) // proceed as nothing
|
|
|
|
// return
|
|
|
|
// }
|
|
|
|
// fixedPath := strings.TrimSuffix(path, "/{arg}")
|
|
|
|
// r.URL.Path = url.PathEscape(fixedPath)
|
|
|
|
// r.URL.RawPath = fixedPath
|
|
|
|
|
|
|
|
q := r.URL.Query()
|
|
|
|
q.Set("arg", arg)
|
|
|
|
r.URL.RawQuery = q.Encode()
|
|
|
|
origHandler(w, r)
|
2018-10-13 14:27:03 +00:00
|
|
|
}
|
|
|
|
}
|