diff --git a/core/node/builder.go b/core/node/builder.go
index 1dee19130907..029ebd37ddf3 100644
--- a/core/node/builder.go
+++ b/core/node/builder.go
@@ -83,6 +83,7 @@ func (cfg *BuildCfg) fillDefaults() error {
 	return nil
 }
 
+// options creates an fx option group from this build config
 func (cfg *BuildCfg) options(ctx context.Context) fx.Option {
 	err := cfg.fillDefaults()
 	if err != nil {
diff --git a/core/node/core.go b/core/node/core.go
index 84d32bd1ac41..5da6d2752e8d 100644
--- a/core/node/core.go
+++ b/core/node/core.go
@@ -25,7 +25,8 @@ import (
 	"go.uber.org/fx"
 )
 
-func BlockServiceCtor(lc fx.Lifecycle, bs blockstore.Blockstore, rem exchange.Interface) blockservice.BlockService {
+// BlockService creates a new blockservice which provides an interface to fetch content-addressable blocks
+func BlockService(lc fx.Lifecycle, bs blockstore.Blockstore, rem exchange.Interface) blockservice.BlockService {
 	bsvc := blockservice.New(bs, rem)
 
 	lc.Append(fx.Hook{
@@ -37,6 +38,7 @@ func BlockServiceCtor(lc fx.Lifecycle, bs blockstore.Blockstore, rem exchange.In
 	return bsvc
 }
 
+// Pinning creates a new pinner which tells GC which blocks should be kept
 func Pinning(bstore blockstore.Blockstore, ds format.DAGService, repo repo.Repo) (pin.Pinner, error) {
 	internalDag := merkledag.NewDAGService(blockservice.New(bstore, offline.Exchange(bstore)))
 	pinning, err := pin.LoadPinner(repo.Datastore(), ds, internalDag)
@@ -51,11 +53,13 @@ func Pinning(bstore blockstore.Blockstore, ds format.DAGService, repo repo.Repo)
 	return pinning, nil
 }
 
-func DagCtor(bs blockservice.BlockService) format.DAGService {
+// Dag creates a new DAGService
+func Dag(bs blockservice.BlockService) format.DAGService {
 	return merkledag.NewDAGService(bs)
 }
 
-func OnlineExchangeCtor(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, rt routing.IpfsRouting, bs blockstore.GCBlockstore) exchange.Interface {
+// OnlineExchange creates a new LibP2P-backed block exchange (BitSwap)
+func OnlineExchange(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, rt routing.IpfsRouting, bs blockstore.GCBlockstore) exchange.Interface {
 	bitswapNetwork := network.NewFromIpfsHost(host, rt)
 	exch := bitswap.New(helpers.LifecycleCtx(mctx, lc), bitswapNetwork, bs)
 	lc.Append(fx.Hook{
@@ -66,6 +70,7 @@ func OnlineExchangeCtor(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host
 	return exch
 }
 
+// Files loads the persisted MFS root
 func Files(mctx helpers.MetricsCtx, lc fx.Lifecycle, repo repo.Repo, dag format.DAGService) (*mfs.Root, error) {
 	dsk := datastore.NewKey("/local/filesroot")
 	pf := func(ctx context.Context, c cid.Cid) error {
diff --git a/core/node/groups.go b/core/node/groups.go
index 29c2e0f63d58..1a69f03de402 100644
--- a/core/node/groups.go
+++ b/core/node/groups.go
@@ -51,25 +51,29 @@ func LibP2P(cfg *BuildCfg) fx.Option {
 	return opts
 }
 
+// Storage groups units which set up datastore-based persistence and blockstore layers
 func Storage(cfg *BuildCfg) fx.Option {
 	return fx.Options(
 		fx.Provide(RepoConfig),
-		fx.Provide(DatastoreCtor),
+		fx.Provide(Datastore),
 		fx.Provide(BaseBlockstoreCtor(cfg.Permanent, cfg.NilRepo)),
 		fx.Provide(GcBlockstoreCtor),
 	)
 }
 
+// Identity groups units providing cryptographic identity
 var Identity = fx.Options(
 	fx.Provide(PeerID),
 	fx.Provide(PrivateKey),
 	fx.Provide(libp2p.Peerstore),
 )
 
+// IPNS groups namesys-related units
 var IPNS = fx.Options(
 	fx.Provide(RecordValidator),
 )
 
+// Providers groups units managing provider routing records
 var Providers = fx.Options(
 	fx.Provide(ProviderQueue),
 	fx.Provide(ProviderCtor),
@@ -78,10 +82,11 @@ var Providers = fx.Options(
 	fx.Invoke(Reprovider),
 )
 
+// Online groups online-only units
 func Online(cfg *BuildCfg) fx.Option {
 	return fx.Options(
-		fx.Provide(OnlineExchangeCtor),
-		fx.Provide(OnlineNamesysCtor),
+		fx.Provide(OnlineExchange),
+		fx.Provide(OnlineNamesys),
 
 		fx.Invoke(IpnsRepublisher),
 
@@ -92,16 +97,18 @@ func Online(cfg *BuildCfg) fx.Option {
 	)
 }
 
+// Offline groups offline alternatives to Online units
 var Offline = fx.Options(
 	fx.Provide(offline.Exchange),
-	fx.Provide(OfflineNamesysCtor),
+	fx.Provide(OfflineNamesys),
 	fx.Provide(offroute.NewOfflineRouter),
 	fx.Provide(provider.NewOfflineProvider),
 )
 
+// Core groups basic IPFS services
 var Core = fx.Options(
-	fx.Provide(BlockServiceCtor),
-	fx.Provide(DagCtor),
+	fx.Provide(BlockService),
+	fx.Provide(Dag),
 	fx.Provide(resolver.NewBasicResolver),
 	fx.Provide(Pinning),
 	fx.Provide(Files),
@@ -114,6 +121,7 @@ func Networked(cfg *BuildCfg) fx.Option {
 	return Offline
 }
 
+// IPFS builds a group of fx Options based on the passed BuildCfg
 func IPFS(ctx context.Context, cfg *BuildCfg) fx.Option {
 	if cfg == nil {
 		cfg = new(BuildCfg)
diff --git a/core/node/helpers.go b/core/node/helpers.go
index ea5fd1443ca9..17954b63a891 100644
--- a/core/node/helpers.go
+++ b/core/node/helpers.go
@@ -50,6 +50,7 @@ func setupSharding(cfg *config.Config) {
 	uio.UseHAMTSharding = cfg.Experimental.ShardingEnabled
 }
 
+// baseProcess creates a goprocess which is closed when the lifecycle signals it to stop
 func baseProcess(lc fx.Lifecycle) goprocess.Process {
 	p := goprocess.WithParent(goprocess.Background())
 	lc.Append(fx.Hook{
diff --git a/core/node/identity.go b/core/node/identity.go
index eb3903098757..336750082c22 100644
--- a/core/node/identity.go
+++ b/core/node/identity.go
@@ -9,6 +9,7 @@ import (
 	"github.com/libp2p/go-libp2p-peer"
 )
 
+// PeerID loads the peer identity from config
 func PeerID(cfg *config.Config) (peer.ID, error) {
 	cid := cfg.Identity.PeerID
 	if cid == "" {
@@ -26,6 +27,7 @@ func PeerID(cfg *config.Config) (peer.ID, error) {
 	return id, nil
 }
 
+// PrivateKey loads the private key from config
 func PrivateKey(cfg *config.Config, id peer.ID) (crypto.PrivKey, error) {
 	if cfg.Identity.PrivKey == "" {
 		return nil, nil
diff --git a/core/node/ipns.go b/core/node/ipns.go
index afd6e678df6e..58e9955f0f5d 100644
--- a/core/node/ipns.go
+++ b/core/node/ipns.go
@@ -19,6 +19,7 @@ import (
 
 const DefaultIpnsCacheSize = 128
 
+// RecordValidator provides a namesys-compatible routing record validator
 func RecordValidator(ps peerstore.Peerstore) record.Validator {
 	return record.NamespacedValidator{
 		"pk": record.PublicKeyValidator{},
@@ -26,11 +27,13 @@ func RecordValidator(ps peerstore.Peerstore) record.Validator {
 	}
 }
 
-func OfflineNamesysCtor(rt routing.IpfsRouting, repo repo.Repo) (namesys.NameSystem, error) {
+// OfflineNamesys creates a namesys setup for offline operation
+func OfflineNamesys(rt routing.IpfsRouting, repo repo.Repo) (namesys.NameSystem, error) {
 	return namesys.NewNameSystem(rt, repo.Datastore(), 0), nil
 }
 
-func OnlineNamesysCtor(rt routing.IpfsRouting, repo repo.Repo, cfg *config.Config) (namesys.NameSystem, error) {
+// OnlineNamesys creates a new namesys setup for online operation
+func OnlineNamesys(rt routing.IpfsRouting, repo repo.Repo, cfg *config.Config) (namesys.NameSystem, error) {
 	cs := cfg.Ipns.ResolveCacheSize
 	if cs == 0 {
 		cs = DefaultIpnsCacheSize
@@ -41,6 +44,7 @@ func OnlineNamesysCtor(rt routing.IpfsRouting, repo repo.Repo, cfg *config.Confi
 	return namesys.NewNameSystem(rt, repo.Datastore(), cs), nil
 }
 
+// IpnsRepublisher runs a new IPNS republisher service
 func IpnsRepublisher(lc lcProcess, cfg *config.Config, namesys namesys.NameSystem, repo repo.Repo, privKey crypto.PrivKey) error {
 	repub := republisher.NewRepublisher(namesys, repo.Datastore(), privKey, repo.Keystore())
 
diff --git a/core/node/provider.go b/core/node/provider.go
index 6b67dc070294..336a9fd36984 100644
--- a/core/node/provider.go
+++ b/core/node/provider.go
@@ -19,10 +19,12 @@ import (
 
 const kReprovideFrequency = time.Hour * 12
 
+// ProviderQueue creates a new datastore-backed provider queue
 func ProviderQueue(mctx helpers.MetricsCtx, lc fx.Lifecycle, repo repo.Repo) (*provider.Queue, error) {
 	return provider.NewQueue(helpers.LifecycleCtx(mctx, lc), "provider-v1", repo.Datastore())
 }
 
+// ProviderCtor creates a new record provider
 func ProviderCtor(mctx helpers.MetricsCtx, lc fx.Lifecycle, queue *provider.Queue, rt routing.IpfsRouting) provider.Provider {
 	p := provider.NewProvider(helpers.LifecycleCtx(mctx, lc), queue, rt)
 
@@ -39,6 +41,7 @@ func ProviderCtor(mctx helpers.MetricsCtx, lc fx.Lifecycle, queue *provider.Queu
 	return p
 }
 
+// ReproviderCtor creates a new reprovider
 func ReproviderCtor(mctx helpers.MetricsCtx, lc fx.Lifecycle, cfg *config.Config, bs BaseBlocks, ds format.DAGService, pinning pin.Pinner, rt routing.IpfsRouting) (*reprovide.Reprovider, error) {
 	var keyProvider reprovide.KeyChanFunc
 
@@ -57,6 +60,7 @@ func ReproviderCtor(mctx helpers.MetricsCtx, lc fx.Lifecycle, cfg *config.Config
 	return reprovide.NewReprovider(helpers.LifecycleCtx(mctx, lc), rt, keyProvider), nil
 }
 
+// Reprovider runs the reprovider service
 func Reprovider(cfg *config.Config, reprovider *reprovide.Reprovider) error {
 	reproviderInterval := kReprovideFrequency
 	if cfg.Reprovider.Interval != "" {
@@ -68,6 +72,6 @@ func Reprovider(cfg *config.Config, reprovider *reprovide.Reprovider) error {
 		reproviderInterval = dur
 	}
 
-	go reprovider.Run(reproviderInterval)
+	go reprovider.Run(reproviderInterval) // TODO: refactor reprovider to have Start/Stop, use lifecycle
 	return nil
 }
diff --git a/core/node/storage.go b/core/node/storage.go
index d42e9cc9a187..6b58be60b16e 100644
--- a/core/node/storage.go
+++ b/core/node/storage.go
@@ -28,16 +28,20 @@ func isTooManyFDError(err error) bool {
 	return false
 }
 
+// RepoConfig loads configuration from the repo
 func RepoConfig(repo repo.Repo) (*config.Config, error) {
 	return repo.Config()
 }
 
-func DatastoreCtor(repo repo.Repo) datastore.Datastore {
+// Datastore provides the datastore
+func Datastore(repo repo.Repo) datastore.Datastore {
 	return repo.Datastore()
 }
 
+// BaseBlocks is the lower-level blockstore without GC or Filestore layers
 type BaseBlocks blockstore.Blockstore
 
+// BaseBlockstoreCtor creates a cached blockstore backed by the provided datastore
 func BaseBlockstoreCtor(permanent bool, nilRepo bool) func(mctx helpers.MetricsCtx, repo repo.Repo, cfg *config.Config, lc fx.Lifecycle) (bs BaseBlocks, err error) {
 	return func(mctx helpers.MetricsCtx, repo repo.Repo, cfg *config.Config, lc fx.Lifecycle) (bs BaseBlocks, err error) {
 		rds := &retrystore.Datastore{
@@ -82,6 +86,7 @@ func BaseBlockstoreCtor(permanent bool, nilRepo bool) func(mctx helpers.MetricsC
 	}
 }
 
+// GcBlockstoreCtor wraps the base blockstore with GC and Filestore layers
 func GcBlockstoreCtor(repo repo.Repo, bb BaseBlocks, cfg *config.Config) (gclocker blockstore.GCLocker, gcbs blockstore.GCBlockstore, bs blockstore.Blockstore, fstore *filestore.Filestore) {
 	gclocker = blockstore.NewGCLocker()
 	gcbs = blockstore.NewGCBlockstore(bb, gclocker)
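For readers unfamiliar with uber-go/fx, here is a minimal, self-contained sketch (not part of the patch) of the pattern the renamed constructors above follow: a provider function returns its dependency and registers an fx.Hook so it is torn down with the application, and fx.Options groups providers the way Storage, Core, and Offline do in groups.go. The Exchange, nullExchange, and NewExchange names are hypothetical stand-ins, not identifiers from go-ipfs.

```go
package main

import (
	"context"
	"fmt"

	"go.uber.org/fx"
)

// Exchange is a hypothetical stand-in for a dependency like exchange.Interface.
type Exchange interface {
	Close() error
}

type nullExchange struct{}

func (n *nullExchange) Close() error { return nil }

// NewExchange mirrors the constructor-plus-lifecycle pattern used by
// BlockService and OnlineExchange: build the value, then register a hook
// so fx closes it when the app stops.
func NewExchange(lc fx.Lifecycle) Exchange {
	ex := &nullExchange{}
	lc.Append(fx.Hook{
		OnStop: func(ctx context.Context) error {
			return ex.Close()
		},
	})
	return ex
}

// Core groups related providers, analogous to the option groups in groups.go.
var Core = fx.Options(
	fx.Provide(NewExchange),
)

func main() {
	app := fx.New(
		Core,
		// fx.Invoke forces the Exchange provider to run even though
		// nothing else in this toy graph consumes it.
		fx.Invoke(func(ex Exchange) {
			fmt.Printf("exchange wired: %T\n", ex)
		}),
	)

	if err := app.Start(context.Background()); err != nil {
		panic(err)
	}
	if err := app.Stop(context.Background()); err != nil {
		panic(err)
	}
}
```

Under these assumptions, app.Stop drives the OnStop hook registered in NewExchange, which is the same mechanism the patch relies on for shutting down the blockservice and BitSwap (and the TODO on Reprovider notes that it should eventually use the lifecycle the same way instead of a bare goroutine).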