diff --git a/cmd/ipfs/main.go b/cmd/ipfs/main.go index f3672f18041..378334f1493 100644 --- a/cmd/ipfs/main.go +++ b/cmd/ipfs/main.go @@ -174,10 +174,11 @@ func makeExecutor(req *cmds.Request, env interface{}) (cmds.Executor, error) { if err != nil { return nil, err } - if ok { - if _, err := loader.LoadPlugins(pluginpath); err != nil { - log.Error("error loading plugins: ", err) - } + if !ok { + pluginpath = "" + } + if _, err := loader.LoadPlugins(pluginpath); err != nil { + log.Error("error loading plugins: ", err) } exctr = cmds.NewExecutor(req.Root) diff --git a/plugin/loader/load.go b/plugin/loader/load.go index b6ddb7b3748..bce3e42c4fb 100644 --- a/plugin/loader/load.go +++ b/plugin/loader/load.go @@ -22,20 +22,22 @@ func LoadPlugins(pluginDir string) ([]plugin.Plugin, error) { plMap[v.Name()] = v } - newPls, err := loadDynamicPlugins(pluginDir) - if err != nil { - return nil, err - } + if pluginDir != "" { + newPls, err := loadDynamicPlugins(pluginDir) + if err != nil { + return nil, err + } - for _, pl := range newPls { - if ppl, ok := plMap[pl.Name()]; ok { - // plugin is already preloaded - return nil, fmt.Errorf( - "plugin: %s, is duplicated in version: %s, "+ - "while trying to load dynamically: %s", - ppl.Name(), ppl.Version(), pl.Version()) + for _, pl := range newPls { + if ppl, ok := plMap[pl.Name()]; ok { + // plugin is already preloaded + return nil, fmt.Errorf( + "plugin: %s, is duplicated in version: %s, "+ + "while trying to load dynamically: %s", + ppl.Name(), ppl.Version(), pl.Version()) + } + plMap[pl.Name()] = pl } - plMap[pl.Name()] = pl } pls := make([]plugin.Plugin, 0, len(plMap)) @@ -43,7 +45,7 @@ func LoadPlugins(pluginDir string) ([]plugin.Plugin, error) { pls = append(pls, v) } - err = initialize(pls) + err := initialize(pls) if err != nil { return nil, err } diff --git a/plugin/loader/preload.go b/plugin/loader/preload.go index 46dd2cb9224..730f3538e74 100644 --- a/plugin/loader/preload.go +++ b/plugin/loader/preload.go @@ -2,7 
+2,10 @@ package loader import ( "github.com/ipfs/go-ipfs/plugin" + pluginbadgerds "github.com/ipfs/go-ipfs/plugin/plugins/badgerds" + pluginflatfs "github.com/ipfs/go-ipfs/plugin/plugins/flatfs" pluginipldgit "github.com/ipfs/go-ipfs/plugin/plugins/git" + pluginlevelds "github.com/ipfs/go-ipfs/plugin/plugins/levelds" ) // DO NOT EDIT THIS FILE @@ -11,4 +14,7 @@ import ( var preloadPlugins = []plugin.Plugin{ pluginipldgit.Plugins[0], + pluginbadgerds.Plugins[0], + pluginflatfs.Plugins[0], + pluginlevelds.Plugins[0], } diff --git a/plugin/loader/preload_list b/plugin/loader/preload_list index 2905dfe1c31..6155dfbc782 100644 --- a/plugin/loader/preload_list +++ b/plugin/loader/preload_list @@ -4,3 +4,7 @@ # name go-path number of the sub-plugin ipldgit github.com/ipfs/go-ipfs/plugin/plugins/git 0 + +badgerds github.com/ipfs/go-ipfs/plugin/plugins/badgerds 0 +flatfs github.com/ipfs/go-ipfs/plugin/plugins/flatfs 0 +levelds github.com/ipfs/go-ipfs/plugin/plugins/levelds 0 diff --git a/plugin/plugins/Rules.mk b/plugin/plugins/Rules.mk index 450383be967..80924bad202 100644 --- a/plugin/plugins/Rules.mk +++ b/plugin/plugins/Rules.mk @@ -1,6 +1,6 @@ include mk/header.mk -$(d)_plugins:=$(d)/git +$(d)_plugins:=$(d)/git $(d)/badgerds $(d)/flatfs $(d)/levelds $(d)_plugins_so:=$(addsuffix .so,$($(d)_plugins)) $(d)_plugins_main:=$(addsuffix /main/main.go,$($(d)_plugins)) diff --git a/plugin/plugins/badgerds/badgerds.go b/plugin/plugins/badgerds/badgerds.go new file mode 100644 index 00000000000..bbc5509c942 --- /dev/null +++ b/plugin/plugins/badgerds/badgerds.go @@ -0,0 +1,114 @@ +package badgerds + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/ipfs/go-ipfs/plugin" + "github.com/ipfs/go-ipfs/repo" + "github.com/ipfs/go-ipfs/repo/fsrepo" + + humanize "gx/ipfs/QmPSBJL4momYnE7DcUyk2DVhD6rH488ZmHBGLbxNdhU44K/go-humanize" + badgerds "gx/ipfs/QmaixNkKwtinV3umL5VD1VDD5CQjnZhXY31awM2YHTzbui/go-ds-badger" +) + +// Plugins is exported list of plugins that will be loaded +var 
Plugins = []plugin.Plugin{ + &badgerdsPlugin{}, +} + +type badgerdsPlugin struct{} + +var _ plugin.PluginDatastore = (*badgerdsPlugin)(nil) + +func (*badgerdsPlugin) Name() string { + return "ds-badgerds" +} + +func (*badgerdsPlugin) Version() string { + return "0.1.0" +} + +func (*badgerdsPlugin) Init() error { + return nil +} + +func (*badgerdsPlugin) DatastoreTypeName() string { + return "badgerds" +} + +type datastoreConfig struct { + path string + syncWrites bool + + vlogFileSize int64 +} + +// DatastoreConfigParser returns a configuration stub for a badger datastore +// from the given parameters +func (*badgerdsPlugin) DatastoreConfigParser() fsrepo.ConfigFromMap { + return func(params map[string]interface{}) (fsrepo.DatastoreConfig, error) { + var c datastoreConfig + var ok bool + + c.path, ok = params["path"].(string) + if !ok { + return nil, fmt.Errorf("'path' field is missing or not string") + } + + sw, ok := params["syncWrites"] + if !ok { + c.syncWrites = true + } else { + if swb, ok := sw.(bool); ok { + c.syncWrites = swb + } else { + return nil, fmt.Errorf("'syncWrites' field was not a boolean") + } + } + + vls, ok := params["vlogFileSize"] + if !ok { + // default to 1GiB + c.vlogFileSize = badgerds.DefaultOptions.ValueLogFileSize + } else { + if vlogSize, ok := vls.(string); ok { + s, err := humanize.ParseBytes(vlogSize) + if err != nil { + return nil, err + } + c.vlogFileSize = int64(s) + } else { + return nil, fmt.Errorf("'vlogFileSize' field was not a string") + } + } + + return &c, nil + } +} + +func (c *datastoreConfig) DiskSpec() fsrepo.DiskSpec { + return map[string]interface{}{ + "type": "badgerds", + "path": c.path, + } +} + +func (c *datastoreConfig) Create(path string) (repo.Datastore, error) { + p := c.path + if !filepath.IsAbs(p) { + p = filepath.Join(path, p) + } + + err := os.MkdirAll(p, 0755) + if err != nil { + return nil, err + } + + defopts := badgerds.DefaultOptions + defopts.SyncWrites = c.syncWrites + defopts.ValueLogFileSize 
= c.vlogFileSize + + return badgerds.NewDatastore(p, &defopts) +} diff --git a/plugin/plugins/flatfs/flatfs.go b/plugin/plugins/flatfs/flatfs.go new file mode 100644 index 00000000000..d7e98af0cdc --- /dev/null +++ b/plugin/plugins/flatfs/flatfs.go @@ -0,0 +1,90 @@ +package flatfs + +import ( + "fmt" + "path/filepath" + + "github.com/ipfs/go-ipfs/plugin" + "github.com/ipfs/go-ipfs/repo" + "github.com/ipfs/go-ipfs/repo/fsrepo" + + flatfs "gx/ipfs/QmVFboKxbVJZMJAoFdvX6q4hzvXFkbWCE8DejnqrQV4ZtN/go-ds-flatfs" +) + +// Plugins is exported list of plugins that will be loaded +var Plugins = []plugin.Plugin{ + &flatfsPlugin{}, +} + +type flatfsPlugin struct{} + +var _ plugin.PluginDatastore = (*flatfsPlugin)(nil) + +func (*flatfsPlugin) Name() string { + return "ds-flatfs" +} + +func (*flatfsPlugin) Version() string { + return "0.1.0" +} + +func (*flatfsPlugin) Init() error { + return nil +} + +func (*flatfsPlugin) DatastoreTypeName() string { + return "flatfs" +} + +type datastoreConfig struct { + path string + shardFun *flatfs.ShardIdV1 + syncField bool +} + +// DatastoreConfigParser returns a configuration stub for a flatfs datastore +// from the given parameters +func (*flatfsPlugin) DatastoreConfigParser() fsrepo.ConfigFromMap { + return func(params map[string]interface{}) (fsrepo.DatastoreConfig, error) { + var c datastoreConfig + var ok bool + var err error + + c.path, ok = params["path"].(string) + if !ok { + return nil, fmt.Errorf("'path' field is missing or not boolean") + } + + sshardFun, ok := params["shardFunc"].(string) + if !ok { + return nil, fmt.Errorf("'shardFunc' field is missing or not a string") + } + c.shardFun, err = flatfs.ParseShardFunc(sshardFun) + if err != nil { + return nil, err + } + + c.syncField, ok = params["sync"].(bool) + if !ok { + return nil, fmt.Errorf("'sync' field is missing or not boolean") + } + return &c, nil + } +} + +func (c *datastoreConfig) DiskSpec() fsrepo.DiskSpec { + return map[string]interface{}{ + "type": "flatfs", + 
"path": c.path, + "shardFunc": c.shardFun.String(), + } +} + +func (c *datastoreConfig) Create(path string) (repo.Datastore, error) { + p := c.path + if !filepath.IsAbs(p) { + p = filepath.Join(path, p) + } + + return flatfs.CreateOrOpen(p, c.shardFun, c.syncField) +} diff --git a/plugin/plugins/levelds/levelds.go b/plugin/plugins/levelds/levelds.go new file mode 100644 index 00000000000..268e605296f --- /dev/null +++ b/plugin/plugins/levelds/levelds.go @@ -0,0 +1,88 @@ +package levelds + +import ( + "fmt" + "path/filepath" + + "github.com/ipfs/go-ipfs/plugin" + "github.com/ipfs/go-ipfs/repo" + "github.com/ipfs/go-ipfs/repo/fsrepo" + + ldbopts "gx/ipfs/QmbBhyDKsY4mbY6xsKt3qu9Y7FPvMJ6qbD8AMjYYvPRw1g/goleveldb/leveldb/opt" + levelds "gx/ipfs/QmccqjKZUTqp4ikWNyAbjBuP5HEdqSqRuAr9mcEhYab54a/go-ds-leveldb" +) + +// Plugins is exported list of plugins that will be loaded +var Plugins = []plugin.Plugin{ + &leveldsPlugin{}, +} + +type leveldsPlugin struct{} + +var _ plugin.PluginDatastore = (*leveldsPlugin)(nil) + +func (*leveldsPlugin) Name() string { + return "ds-level" +} + +func (*leveldsPlugin) Version() string { + return "0.1.0" +} + +func (*leveldsPlugin) Init() error { + return nil +} + +func (*leveldsPlugin) DatastoreTypeName() string { + return "levelds" +} + +type datastoreConfig struct { + path string + compression ldbopts.Compression +} + +// BadgerdsDatastoreConfig returns a configuration stub for a badger datastore +// from the given parameters +func (*leveldsPlugin) DatastoreConfigParser() fsrepo.ConfigFromMap { + return func(params map[string]interface{}) (fsrepo.DatastoreConfig, error) { + var c datastoreConfig + var ok bool + + c.path, ok = params["path"].(string) + if !ok { + return nil, fmt.Errorf("'path' field is missing or not string") + } + + switch cm := params["compression"].(string); cm { + case "none": + c.compression = ldbopts.NoCompression + case "snappy": + c.compression = ldbopts.SnappyCompression + case "": + c.compression = 
ldbopts.DefaultCompression + default: + return nil, fmt.Errorf("unrecognized value for compression: %s", cm) + } + + return &c, nil + } +} + +func (c *datastoreConfig) DiskSpec() fsrepo.DiskSpec { + return map[string]interface{}{ + "type": "levelds", + "path": c.path, + } +} + +func (c *datastoreConfig) Create(path string) (repo.Datastore, error) { + p := c.path + if !filepath.IsAbs(p) { + p = filepath.Join(path, p) + } + + return levelds.NewDatastore(p, &levelds.Options{ + Compression: c.compression, + }) +} diff --git a/repo/fsrepo/config_test.go b/repo/fsrepo/config_test.go index 8ab6846c669..07d3349efce 100644 --- a/repo/fsrepo/config_test.go +++ b/repo/fsrepo/config_test.go @@ -1,4 +1,4 @@ -package fsrepo +package fsrepo_test import ( "encoding/json" @@ -7,7 +7,10 @@ import ( "reflect" "testing" - config "gx/ipfs/QmbK4EmM2Xx5fmbqK38TGP3PpY66r3tkXLZTcc7dF9mFwM/go-ipfs-config" + "github.com/ipfs/go-ipfs/plugin/loader" + "github.com/ipfs/go-ipfs/repo/fsrepo" + + "gx/ipfs/QmPEpj17FDRpc7K1aArKZp3RsHtzRMKykeK9GVgn4WQGPR/go-ipfs-config" ) // note: to test sorting of the mountpoints in the disk spec they are @@ -72,6 +75,8 @@ var measureConfig = []byte(`{ }`) func TestDefaultDatastoreConfig(t *testing.T) { + loader.LoadPlugins("") + dir, err := ioutil.TempDir("", "ipfs-datastore-config-test") if err != nil { t.Fatal(err) @@ -84,7 +89,7 @@ func TestDefaultDatastoreConfig(t *testing.T) { t.Fatal(err) } - dsc, err := AnyDatastoreConfig(config.Spec) + dsc, err := fsrepo.AnyDatastoreConfig(config.Spec) if err != nil { t.Fatal(err) } @@ -122,7 +127,7 @@ func TestLevelDbConfig(t *testing.T) { t.Fatal(err) } - dsc, err := AnyDatastoreConfig(spec) + dsc, err := fsrepo.AnyDatastoreConfig(spec) if err != nil { t.Fatal(err) } @@ -160,7 +165,7 @@ func TestFlatfsConfig(t *testing.T) { t.Fatal(err) } - dsc, err := AnyDatastoreConfig(spec) + dsc, err := fsrepo.AnyDatastoreConfig(spec) if err != nil { t.Fatal(err) } @@ -198,7 +203,7 @@ func TestMeasureConfig(t *testing.T) { 
t.Fatal(err) } - dsc, err := AnyDatastoreConfig(spec) + dsc, err := fsrepo.AnyDatastoreConfig(spec) if err != nil { t.Fatal(err) } diff --git a/repo/fsrepo/datastores.go b/repo/fsrepo/datastores.go index 0408ec24462..5b45bc2fbff 100644 --- a/repo/fsrepo/datastores.go +++ b/repo/fsrepo/datastores.go @@ -4,20 +4,13 @@ import ( "bytes" "encoding/json" "fmt" - "os" - "path/filepath" "sort" - repo "github.com/ipfs/go-ipfs/repo" + "github.com/ipfs/go-ipfs/repo" - humanize "gx/ipfs/QmPSBJL4momYnE7DcUyk2DVhD6rH488ZmHBGLbxNdhU44K/go-humanize" - measure "gx/ipfs/QmQS6UXi1R87y9nEgnCNmG6YfMzvBSLir7xUheMNFP3hoe/go-ds-measure" - flatfs "gx/ipfs/QmVFboKxbVJZMJAoFdvX6q4hzvXFkbWCE8DejnqrQV4ZtN/go-ds-flatfs" + "gx/ipfs/QmQS6UXi1R87y9nEgnCNmG6YfMzvBSLir7xUheMNFP3hoe/go-ds-measure" ds "gx/ipfs/QmaRb5yNXKonhbkpNxNawoydk4N6es6b4fPj19sjEKsh5D/go-datastore" - mount "gx/ipfs/QmaRb5yNXKonhbkpNxNawoydk4N6es6b4fPj19sjEKsh5D/go-datastore/mount" - badgerds "gx/ipfs/QmaixNkKwtinV3umL5VD1VDD5CQjnZhXY31awM2YHTzbui/go-ds-badger" - ldbopts "gx/ipfs/QmbBhyDKsY4mbY6xsKt3qu9Y7FPvMJ6qbD8AMjYYvPRw1g/goleveldb/leveldb/opt" - levelds "gx/ipfs/QmccqjKZUTqp4ikWNyAbjBuP5HEdqSqRuAr9mcEhYab54a/go-ds-leveldb" + "gx/ipfs/QmaRb5yNXKonhbkpNxNawoydk4N6es6b4fPj19sjEKsh5D/go-datastore/mount" ) // ConfigFromMap creates a new datastore config from a map @@ -63,13 +56,10 @@ var datastores map[string]ConfigFromMap func init() { datastores = map[string]ConfigFromMap{ - "mount": MountDatastoreConfig, - "flatfs": FlatfsDatastoreConfig, - "levelds": LeveldsDatastoreConfig, - "badgerds": BadgerdsDatastoreConfig, - "mem": MemDatastoreConfig, - "log": LogDatastoreConfig, - "measure": MeasureDatastoreConfig, + "mount": MountDatastoreConfig, + "mem": MemDatastoreConfig, + "log": LogDatastoreConfig, + "measure": MeasureDatastoreConfig, } } @@ -170,103 +160,6 @@ func (c *mountDatastoreConfig) Create(path string) (repo.Datastore, error) { return mount.New(mounts), nil } -type flatfsDatastoreConfig struct { - path string - shardFun 
*flatfs.ShardIdV1 - syncField bool -} - -// FlatfsDatastoreConfig returns a flatfs DatastoreConfig from a spec -func FlatfsDatastoreConfig(params map[string]interface{}) (DatastoreConfig, error) { - var c flatfsDatastoreConfig - var ok bool - var err error - - c.path, ok = params["path"].(string) - if !ok { - return nil, fmt.Errorf("'path' field is missing or not boolean") - } - - sshardFun, ok := params["shardFunc"].(string) - if !ok { - return nil, fmt.Errorf("'shardFunc' field is missing or not a string") - } - c.shardFun, err = flatfs.ParseShardFunc(sshardFun) - if err != nil { - return nil, err - } - - c.syncField, ok = params["sync"].(bool) - if !ok { - return nil, fmt.Errorf("'sync' field is missing or not boolean") - } - return &c, nil -} - -func (c *flatfsDatastoreConfig) DiskSpec() DiskSpec { - return map[string]interface{}{ - "type": "flatfs", - "path": c.path, - "shardFunc": c.shardFun.String(), - } -} - -func (c *flatfsDatastoreConfig) Create(path string) (repo.Datastore, error) { - p := c.path - if !filepath.IsAbs(p) { - p = filepath.Join(path, p) - } - - return flatfs.CreateOrOpen(p, c.shardFun, c.syncField) -} - -type leveldsDatastoreConfig struct { - path string - compression ldbopts.Compression -} - -// LeveldsDatastoreConfig returns a levelds DatastoreConfig from a spec -func LeveldsDatastoreConfig(params map[string]interface{}) (DatastoreConfig, error) { - var c leveldsDatastoreConfig - var ok bool - - c.path, ok = params["path"].(string) - if !ok { - return nil, fmt.Errorf("'path' field is missing or not string") - } - - switch cm := params["compression"].(string); cm { - case "none": - c.compression = ldbopts.NoCompression - case "snappy": - c.compression = ldbopts.SnappyCompression - case "": - c.compression = ldbopts.DefaultCompression - default: - return nil, fmt.Errorf("unrecognized value for compression: %s", cm) - } - - return &c, nil -} - -func (c *leveldsDatastoreConfig) DiskSpec() DiskSpec { - return map[string]interface{}{ - "type": 
"levelds", - "path": c.path, - } -} - -func (c *leveldsDatastoreConfig) Create(path string) (repo.Datastore, error) { - p := c.path - if !filepath.IsAbs(p) { - p = filepath.Join(path, p) - } - - return levelds.NewDatastore(p, &levelds.Options{ - Compression: c.compression, - }) -} - type memDatastoreConfig struct { cfg map[string]interface{} } @@ -352,76 +245,3 @@ func (c measureDatastoreConfig) Create(path string) (repo.Datastore, error) { } return measure.New(c.prefix, child), nil } - -type badgerdsDatastoreConfig struct { - path string - syncWrites bool - - vlogFileSize int64 -} - -// BadgerdsDatastoreConfig returns a configuration stub for a badger datastore -// from the given parameters -func BadgerdsDatastoreConfig(params map[string]interface{}) (DatastoreConfig, error) { - var c badgerdsDatastoreConfig - var ok bool - - c.path, ok = params["path"].(string) - if !ok { - return nil, fmt.Errorf("'path' field is missing or not string") - } - - sw, ok := params["syncWrites"] - if !ok { - c.syncWrites = true - } else { - if swb, ok := sw.(bool); ok { - c.syncWrites = swb - } else { - return nil, fmt.Errorf("'syncWrites' field was not a boolean") - } - } - - vls, ok := params["vlogFileSize"] - if !ok { - // default to 1GiB - c.vlogFileSize = badgerds.DefaultOptions.ValueLogFileSize - } else { - if vlogSize, ok := vls.(string); ok { - s, err := humanize.ParseBytes(vlogSize) - if err != nil { - return nil, err - } - c.vlogFileSize = int64(s) - } else { - return nil, fmt.Errorf("'vlogFileSize' field was not a string") - } - } - - return &c, nil -} - -func (c *badgerdsDatastoreConfig) DiskSpec() DiskSpec { - return map[string]interface{}{ - "type": "badgerds", - "path": c.path, - } -} - -func (c *badgerdsDatastoreConfig) Create(path string) (repo.Datastore, error) { - p := c.path - if !filepath.IsAbs(p) { - p = filepath.Join(path, p) - } - - err := os.MkdirAll(p, 0755) - if err != nil { - return nil, err - } - - defopts := badgerds.DefaultOptions - defopts.SyncWrites = 
c.syncWrites - defopts.ValueLogFileSize = c.vlogFileSize - - return badgerds.NewDatastore(p, &defopts) -}